xref: /xnu-11215.81.4/bsd/vfs/vfs_cluster.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1993
31  *	The Regents of the University of California.  All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  * 3. All advertising materials mentioning features or use of this software
42  *    must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Berkeley and its contributors.
45  * 4. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)vfs_cluster.c	8.10 (Berkeley) 3/28/95
62  */
63 
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/buf_internal.h>
67 #include <sys/mount_internal.h>
68 #include <sys/vnode_internal.h>
69 #include <sys/trace.h>
70 #include <kern/kalloc.h>
71 #include <sys/time.h>
72 #include <sys/kernel.h>
73 #include <sys/resourcevar.h>
74 #include <miscfs/specfs/specdev.h>
75 #include <sys/uio_internal.h>
76 #include <libkern/libkern.h>
77 #include <machine/machine_routines.h>
78 
79 #include <sys/ubc_internal.h>
80 #include <vm/vnode_pager.h>
81 #include <vm/vm_upl.h>
82 
83 #include <mach/mach_types.h>
84 #include <mach/memory_object_types.h>
85 #include <mach/vm_map.h>
86 #include <mach/upl.h>
87 #include <mach/thread_info.h>
88 #include <kern/task.h>
89 #include <kern/policy_internal.h>
90 #include <kern/thread.h>
91 
92 #include <vm/vm_kern_xnu.h>
93 #include <vm/vm_map_xnu.h>
94 #include <vm/vm_pageout_xnu.h>
95 #include <vm/vm_fault.h>
96 #include <vm/vm_ubc.h>
97 
98 #include <sys/kdebug.h>
99 #include <sys/kdebug_triage.h>
100 #include <libkern/OSAtomic.h>
101 
102 #include <sys/sdt.h>
103 
104 #include <stdbool.h>
105 
106 #include <vfs/vfs_disk_conditioner.h>
107 
108 #if 0
109 #undef KERNEL_DEBUG
110 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
111 #endif
112 
113 
114 #define CL_READ         0x01
115 #define CL_WRITE        0x02
116 #define CL_ASYNC        0x04
117 #define CL_COMMIT       0x08
118 #define CL_PAGEOUT      0x10
119 #define CL_AGE          0x20
120 #define CL_NOZERO       0x40
121 #define CL_PAGEIN       0x80
122 #define CL_DEV_MEMORY   0x100
123 #define CL_PRESERVE     0x200
124 #define CL_THROTTLE     0x400
125 #define CL_KEEPCACHED   0x800
126 #define CL_DIRECT_IO    0x1000
127 #define CL_PASSIVE      0x2000
128 #define CL_IOSTREAMING  0x4000
129 #define CL_CLOSE        0x8000
130 #define CL_ENCRYPTED    0x10000
131 #define CL_RAW_ENCRYPTED        0x20000
132 #define CL_NOCACHE      0x40000
133 #define CL_DIRECT_IO_FSBLKSZ    0x80000
134 
135 #define MAX_VECTOR_UPL_SIZE     (2 * MAX_UPL_SIZE_BYTES)
136 
137 #define CLUSTER_IO_WAITING              ((buf_t)1)
138 
139 extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, upl_size_t);
140 
141 struct clios {
142 	lck_mtx_t io_mtxp;
143 	u_int  io_completed;       /* amount of io that has currently completed */
144 	u_int  io_issued;          /* amount of io that was successfully issued */
145 	int    io_error;           /* error code of first error encountered */
146 	int    io_wanted;          /* someone is sleeping waiting for a change in state */
147 };
148 
149 struct cl_direct_read_lock {
150 	LIST_ENTRY(cl_direct_read_lock)         chain;
151 	int32_t                                                         ref_count;
152 	vnode_t                                                         vp;
153 	lck_rw_t                                                        rw_lock;
154 };
155 
156 #define CL_DIRECT_READ_LOCK_BUCKETS 61
157 
158 static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock)
159 cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS];
160 
161 static LCK_GRP_DECLARE(cl_mtx_grp, "cluster I/O");
162 static LCK_MTX_DECLARE(cl_transaction_mtxp, &cl_mtx_grp);
163 static LCK_SPIN_DECLARE(cl_direct_read_spin_lock, &cl_mtx_grp);
164 
165 static ZONE_DEFINE(cl_rd_zone, "cluster_read",
166     sizeof(struct cl_readahead), ZC_ZFREE_CLEARMEM);
167 
168 static ZONE_DEFINE(cl_wr_zone, "cluster_write",
169     sizeof(struct cl_writebehind), ZC_ZFREE_CLEARMEM);
170 
171 #define IO_UNKNOWN      0
172 #define IO_DIRECT       1
173 #define IO_CONTIG       2
174 #define IO_COPY         3
175 
176 #define PUSH_DELAY      0x01
177 #define PUSH_ALL        0x02
178 #define PUSH_SYNC       0x04
179 
180 
181 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size);
182 static void cluster_wait_IO(buf_t cbp_head, int async);
183 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
184 
185 static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
186 
187 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
188     int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
189 static void cluster_iodone_verify_continue(void);
190 static int cluster_iodone(buf_t bp, void *callback_arg);
191 static int cluster_iodone_finish(buf_t cbp_head, void *callback_arg);
192 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp);
193 static int cluster_is_throttled(vnode_t vp);
194 
195 static void cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name);
196 
197 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg, int flags);
198 
199 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
200 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
201 
202 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
203     int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
204 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
205     int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
206 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
207     int (*)(buf_t, void *), void *callback_arg, int flags) __attribute__((noinline));
208 
209 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
210     off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
211 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
212     int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size) __attribute__((noinline));
213 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
214     int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag) __attribute__((noinline));
215 
216 static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass,
217     off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
218 
219 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
220 
221 static int      cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
222 static void     cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra,
223     int (*callback)(buf_t, void *), void *callback_arg, int bflag);
224 
225 static int      cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_ioitiated);
226 
227 static int      cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *),
228     void *callback_arg, int *err, boolean_t vm_initiated);
229 
230 static int      sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
231 static int      sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag,
232     int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
233 static int      sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF,
234     int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
235 
236 static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
237 static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
238 static kern_return_t vfs_drt_control(void **cmapp, int op_type);
239 static kern_return_t vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag);
240 
241 
242 /*
243  * For throttled IO to check whether
244  * a block is cached by the boot cache
245  * and thus it can avoid delaying the IO.
246  *
247  * bootcache_contains_block is initially
248  * NULL. The BootCache will set it while
249  * the cache is active and clear it when
250  * the cache is jettisoned.
251  *
252  * Returns 0 if the block is not
253  * contained in the cache, 1 if it is
254  * contained.
255  *
256  * The function pointer remains valid
257  * after the cache has been evicted even
258  * if bootcache_contains_block has been
259  * cleared.
260  *
261  * See rdar://9974130 The new throttling mechanism breaks the boot cache for throttled IOs
262  */
263 int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL;
264 
265 
266 /*
267  * limit the internal I/O size so that we
268  * can represent it in a 32 bit int
269  */
270 #define MAX_IO_REQUEST_SIZE     (1024 * 1024 * 512)
271 #define MAX_IO_CONTIG_SIZE      MAX_UPL_SIZE_BYTES
272 #define MAX_VECTS               16
273 /*
274  * The MIN_DIRECT_WRITE_SIZE governs how much I/O should be issued before we consider
275  * allowing the caller to bypass the buffer cache.  For small I/Os (less than 16k),
276  * we have not historically allowed the write to bypass the UBC.
277  */
278 #define MIN_DIRECT_WRITE_SIZE   (16384)
279 
280 #define WRITE_THROTTLE          6
281 #define WRITE_THROTTLE_SSD      2
282 #define WRITE_BEHIND            1
283 #define WRITE_BEHIND_SSD        1
284 
285 #if !defined(XNU_TARGET_OS_OSX)
286 #define PREFETCH                1
287 #define PREFETCH_SSD            1
288 uint32_t speculative_prefetch_max = (2048 * 1024);              /* maximum bytes in a specluative read-ahead */
289 uint32_t speculative_prefetch_max_iosize = (512 * 1024);        /* maximum I/O size to use in a specluative read-ahead */
290 #else /* XNU_TARGET_OS_OSX */
291 #define PREFETCH                3
292 #define PREFETCH_SSD            2
293 uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3);   /* maximum bytes in a specluative read-ahead */
294 uint32_t speculative_prefetch_max_iosize = (512 * 1024);        /* maximum I/O size to use in a specluative read-ahead on SSDs*/
295 #endif /* ! XNU_TARGET_OS_OSX */
296 
297 /* maximum bytes for read-ahead */
298 uint32_t prefetch_max = (1024 * 1024 * 1024);
299 /* maximum bytes for outstanding reads */
300 uint32_t overlapping_read_max = (1024 * 1024 * 1024);
301 /* maximum bytes for outstanding writes */
302 uint32_t overlapping_write_max = (1024 * 1024 * 1024);
303 
304 #define IO_SCALE(vp, base)              (vp->v_mount->mnt_ioscale * (base))
305 #define MAX_CLUSTER_SIZE(vp)            (cluster_max_io_size(vp->v_mount, CL_WRITE))
306 
307 int     speculative_reads_disabled = 0;
308 
309 /*
310  * throttle the number of async writes that
311  * can be outstanding on a single vnode
312  * before we issue a synchronous write
313  */
314 #define THROTTLE_MAXCNT 0
315 
316 uint32_t throttle_max_iosize = (128 * 1024);
317 
318 #define THROTTLE_MAX_IOSIZE (throttle_max_iosize)
319 
320 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_max_iosize, 0, "");
321 
322 struct verify_buf {
323 	TAILQ_ENTRY(verify_buf) vb_entry;
324 	buf_t vb_cbp;
325 	void* vb_callback_arg;
326 	int32_t vb_whichq;
327 };
328 
329 TAILQ_HEAD(, verify_buf) verify_free_head;
330 TAILQ_HEAD(, verify_buf) verify_work_head;
331 
332 #define MAX_VERIFY_THREADS 4
333 #define MAX_REQUESTS_PER_THREAD  2
334 
335 static struct verify_buf verify_bufs[MAX_VERIFY_THREADS * MAX_REQUESTS_PER_THREAD];
336 /*
337  * Each thread needs to check if the item at the head of the queue has a UPL
338  * pointer that is any of the threads are currently operating on.
339  * slot 0 is for the io completion thread to do the request inline if there are no free
340  * queue slots.
341  */
342 static int verify_in_flight = 0;
343 
344 #if defined(XNU_TARGET_OS_IOS)
345 #define NUM_DEFAULT_THREADS 2
346 #elif defined(XNU_TARGET_OS_OSX)
347 #define NUM_DEFAULT_THREADS 4
348 #else
349 #define NUM_DEFAULT_THREADS 0
350 #endif
351 
352 static TUNABLE(uint32_t, num_verify_threads, "num_verify_threads", NUM_DEFAULT_THREADS);
353 static uint32_t cluster_verify_threads = 0; /* will be launched as needed upto num_verify_threads */
354 
355 static void
cluster_verify_init(void)356 cluster_verify_init(void)
357 {
358 	TAILQ_INIT(&verify_free_head);
359 	TAILQ_INIT(&verify_work_head);
360 
361 	if (num_verify_threads > MAX_VERIFY_THREADS) {
362 		num_verify_threads = MAX_VERIFY_THREADS;
363 	}
364 
365 	for (int i = 0; i < num_verify_threads * MAX_REQUESTS_PER_THREAD; i++) {
366 		TAILQ_INSERT_TAIL(&verify_free_head, &verify_bufs[i], vb_entry);
367 	}
368 }
369 
370 void
cluster_init(void)371 cluster_init(void)
372 {
373 	for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) {
374 		LIST_INIT(&cl_direct_read_locks[i]);
375 	}
376 
377 	cluster_verify_init();
378 }
379 
380 uint32_t
cluster_max_io_size(mount_t mp,int type)381 cluster_max_io_size(mount_t mp, int type)
382 {
383 	uint32_t        max_io_size;
384 	uint32_t        segcnt;
385 	uint32_t        maxcnt;
386 
387 	switch (type) {
388 	case CL_READ:
389 		segcnt = mp->mnt_segreadcnt;
390 		maxcnt = mp->mnt_maxreadcnt;
391 		break;
392 	case CL_WRITE:
393 		segcnt = mp->mnt_segwritecnt;
394 		maxcnt = mp->mnt_maxwritecnt;
395 		break;
396 	default:
397 		segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
398 		maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
399 		break;
400 	}
401 	if (segcnt > (MAX_UPL_SIZE_BYTES >> PAGE_SHIFT)) {
402 		/*
403 		 * don't allow a size beyond the max UPL size we can create
404 		 */
405 		segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
406 	}
407 	max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
408 
409 	if (max_io_size < MAX_UPL_TRANSFER_BYTES) {
410 		/*
411 		 * don't allow a size smaller than the old fixed limit
412 		 */
413 		max_io_size = MAX_UPL_TRANSFER_BYTES;
414 	} else {
415 		/*
416 		 * make sure the size specified is a multiple of PAGE_SIZE
417 		 */
418 		max_io_size &= ~PAGE_MASK;
419 	}
420 	return max_io_size;
421 }
422 
423 /*
424  * Returns max prefetch value. If the value overflows or exceeds the specified
425  * 'prefetch_limit', it will be capped at 'prefetch_limit' value.
426  */
427 static inline uint32_t
cluster_max_prefetch(vnode_t vp,uint32_t max_io_size,uint32_t prefetch_limit)428 cluster_max_prefetch(vnode_t vp, uint32_t max_io_size, uint32_t prefetch_limit)
429 {
430 	bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
431 	uint32_t io_scale = IO_SCALE(vp, is_ssd ? PREFETCH_SSD : PREFETCH);
432 	uint32_t prefetch = 0;
433 
434 	if (__improbable(os_mul_overflow(max_io_size, io_scale, &prefetch) ||
435 	    (prefetch > prefetch_limit))) {
436 		prefetch = prefetch_limit;
437 	}
438 
439 	return prefetch;
440 }
441 
442 static inline uint32_t
calculate_max_throttle_size(vnode_t vp)443 calculate_max_throttle_size(vnode_t vp)
444 {
445 	bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
446 	uint32_t io_scale = IO_SCALE(vp, is_ssd ? 2 : 1);
447 
448 	return MIN(io_scale * THROTTLE_MAX_IOSIZE, MAX_UPL_TRANSFER_BYTES);
449 }
450 
451 static inline uint32_t
calculate_max_throttle_cnt(vnode_t vp)452 calculate_max_throttle_cnt(vnode_t vp)
453 {
454 	bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
455 	uint32_t io_scale = IO_SCALE(vp, 1);
456 
457 	return is_ssd ? MIN(io_scale, 4) : THROTTLE_MAXCNT;
458 }
459 
460 #define CLW_ALLOCATE            0x01
461 #define CLW_RETURNLOCKED        0x02
462 #define CLW_IONOCACHE           0x04
463 #define CLW_IOPASSIVE   0x08
464 
465 /*
466  * if the read ahead context doesn't yet exist,
467  * allocate and initialize it...
468  * the vnode lock serializes multiple callers
469  * during the actual assignment... first one
470  * to grab the lock wins... the other callers
471  * will release the now unnecessary storage
472  *
473  * once the context is present, try to grab (but don't block on)
474  * the lock associated with it... if someone
475  * else currently owns it, than the read
476  * will run without read-ahead.  this allows
477  * multiple readers to run in parallel and
478  * since there's only 1 read ahead context,
479  * there's no real loss in only allowing 1
480  * reader to have read-ahead enabled.
481  */
482 static struct cl_readahead *
cluster_get_rap(vnode_t vp)483 cluster_get_rap(vnode_t vp)
484 {
485 	struct ubc_info         *ubc;
486 	struct cl_readahead     *rap;
487 
488 	ubc = vp->v_ubcinfo;
489 
490 	if ((rap = ubc->cl_rahead) == NULL) {
491 		rap = zalloc_flags(cl_rd_zone, Z_WAITOK | Z_ZERO);
492 		rap->cl_lastr = -1;
493 		lck_mtx_init(&rap->cl_lockr, &cl_mtx_grp, LCK_ATTR_NULL);
494 
495 		vnode_lock(vp);
496 
497 		if (ubc->cl_rahead == NULL) {
498 			ubc->cl_rahead = rap;
499 		} else {
500 			lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
501 			zfree(cl_rd_zone, rap);
502 			rap = ubc->cl_rahead;
503 		}
504 		vnode_unlock(vp);
505 	}
506 	if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE) {
507 		return rap;
508 	}
509 
510 	return (struct cl_readahead *)NULL;
511 }
512 
513 
514 /*
515  * if the write behind context doesn't yet exist,
516  * and CLW_ALLOCATE is specified, allocate and initialize it...
517  * the vnode lock serializes multiple callers
518  * during the actual assignment... first one
519  * to grab the lock wins... the other callers
520  * will release the now unnecessary storage
521  *
522  * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
523  * the lock associated with the write behind context before
524  * returning
525  */
526 
527 static struct cl_writebehind *
cluster_get_wbp(vnode_t vp,int flags)528 cluster_get_wbp(vnode_t vp, int flags)
529 {
530 	struct ubc_info *ubc;
531 	struct cl_writebehind *wbp;
532 
533 	ubc = vp->v_ubcinfo;
534 
535 	if ((wbp = ubc->cl_wbehind) == NULL) {
536 		if (!(flags & CLW_ALLOCATE)) {
537 			return (struct cl_writebehind *)NULL;
538 		}
539 
540 		wbp = zalloc_flags(cl_wr_zone, Z_WAITOK | Z_ZERO);
541 
542 		lck_mtx_init(&wbp->cl_lockw, &cl_mtx_grp, LCK_ATTR_NULL);
543 
544 		vnode_lock(vp);
545 
546 		if (ubc->cl_wbehind == NULL) {
547 			ubc->cl_wbehind = wbp;
548 		} else {
549 			lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
550 			zfree(cl_wr_zone, wbp);
551 			wbp = ubc->cl_wbehind;
552 		}
553 		vnode_unlock(vp);
554 	}
555 	if (flags & CLW_RETURNLOCKED) {
556 		lck_mtx_lock(&wbp->cl_lockw);
557 	}
558 
559 	return wbp;
560 }
561 
562 
563 static void
cluster_syncup(vnode_t vp,off_t newEOF,int (* callback)(buf_t,void *),void * callback_arg,int flags)564 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, int flags)
565 {
566 	struct cl_writebehind *wbp;
567 
568 	if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
569 		if (wbp->cl_number) {
570 			lck_mtx_lock(&wbp->cl_lockw);
571 
572 			cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL, FALSE);
573 
574 			lck_mtx_unlock(&wbp->cl_lockw);
575 		}
576 	}
577 }
578 
579 
580 static int
cluster_io_present_in_BC(vnode_t vp,off_t f_offset)581 cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
582 {
583 	daddr64_t blkno;
584 	size_t    io_size;
585 	int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
586 
587 	if (bootcache_check_fn && vp->v_mount && vp->v_mount->mnt_devvp) {
588 		if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL)) {
589 			return 0;
590 		}
591 
592 		if (io_size == 0) {
593 			return 0;
594 		}
595 
596 		if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno)) {
597 			return 1;
598 		}
599 	}
600 	return 0;
601 }
602 
603 
604 static int
cluster_is_throttled(vnode_t vp)605 cluster_is_throttled(vnode_t vp)
606 {
607 	return throttle_io_will_be_throttled(-1, vp->v_mount);
608 }
609 
610 
611 static void
cluster_iostate_wait(struct clios * iostate,u_int target,const char * wait_name)612 cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name)
613 {
614 	lck_mtx_lock(&iostate->io_mtxp);
615 
616 	while ((iostate->io_issued - iostate->io_completed) > target) {
617 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
618 		    iostate->io_issued, iostate->io_completed, target, 0, 0);
619 
620 		iostate->io_wanted = 1;
621 		msleep((caddr_t)&iostate->io_wanted, &iostate->io_mtxp, PRIBIO + 1, wait_name, NULL);
622 
623 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
624 		    iostate->io_issued, iostate->io_completed, target, 0, 0);
625 	}
626 	lck_mtx_unlock(&iostate->io_mtxp);
627 }
628 
629 
630 static void
cluster_handle_associated_upl(struct clios * iostate,upl_t upl,upl_offset_t upl_offset,upl_size_t size,off_t f_offset)631 cluster_handle_associated_upl(struct clios *iostate, upl_t upl,
632     upl_offset_t upl_offset, upl_size_t size, off_t f_offset)
633 {
634 	if (!size) {
635 		return;
636 	}
637 
638 	upl_t associated_upl = upl_associated_upl(upl);
639 
640 	if (!associated_upl) {
641 		return;
642 	}
643 
644 	/*
645 	 * The associated upl functions as a "range lock" for the file.
646 	 *
647 	 * The associated upl is created and is attached to to the upl in
648 	 * cluster_io when the direct io write is being started. Since the
649 	 * upl may be released in parts so the corresponding associated upl
650 	 * has to be released in parts as well.
651 	 *
652 	 * We have the f_offset, upl_offset and size and from that we have figure
653 	 * out the associated upl offset and length, we are interested in.
654 	 */
655 	upl_offset_t assoc_upl_offset, assoc_upl_end;
656 
657 	/*                        ALIGNED UPL's                            */
658 	if ((upl_offset & PAGE_MASK) == (f_offset & PAGE_MASK)) {
659 		assoc_upl_offset = trunc_page_32(upl_offset);
660 		assoc_upl_end = round_page_32(upl_offset + size);
661 		goto do_commit;
662 	}
663 
664 	/*
665 	 *                    HANDLE UNALIGNED UPLS
666 	 *
667 	 *  ( See also cluster_io where the associated upl is created )
668 	 *  While we create the upl in one go, we will be dumping the pages in
669 	 *  the upl in "transaction sized chunks" relative to the upl. Except
670 	 *  for the first transction, the upl_offset will always be page aligned.
671 	 *  and when the upl's are not aligned the associated upl offset will not
672 	 *  be page aligned and so we have to truncate and round up the starting
673 	 *  and the end of the pages in question and see if they are shared with
674 	 *  other transctions or not. If two transctions "share" a page in the
675 	 *  associated upl, the first one to complete "marks" it and skips that
676 	 *  page and the second  one will include it in the "commit range"
677 	 *
678 	 *  As an example, consider the case where 4 transctions are needed (this
679 	 *  is the worst case).
680 	 *
681 	 *  Transaction for 0-1 (size -> PAGE_SIZE - upl_offset)
682 	 *
683 	 *  This covers the associated upl from a -> c. a->b is not shared but
684 	 *  b-c is shared with the next transction so the first one to complete
685 	 *  will only "mark" it.
686 	 *
687 	 *  Transaction for 1-2 (size -> PAGE_SIZE)
688 	 *
689 	 *  For transaction 1, assoc_upl_offset would be 0 (corresponding to the
690 	 *  file offset a or b depending on what file offset the upl_offset
691 	 *  corrssponds to ) and assoc_upl_end would correspond to the file
692 	 *  offset c.
693 	 *
694 	 *                 (associated_upl - based on f_offset alignment)
695 	 *       0         a    b    c    d    e     f
696 	 *       <----|----|----|----|----|----|-----|---->
697 	 *
698 	 *
699 	 *                  (upl - based on user buffer address alignment)
700 	 *                   <__--|----|----|--__>
701 	 *
702 	 *                   0    1    2    3
703 	 *
704 	 */
705 	upl_size_t assoc_upl_size = upl_get_size(associated_upl);
706 #if 0
707 	/* knock off the simple case first -> this transaction covers the entire UPL */
708 	upl_offset_t upl_end = round_page_32(upl_offset + size);
709 	upl_size_t upl_size = vector_upl_get_size(upl);
710 
711 	if ((trunc_page_32(upl_offset) == 0) && (upl_end == upl_size)) {
712 		assoc_upl_offset = 0;
713 		assoc_upl_end = assoc_upl_size;
714 		goto do_commit;
715 	}
716 #endif
717 	off_t assoc_upl_start_f_offset = upl_adjusted_offset(associated_upl, PAGE_MASK);
718 
719 	assoc_upl_offset = (upl_offset_t)trunc_page_64(f_offset - assoc_upl_start_f_offset);
720 	assoc_upl_end = round_page_64(f_offset + size) - assoc_upl_start_f_offset;
721 
722 	/*
723 	 * We can only sanity check the offset returned by upl_adjusted_offset
724 	 * for the first transaction for this UPL i.e. when (upl_offset < PAGE_SIZE)
725 	 */
726 	assertf((upl_offset >= PAGE_SIZE) || ((assoc_upl_start_f_offset == trunc_page_64(f_offset)) && (assoc_upl_offset == 0)),
727 	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld,  assoc_upl_offset = %d",
728 	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_offset);
729 
730 	assertf((upl_offset == assoc_upl_offset) || (upl_offset > assoc_upl_offset && ((upl_offset - assoc_upl_offset) <= PAGE_SIZE)) ||
731 	    (assoc_upl_offset > upl_offset && ((assoc_upl_offset - upl_offset) <= PAGE_SIZE)),
732 	    "abs(upl_offset - assoc_upl_offset) >  PAGE_SIZE : "
733 	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_offset = %d",
734 	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_offset);
735 
736 	assertf(assoc_upl_end <= assoc_upl_size,
737 	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_size = %d, assoc_upl_offset = %d, assoc_upl_end = %d",
738 	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_size, assoc_upl_offset, assoc_upl_end);
739 
740 	assertf((assoc_upl_size > PAGE_SIZE) || (assoc_upl_offset == 0 && assoc_upl_end == PAGE_SIZE),
741 	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_size = %d, assoc_upl_offset = %d, assoc_upl_end = %d",
742 	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_size, assoc_upl_offset, assoc_upl_end);
743 
744 	if (assoc_upl_size == PAGE_SIZE) {
745 		assoc_upl_offset = 0;
746 		assoc_upl_end = PAGE_SIZE;
747 		goto do_commit;
748 	}
749 
750 	/*
751 	 * We have to check if the first and last pages of the associated UPL
752 	 * range could potentially be shared with other transactions and if the
753 	 * "sharing transactions" are both done. The first one sets the mark bit
754 	 * and the second one checks it and if set it includes that page in the
755 	 * pages to be "freed".
756 	 */
757 	bool check_first_pg = (assoc_upl_offset != 0) || ((f_offset + size) < (assoc_upl_start_f_offset + PAGE_SIZE));
758 	bool check_last_pg = (assoc_upl_end != assoc_upl_size) || (f_offset > ((assoc_upl_start_f_offset + assoc_upl_size) - PAGE_SIZE));
759 
760 	if (check_first_pg || check_last_pg) {
761 		int first_pg = assoc_upl_offset >> PAGE_SHIFT;
762 		int last_pg = trunc_page_32(assoc_upl_end - 1) >> PAGE_SHIFT;
763 		upl_page_info_t *assoc_pl = UPL_GET_INTERNAL_PAGE_LIST(associated_upl);
764 
765 		lck_mtx_lock_spin(&iostate->io_mtxp);
766 		if (check_first_pg && !upl_page_get_mark(assoc_pl, first_pg)) {
767 			/*
768 			 * The first page isn't marked so let another transaction
769 			 * completion handle it.
770 			 */
771 			upl_page_set_mark(assoc_pl, first_pg, true);
772 			assoc_upl_offset += PAGE_SIZE;
773 		}
774 		if (check_last_pg && !upl_page_get_mark(assoc_pl, last_pg)) {
775 			/*
776 			 * The last page isn't marked so mark the page and let another
777 			 * transaction completion handle it.
778 			 */
779 			upl_page_set_mark(assoc_pl, last_pg, true);
780 			assoc_upl_end -= PAGE_SIZE;
781 		}
782 		lck_mtx_unlock(&iostate->io_mtxp);
783 	}
784 
785 	if (assoc_upl_end <= assoc_upl_offset) {
786 		return;
787 	}
788 
789 do_commit:
790 	size = assoc_upl_end - assoc_upl_offset;
791 
792 	boolean_t empty;
793 
794 	/*
795 	 * We can unlock these pages now and as this is for a
796 	 * direct/uncached write, we want to dump the pages too.
797 	 */
798 	kern_return_t kr = upl_abort_range(associated_upl, assoc_upl_offset, size,
799 	    UPL_ABORT_DUMP_PAGES, &empty);
800 
801 	assert(!kr);
802 
803 	if (!kr && empty) {
804 		upl_set_associated_upl(upl, NULL);
805 		upl_deallocate(associated_upl);
806 	}
807 }
808 
809 static void
cluster_iodone_verify_continue(void)810 cluster_iodone_verify_continue(void)
811 {
812 	lck_mtx_lock_spin(&cl_transaction_mtxp);
813 	for (;;) {
814 		struct verify_buf *vb = TAILQ_FIRST(&verify_work_head);
815 
816 		if (!vb) {
817 			assert_wait(&verify_work_head, (THREAD_UNINT));
818 			break;
819 		}
820 		buf_t cbp = vb->vb_cbp;
821 		void* callback_arg = vb->vb_callback_arg;
822 
823 		TAILQ_REMOVE(&verify_work_head, vb, vb_entry);
824 		vb->vb_cbp = NULL;
825 		vb->vb_callback_arg = NULL;
826 		vb->vb_whichq = 0;
827 		TAILQ_INSERT_TAIL(&verify_free_head, vb, vb_entry);
828 		lck_mtx_unlock(&cl_transaction_mtxp);
829 
830 		(void)cluster_iodone_finish(cbp, callback_arg);
831 		cbp = NULL;
832 		lck_mtx_lock_spin(&cl_transaction_mtxp);
833 	}
834 	lck_mtx_unlock(&cl_transaction_mtxp);
835 	thread_block((thread_continue_t)cluster_iodone_verify_continue);
836 	/* NOT REACHED */
837 }
838 
839 static void
cluster_verify_thread(void)840 cluster_verify_thread(void)
841 {
842 	thread_set_thread_name(current_thread(), "cluster_verify_thread");
843 #if !defined(__x86_64__)
844 	thread_group_join_io_storage();
845 #endif /* __x86_64__ */
846 	cluster_iodone_verify_continue();
847 	/* NOT REACHED */
848 }
849 
850 static bool
enqueue_buf_for_verify(buf_t cbp,void * callback_arg)851 enqueue_buf_for_verify(buf_t cbp, void *callback_arg)
852 {
853 	struct verify_buf *vb;
854 
855 	vb = TAILQ_FIRST(&verify_free_head);
856 	if (vb) {
857 		TAILQ_REMOVE(&verify_free_head, vb, vb_entry);
858 		vb->vb_cbp = cbp;
859 		vb->vb_callback_arg = callback_arg;
860 		vb->vb_whichq = 1;
861 		TAILQ_INSERT_TAIL(&verify_work_head, vb, vb_entry);
862 		return true;
863 	} else {
864 		return false;
865 	}
866 }
867 
868 static int
cluster_ioerror(upl_t upl,int upl_offset,int abort_size,int error,int io_flags,vnode_t vp)869 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp)
870 {
871 	int upl_abort_code = 0;
872 	int page_in  = 0;
873 	int page_out = 0;
874 
875 	if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE)) {
876 		/*
877 		 * direct write of any flavor, or a direct read that wasn't aligned
878 		 */
879 		ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
880 	} else {
881 		if (io_flags & B_PAGEIO) {
882 			if (io_flags & B_READ) {
883 				page_in  = 1;
884 			} else {
885 				page_out = 1;
886 			}
887 		}
888 		if (io_flags & B_CACHE) {
889 			/*
890 			 * leave pages in the cache unchanged on error
891 			 */
892 			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
893 		} else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) {
894 			/*
895 			 * transient error on pageout/write path... leave pages unchanged
896 			 */
897 			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
898 		} else if (page_in) {
899 			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
900 		} else {
901 			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
902 		}
903 
904 		ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
905 	}
906 	return upl_abort_code;
907 }
908 
909 
910 static int
cluster_iodone(buf_t bp,void * callback_arg)911 cluster_iodone(buf_t bp, void *callback_arg)
912 {
913 	buf_t   cbp;
914 	buf_t   cbp_head;
915 	int     error = 0;
916 	boolean_t       transaction_complete = FALSE;
917 	bool async;
918 
919 	__IGNORE_WCASTALIGN(cbp_head = (buf_t)(bp->b_trans_head));
920 
921 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
922 	    cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
923 
924 	async = cluster_verify_threads &&
925 	    (os_atomic_load(&cbp_head->b_attr.ba_flags, acquire) & BA_ASYNC_VERIFY);
926 
927 	assert(!async || cbp_head->b_attr.ba_verify_ctx);
928 
929 	if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) {
930 		lck_mtx_lock_spin(&cl_transaction_mtxp);
931 
932 		bp->b_flags |= B_TDONE;
933 
934 		for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
935 			/*
936 			 * all I/O requests that are part of this transaction
937 			 * have to complete before we can process it
938 			 */
939 			if (!(cbp->b_flags & B_TDONE)) {
940 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
941 				    cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
942 
943 				lck_mtx_unlock(&cl_transaction_mtxp);
944 
945 				return 0;
946 			}
947 
948 			if (cbp->b_trans_next == CLUSTER_IO_WAITING) {
949 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
950 				    cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
951 
952 				lck_mtx_unlock(&cl_transaction_mtxp);
953 				wakeup(cbp);
954 
955 				return 0;
956 			}
957 
958 			if (cbp->b_flags & B_EOT) {
959 				transaction_complete = TRUE;
960 
961 				if (async) {
962 					async = enqueue_buf_for_verify(cbp_head, callback_arg);
963 				}
964 			}
965 		}
966 		lck_mtx_unlock(&cl_transaction_mtxp);
967 
968 		if (transaction_complete == FALSE) {
969 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
970 			    cbp_head, 0, 0, 0, 0);
971 			return 0;
972 		}
973 	} else if (async) {
974 		lck_mtx_lock_spin(&cl_transaction_mtxp);
975 		async = enqueue_buf_for_verify(cbp_head, callback_arg);
976 		lck_mtx_unlock(&cl_transaction_mtxp);
977 	}
978 
979 	if (async) {
980 		wakeup(&verify_work_head);
981 	} else {
982 		error = cluster_iodone_finish(cbp_head, callback_arg);
983 	}
984 
985 	return error;
986 }
987 
988 static int
cluster_iodone_finish(buf_t cbp_head,void * callback_arg)989 cluster_iodone_finish(buf_t cbp_head, void *callback_arg)
990 {
991 	int     b_flags;
992 	int     error;
993 	int     total_size;
994 	int     total_resid;
995 	int     upl_offset;
996 	int     zero_offset;
997 	int     pg_offset = 0;
998 	int     commit_size = 0;
999 	int     upl_flags = 0;
1000 	int     transaction_size = 0;
1001 	upl_t   upl;
1002 	buf_t   cbp;
1003 	buf_t   cbp_next;
1004 	buf_t   real_bp;
1005 	vnode_t vp;
1006 	struct  clios *iostate;
1007 	void    *verify_ctx;
1008 
1009 	error       = 0;
1010 	total_size  = 0;
1011 	total_resid = 0;
1012 
1013 	cbp        = cbp_head;
1014 	vp         = cbp->b_vp;
1015 	upl_offset = cbp->b_uploffset;
1016 	upl        = cbp->b_upl;
1017 	b_flags    = cbp->b_flags;
1018 	real_bp    = cbp->b_real_bp;
1019 	zero_offset = cbp->b_validend;
1020 	iostate    = (struct clios *)cbp->b_iostate;
1021 
1022 	if (real_bp) {
1023 		real_bp->b_dev = cbp->b_dev;
1024 	}
1025 
1026 	while (cbp) {
1027 		if ((cbp->b_flags & B_ERROR) && error == 0) {
1028 			error = cbp->b_error;
1029 		}
1030 
1031 		total_resid += cbp->b_resid;
1032 		total_size  += cbp->b_bcount;
1033 
1034 		cbp_next = cbp->b_trans_next;
1035 
1036 		if (cbp_next == NULL) {
1037 			/*
1038 			 * compute the overall size of the transaction
1039 			 * in case we created one that has 'holes' in it
1040 			 * 'total_size' represents the amount of I/O we
1041 			 * did, not the span of the transaction w/r to the UPL
1042 			 */
1043 			transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
1044 		}
1045 
1046 		cbp = cbp_next;
1047 	}
1048 
1049 	if (ISSET(b_flags, B_COMMIT_UPL)) {
1050 		cluster_handle_associated_upl(iostate,
1051 		    cbp_head->b_upl,
1052 		    upl_offset,
1053 		    transaction_size,
1054 		    cbp_head->b_clfoffset);
1055 	}
1056 
1057 	if (error == 0 && total_resid) {
1058 		error = EIO;
1059 	}
1060 
1061 	if (error == 0) {
1062 		int     (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);
1063 
1064 		if (cliodone_func != NULL) {
1065 			cbp_head->b_bcount = transaction_size;
1066 
1067 			error = (*cliodone_func)(cbp_head, callback_arg);
1068 		}
1069 	}
1070 	if (zero_offset) {
1071 		cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
1072 	}
1073 
1074 	verify_ctx = cbp_head->b_attr.ba_verify_ctx;
1075 	cbp_head->b_attr.ba_verify_ctx = NULL;
1076 	if (verify_ctx) {
1077 		vnode_verify_flags_t verify_flags = VNODE_VERIFY_CONTEXT_FREE;
1078 		caddr_t verify_buf = NULL;
1079 		off_t start_off = cbp_head->b_clfoffset;
1080 		size_t verify_length = transaction_size;
1081 		vm_offset_t vaddr;
1082 
1083 		if (!error) {
1084 			/*
1085 			 * Map it in.
1086 			 *
1087 			 * ubc_upl_map_range unfortunately cannot handle concurrent map
1088 			 * requests for the same UPL and returns failures when it can't
1089 			 * map. The map exclusive mechanism enforces mutual exclusion
1090 			 * for concurrent requests.
1091 			 */
1092 			os_atomic_inc(&verify_in_flight, relaxed);
1093 			upl_set_map_exclusive(upl);
1094 			error = ubc_upl_map_range(upl, upl_offset, round_page(transaction_size), VM_PROT_DEFAULT, &vaddr);
1095 			if (error) {
1096 				upl_clear_map_exclusive(upl);
1097 				printf("ubc_upl_map_range returned error %d upl = %p, upl_offset = %d, size = %d",
1098 				    error, upl, (int)upl_offset, (int)round_page(transaction_size));
1099 				error  = EIO;
1100 				if (os_atomic_dec_orig(&verify_in_flight, relaxed) == 0) {
1101 					panic("verify_in_flight underflow");
1102 				}
1103 			} else {
1104 				verify_buf = (caddr_t)vaddr;
1105 				verify_flags |= VNODE_VERIFY_WITH_CONTEXT;
1106 			}
1107 		}
1108 
1109 		int verify_error = VNOP_VERIFY(vp, start_off, (uint8_t *)verify_buf, verify_length, 0, &verify_ctx, verify_flags, NULL);
1110 		if (!error) {
1111 			error = verify_error;
1112 		}
1113 
1114 		if (verify_buf) {
1115 			(void)ubc_upl_unmap_range(upl, upl_offset, round_page(transaction_size));
1116 			upl_clear_map_exclusive(upl);
1117 			verify_buf = NULL;
1118 			if (os_atomic_dec_orig(&verify_in_flight, relaxed) == 0) {
1119 				panic("verify_in_flight underflow");
1120 			}
1121 		}
1122 	} else if (cbp_head->b_attr.ba_flags & BA_WILL_VERIFY) {
1123 		error = EBADMSG;
1124 	}
1125 
1126 	if (iostate) {
1127 		int need_wakeup = 0;
1128 
1129 		/*
1130 		 * someone has issued multiple I/Os asynchrounsly
1131 		 * and is waiting for them to complete (streaming)
1132 		 */
1133 		lck_mtx_lock_spin(&iostate->io_mtxp);
1134 
1135 		if (error && iostate->io_error == 0) {
1136 			iostate->io_error = error;
1137 		}
1138 
1139 		iostate->io_completed += total_size;
1140 
1141 		if (iostate->io_wanted) {
1142 			/*
1143 			 * someone is waiting for the state of
1144 			 * this io stream to change
1145 			 */
1146 			iostate->io_wanted = 0;
1147 			need_wakeup = 1;
1148 		}
1149 		lck_mtx_unlock(&iostate->io_mtxp);
1150 
1151 		if (need_wakeup) {
1152 			wakeup((caddr_t)&iostate->io_wanted);
1153 		}
1154 	}
1155 
1156 	if (b_flags & B_COMMIT_UPL) {
1157 		pg_offset   = upl_offset & PAGE_MASK;
1158 		commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1159 
1160 		if (error) {
1161 			upl_set_iodone_error(upl, error);
1162 
1163 			upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp);
1164 		} else {
1165 			upl_flags = UPL_COMMIT_FREE_ON_EMPTY;
1166 
1167 			if ((b_flags & B_PHYS) && (b_flags & B_READ)) {
1168 				upl_flags |= UPL_COMMIT_SET_DIRTY;
1169 			}
1170 
1171 			if (b_flags & B_AGE) {
1172 				upl_flags |= UPL_COMMIT_INACTIVATE;
1173 			}
1174 
1175 			ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
1176 		}
1177 	}
1178 
1179 	cbp = cbp_head->b_trans_next;
1180 	while (cbp) {
1181 		cbp_next = cbp->b_trans_next;
1182 
1183 		if (cbp != cbp_head) {
1184 			free_io_buf(cbp);
1185 		}
1186 
1187 		cbp = cbp_next;
1188 	}
1189 	free_io_buf(cbp_head);
1190 
1191 	if (real_bp) {
1192 		if (error) {
1193 			real_bp->b_flags |= B_ERROR;
1194 			real_bp->b_error = error;
1195 		}
1196 		real_bp->b_resid = total_resid;
1197 
1198 		buf_biodone(real_bp);
1199 	}
1200 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
1201 	    upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
1202 
1203 	return error;
1204 }
1205 
1206 
1207 uint32_t
cluster_throttle_io_limit(vnode_t vp,uint32_t * limit)1208 cluster_throttle_io_limit(vnode_t vp, uint32_t *limit)
1209 {
1210 	if (cluster_is_throttled(vp)) {
1211 		*limit = calculate_max_throttle_size(vp);
1212 		return 1;
1213 	}
1214 	return 0;
1215 }
1216 
1217 
1218 void
cluster_zero(upl_t upl,upl_offset_t upl_offset,int size,buf_t bp)1219 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
1220 {
1221 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
1222 	    upl_offset, size, bp, 0, 0);
1223 
1224 	if (bp == NULL || bp->b_datap == 0) {
1225 		upl_page_info_t *pl;
1226 		addr64_t        zero_addr;
1227 
1228 		pl = ubc_upl_pageinfo(upl);
1229 
1230 		if (upl_device_page(pl) == TRUE) {
1231 			zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset;
1232 
1233 			bzero_phys_nc(zero_addr, size);
1234 		} else {
1235 			while (size) {
1236 				int     page_offset;
1237 				int     page_index;
1238 				int     zero_cnt;
1239 
1240 				page_index  = upl_offset / PAGE_SIZE;
1241 				page_offset = upl_offset & PAGE_MASK;
1242 
1243 				zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << PAGE_SHIFT) + page_offset;
1244 				zero_cnt  = min(PAGE_SIZE - page_offset, size);
1245 
1246 				bzero_phys(zero_addr, zero_cnt);
1247 
1248 				size       -= zero_cnt;
1249 				upl_offset += zero_cnt;
1250 			}
1251 		}
1252 	} else {
1253 		bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
1254 	}
1255 
1256 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
1257 	    upl_offset, size, 0, 0, 0);
1258 }
1259 
1260 
1261 static void
cluster_EOT(buf_t cbp_head,buf_t cbp_tail,int zero_offset,size_t verify_block_size)1262 cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size)
1263 {
1264 	/*
1265 	 * We will assign a verification context to cbp_head.
1266 	 * This will be passed back to the filesystem  when
1267 	 * verifying (in cluster_iodone).
1268 	 */
1269 	if (verify_block_size) {
1270 		off_t start_off = cbp_head->b_clfoffset;
1271 		size_t length;
1272 		void *verify_ctx = NULL;
1273 		int error = 0;
1274 		vnode_t vp = buf_vnode(cbp_head);
1275 
1276 		if (cbp_head == cbp_tail) {
1277 			length = cbp_head->b_bcount;
1278 		} else {
1279 			length = (cbp_tail->b_clfoffset + cbp_tail->b_bcount) - start_off;
1280 		}
1281 
1282 		/*
1283 		 * zero_offset is non zero for the transaction containing the EOF
1284 		 * (if the filesize is not page aligned). In that case we might
1285 		 * have the transaction size not be page/verify block size aligned
1286 		 */
1287 		if ((zero_offset == 0) &&
1288 		    ((length < verify_block_size) || (length % verify_block_size)) != 0) {
1289 			panic("%s length = %zu, verify_block_size = %zu",
1290 			    __FUNCTION__, length, verify_block_size);
1291 		}
1292 
1293 		error = VNOP_VERIFY(vp, start_off, NULL, length,
1294 		    &verify_block_size, &verify_ctx, VNODE_VERIFY_CONTEXT_ALLOC, NULL);
1295 
1296 		assert(!(error && verify_ctx));
1297 
1298 		if (verify_ctx) {
1299 			if (num_verify_threads && (os_atomic_load(&cluster_verify_threads, relaxed) == 0)) {
1300 				if (os_atomic_inc_orig(&cluster_verify_threads, relaxed) == 0) {
1301 					thread_t thread;
1302 					int i;
1303 
1304 					for (i = 0; i < num_verify_threads && i < MAX_VERIFY_THREADS; i++) {
1305 						kernel_thread_start((thread_continue_t)cluster_verify_thread, NULL, &thread);
1306 						thread_deallocate(thread);
1307 					}
1308 					os_atomic_store(&cluster_verify_threads, i, relaxed);
1309 				} else {
1310 					os_atomic_dec(&cluster_verify_threads, relaxed);
1311 				}
1312 			}
1313 			cbp_head->b_attr.ba_verify_ctx = verify_ctx;
1314 			/*
1315 			 * At least one thread is busy (at the time we
1316 			 * checked), so we can let it get queued for
1317 			 * async processing. It's fine if we occasionally get
1318 			 * this wrong.
1319 			 */
1320 			if (os_atomic_load(&verify_in_flight, relaxed)) {
1321 				/* This flag and the setting of ba_verify_ctx needs to be ordered */
1322 				os_atomic_or(&cbp_head->b_attr.ba_flags, BA_ASYNC_VERIFY, release);
1323 			}
1324 		}
1325 	} else {
1326 		cbp_head->b_attr.ba_verify_ctx = NULL;
1327 	}
1328 
1329 	cbp_head->b_validend = zero_offset;
1330 	cbp_tail->b_flags |= B_EOT;
1331 }
1332 
1333 static void
cluster_wait_IO(buf_t cbp_head,int async)1334 cluster_wait_IO(buf_t cbp_head, int async)
1335 {
1336 	buf_t   cbp;
1337 
1338 	if (async) {
1339 		/*
1340 		 * Async callback completion will not normally generate a
1341 		 * wakeup upon I/O completion.  To get woken up, we set
1342 		 * b_trans_next (which is safe for us to modify) on the last
1343 		 * buffer to CLUSTER_IO_WAITING so that cluster_iodone knows
1344 		 * to wake us up when all buffers as part of this transaction
1345 		 * are completed.  This is done under the umbrella of
1346 		 * cl_transaction_mtxp which is also taken in cluster_iodone.
1347 		 */
1348 		bool done = true;
1349 		buf_t last = NULL;
1350 
1351 		lck_mtx_lock_spin(&cl_transaction_mtxp);
1352 
1353 		for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) {
1354 			if (!ISSET(cbp->b_flags, B_TDONE)) {
1355 				done = false;
1356 			}
1357 		}
1358 
1359 		if (!done) {
1360 			last->b_trans_next = CLUSTER_IO_WAITING;
1361 
1362 			DTRACE_IO1(wait__start, buf_t, last);
1363 			do {
1364 				msleep(last, &cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL);
1365 
1366 				/*
1367 				 * We should only have been woken up if all the
1368 				 * buffers are completed, but just in case...
1369 				 */
1370 				done = true;
1371 				for (cbp = cbp_head; cbp != CLUSTER_IO_WAITING; cbp = cbp->b_trans_next) {
1372 					if (!ISSET(cbp->b_flags, B_TDONE)) {
1373 						done = false;
1374 						break;
1375 					}
1376 				}
1377 			} while (!done);
1378 			DTRACE_IO1(wait__done, buf_t, last);
1379 
1380 			last->b_trans_next = NULL;
1381 		}
1382 
1383 		lck_mtx_unlock(&cl_transaction_mtxp);
1384 	} else { // !async
1385 		for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1386 			buf_biowait(cbp);
1387 		}
1388 	}
1389 }
1390 
1391 static void
cluster_complete_transaction(buf_t * cbp_head,void * callback_arg,int * retval,int flags,int needwait)1392 cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
1393 {
1394 	buf_t   cbp;
1395 	int     error;
1396 	boolean_t isswapout = FALSE;
1397 
1398 	/*
1399 	 * cluster_complete_transaction will
1400 	 * only be called if we've issued a complete chain in synchronous mode
1401 	 * or, we've already done a cluster_wait_IO on an incomplete chain
1402 	 */
1403 	if (needwait) {
1404 		for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
1405 			buf_biowait(cbp);
1406 		}
1407 	}
1408 	/*
1409 	 * we've already waited on all of the I/Os in this transaction,
1410 	 * so mark all of the buf_t's in this transaction as B_TDONE
1411 	 * so that cluster_iodone sees the transaction as completed
1412 	 */
1413 	for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
1414 		cbp->b_flags |= B_TDONE;
1415 		cbp->b_attr.ba_flags &= ~BA_ASYNC_VERIFY;
1416 	}
1417 	cbp = *cbp_head;
1418 
1419 	if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp)) {
1420 		isswapout = TRUE;
1421 	}
1422 
1423 	error = cluster_iodone(cbp, callback_arg);
1424 
1425 	if (!(flags & CL_ASYNC) && error && *retval == 0) {
1426 		if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO)) {
1427 			*retval = error;
1428 		} else if (isswapout == TRUE) {
1429 			*retval = error;
1430 		}
1431 	}
1432 	*cbp_head = (buf_t)NULL;
1433 }
1434 
1435 
1436 static int
cluster_io(vnode_t vp,upl_t upl,vm_offset_t upl_offset,off_t f_offset,int non_rounded_size,int flags,buf_t real_bp,struct clios * iostate,int (* callback)(buf_t,void *),void * callback_arg)1437 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1438     int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1439 {
1440 	buf_t   cbp;
1441 	u_int   size;
1442 	u_int   io_size;
1443 	int     io_flags;
1444 	int     bmap_flags;
1445 	int     error = 0;
1446 	int     retval = 0;
1447 	buf_t   cbp_head = NULL;
1448 	buf_t   cbp_tail = NULL;
1449 	int     trans_count = 0;
1450 	int     max_trans_count;
1451 	u_int   pg_count;
1452 	int     pg_offset;
1453 	u_int   max_iosize;
1454 	u_int   max_vectors;
1455 	int     priv;
1456 	int     zero_offset = 0;
1457 	int     async_throttle = 0;
1458 	mount_t mp;
1459 	size_t verify_block_size = 0;
1460 	vm_offset_t upl_end_offset;
1461 	boolean_t   need_EOT = FALSE;
1462 
1463 	/*
1464 	 * we currently don't support buffers larger than a page
1465 	 */
1466 	if (real_bp && non_rounded_size > PAGE_SIZE) {
1467 		panic("%s(): Called with real buffer of size %d bytes which "
1468 		    "is greater than the maximum allowed size of "
1469 		    "%d bytes (the system PAGE_SIZE).\n",
1470 		    __FUNCTION__, non_rounded_size, PAGE_SIZE);
1471 	}
1472 
1473 	mp = vp->v_mount;
1474 
1475 	/*
1476 	 * we don't want to do any funny rounding of the size for IO requests
1477 	 * coming through the DIRECT or CONTIGUOUS paths...  those pages don't
1478 	 * belong to us... we can't extend (nor do we need to) the I/O to fill
1479 	 * out a page
1480 	 */
1481 	if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
1482 		/*
1483 		 * round the requested size up so that this I/O ends on a
1484 		 * page boundary in case this is a 'write'... if the filesystem
1485 		 * has blocks allocated to back the page beyond the EOF, we want to
1486 		 * make sure to write out the zero's that are sitting beyond the EOF
1487 		 * so that in case the filesystem doesn't explicitly zero this area
1488 		 * if a hole is created via a lseek/write beyond the current EOF,
1489 		 * it will return zeros when it's read back from the disk.  If the
1490 		 * physical allocation doesn't extend for the whole page, we'll
1491 		 * only write/read from the disk up to the end of this allocation
1492 		 * via the extent info returned from the VNOP_BLOCKMAP call.
1493 		 */
1494 		pg_offset = upl_offset & PAGE_MASK;
1495 
1496 		size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
1497 	} else {
1498 		/*
1499 		 * anyone advertising a blocksize of 1 byte probably
1500 		 * can't deal with us rounding up the request size
1501 		 * AFP is one such filesystem/device
1502 		 */
1503 		size = non_rounded_size;
1504 	}
1505 	upl_end_offset = upl_offset + size;
1506 
1507 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
1508 
1509 	/*
1510 	 * Set the maximum transaction size to the maximum desired number of
1511 	 * buffers.
1512 	 */
1513 	max_trans_count = 8;
1514 	if (flags & CL_DEV_MEMORY) {
1515 		max_trans_count = 16;
1516 	}
1517 
1518 	if (flags & CL_READ) {
1519 		io_flags = B_READ;
1520 		bmap_flags = VNODE_READ;
1521 
1522 		max_iosize  = mp->mnt_maxreadcnt;
1523 		max_vectors = mp->mnt_segreadcnt;
1524 
1525 		/* See if we can do cluster verification (pageins and aligned reads) */
1526 		if ((flags & CL_PAGEIN || cluster_verify_threads) &&
1527 		    !(mp->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1528 		    (VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL) == 0) &&
1529 		    verify_block_size) {
1530 			if (verify_block_size != PAGE_SIZE) {
1531 				verify_block_size = 0;
1532 			}
1533 			if (real_bp && verify_block_size) {
1534 				panic("%s(): Called with real buffer and needs verification ",
1535 				    __FUNCTION__);
1536 			}
1537 			/*
1538 			 * For reads, only allow cluster verification if f_offset
1539 			 * and upl_offset are both page aligned. If they are not
1540 			 * page aligned, leave it to the filesystem to do verification
1541 			 * Furthermore, the size also has to be aligned to page size.
1542 			 * Strictly speaking the alignments need to be for verify_block_size
1543 			 * but since the only verify_block_size that is currently supported
1544 			 * is page size, we check against page alignment.
1545 			 */
1546 			if (verify_block_size && !(flags & CL_PAGEIN) &&
1547 			    ((f_offset & PAGE_MASK) || (upl_offset & PAGE_MASK) || (non_rounded_size & PAGE_MASK))) {
1548 				verify_block_size = 0;
1549 			}
1550 		}
1551 	} else {
1552 		io_flags = B_WRITE;
1553 		bmap_flags = VNODE_WRITE;
1554 
1555 		max_iosize  = mp->mnt_maxwritecnt;
1556 		max_vectors = mp->mnt_segwritecnt;
1557 	}
1558 	if (verify_block_size) {
1559 		bmap_flags |= VNODE_CLUSTER_VERIFY;
1560 	}
1561 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
1562 
1563 	/*
1564 	 * make sure the maximum iosize is a
1565 	 * multiple of the page size
1566 	 */
1567 	max_iosize  &= ~PAGE_MASK;
1568 
1569 	/*
1570 	 * Ensure the maximum iosize is sensible.
1571 	 */
1572 	if (!max_iosize) {
1573 		max_iosize = PAGE_SIZE;
1574 	}
1575 
1576 	if (flags & CL_THROTTLE) {
1577 		if (!(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) {
1578 			uint32_t max_throttle_size = calculate_max_throttle_size(vp);
1579 
1580 			if (max_iosize > max_throttle_size) {
1581 				max_iosize = max_throttle_size;
1582 			}
1583 			async_throttle = calculate_max_throttle_cnt(vp);
1584 		} else {
1585 			if ((flags & CL_DEV_MEMORY)) {
1586 				async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
1587 			} else {
1588 				u_int max_cluster;
1589 				u_int max_cluster_size;
1590 				u_int scale;
1591 
1592 				if (vp->v_mount->mnt_minsaturationbytecount) {
1593 					max_cluster_size = vp->v_mount->mnt_minsaturationbytecount;
1594 
1595 					scale = 1;
1596 				} else {
1597 					max_cluster_size = MAX_CLUSTER_SIZE(vp);
1598 
1599 					if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
1600 						scale = WRITE_THROTTLE_SSD;
1601 					} else {
1602 						scale = WRITE_THROTTLE;
1603 					}
1604 				}
1605 				if (max_iosize > max_cluster_size) {
1606 					max_cluster = max_cluster_size;
1607 				} else {
1608 					max_cluster = max_iosize;
1609 				}
1610 
1611 				if (size < max_cluster) {
1612 					max_cluster = size;
1613 				}
1614 
1615 				if (flags & CL_CLOSE) {
1616 					scale += MAX_CLUSTERS;
1617 				}
1618 
1619 				async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1);
1620 			}
1621 		}
1622 	}
1623 	if (flags & CL_AGE) {
1624 		io_flags |= B_AGE;
1625 	}
1626 	if (flags & (CL_PAGEIN | CL_PAGEOUT)) {
1627 		io_flags |= B_PAGEIO;
1628 	}
1629 	if (flags & (CL_IOSTREAMING)) {
1630 		io_flags |= B_IOSTREAMING;
1631 	}
1632 	if (flags & CL_COMMIT) {
1633 		io_flags |= B_COMMIT_UPL;
1634 	}
1635 	if (flags & CL_DIRECT_IO) {
1636 		io_flags |= B_PHYS;
1637 	}
1638 	if (flags & (CL_PRESERVE | CL_KEEPCACHED)) {
1639 		io_flags |= B_CACHE;
1640 	}
1641 	if (flags & CL_PASSIVE) {
1642 		io_flags |= B_PASSIVE;
1643 	}
1644 	if (flags & CL_ENCRYPTED) {
1645 		io_flags |= B_ENCRYPTED_IO;
1646 	}
1647 
1648 	if (vp->v_flag & VSYSTEM) {
1649 		io_flags |= B_META;
1650 	}
1651 
1652 	if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
1653 		/*
1654 		 * then we are going to end up
1655 		 * with a page that we can't complete (the file size wasn't a multiple
1656 		 * of PAGE_SIZE and we're trying to read to the end of the file
1657 		 * so we'll go ahead and zero out the portion of the page we can't
1658 		 * read in from the file
1659 		 */
1660 		zero_offset = (int)(upl_offset + non_rounded_size);
1661 	} else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO)) {
1662 		assert(ISSET(flags, CL_COMMIT));
1663 
1664 		// For a direct/uncached write, we need to lock pages...
1665 		upl_t cached_upl = NULL;
1666 		upl_page_info_t *cached_pl;
1667 
1668 		assert(upl_offset < PAGE_SIZE);
1669 
1670 		/*
1671 		 *
1672 		 *                       f_offset = b
1673 		 *                      upl_offset = 8K
1674 		 *
1675 		 *                       (cached_upl - based on f_offset alignment)
1676 		 *       0         a    b              c
1677 		 *       <----|----|----|----|----|----|-----|---->
1678 		 *
1679 		 *
1680 		 *                          (upl - based on user buffer address alignment)
1681 		 *                   <__--|----|----|--__>
1682 		 *
1683 		 *                   0    1x   2x  3x
1684 		 *
1685 		 */
1686 		const off_t cached_upl_f_offset = trunc_page_64(f_offset);
1687 		const int cached_upl_size = round_page_32((f_offset - cached_upl_f_offset) + non_rounded_size);
1688 		int num_retries = 0;
1689 
1690 		/*
1691 		 * Create a UPL to lock the pages in the cache whilst the
1692 		 * write is in progress.
1693 		 */
1694 create_cached_upl:
1695 		ubc_create_upl_kernel(vp, cached_upl_f_offset, cached_upl_size, &cached_upl,
1696 		    &cached_pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
1697 
1698 		/*
1699 		 * If we are not overwriting the first and last pages completely
1700 		 * we need to write them out first if they are dirty. These pages
1701 		 * will be discarded after the write completes so we might lose
1702 		 * the writes for the parts that are not overwrrtten.
1703 		 */
1704 		bool first_page_needs_sync = false;
1705 		bool last_page_needs_sync = false;
1706 
1707 		if (cached_upl && (cached_upl_f_offset < f_offset) && upl_dirty_page(cached_pl, 0)) {
1708 			first_page_needs_sync = true;
1709 		}
1710 
1711 		if (cached_upl && (cached_upl_f_offset + cached_upl_size) > (f_offset + non_rounded_size)) {
1712 			int last_page = (cached_upl_size / PAGE_SIZE) - 1;
1713 
1714 			if ((last_page != 0 || !first_page_needs_sync) && upl_dirty_page(cached_pl, last_page)) {
1715 				last_page_needs_sync = true;
1716 			}
1717 		}
1718 
1719 		if (first_page_needs_sync || last_page_needs_sync) {
1720 			ubc_upl_abort_range(cached_upl, 0, cached_upl_size, UPL_ABORT_FREE_ON_EMPTY);
1721 			cached_upl = NULL;
1722 			cached_pl = NULL;
1723 			if (first_page_needs_sync) {
1724 				ubc_msync(vp, cached_upl_f_offset, cached_upl_f_offset + PAGE_SIZE, NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1725 			}
1726 			if (last_page_needs_sync) {
1727 				off_t cached_upl_end_offset = cached_upl_f_offset + cached_upl_size;
1728 
1729 				ubc_msync(vp, cached_upl_end_offset - PAGE_SIZE, cached_upl_end_offset, NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1730 			}
1731 			if (++num_retries < 16) {
1732 				goto create_cached_upl;
1733 			}
1734 			printf("%s : Number of retries for syncing first or last page reached %d\n", __FUNCTION__, num_retries);
1735 			assertf(num_retries < 16, "%s : Number of retries for syncing first or last page reached %d\n", __FUNCTION__, num_retries);
1736 		}
1737 
1738 		/*
1739 		 * Attach this UPL to the other UPL so that we can find it
1740 		 * later.
1741 		 */
1742 		upl_set_associated_upl(upl, cached_upl);
1743 		assertf(!cached_upl ||
1744 		    (upl_adjusted_offset(cached_upl, PAGE_MASK) == cached_upl_f_offset),
1745 		    "upl_adjusted_offset(cached_upl, PAGE_MASK) = %lld, cached_upl_f_offset = %lld",
1746 		    upl_adjusted_offset(cached_upl, PAGE_MASK), cached_upl_f_offset);
1747 	}
1748 
1749 	while (size) {
1750 		daddr64_t blkno;
1751 		daddr64_t lblkno;
1752 		size_t  io_size_tmp;
1753 		u_int   io_size_wanted;
1754 
1755 		if (size > max_iosize) {
1756 			io_size = max_iosize;
1757 		} else {
1758 			io_size = size;
1759 		}
1760 
1761 		io_size_wanted = io_size;
1762 		io_size_tmp = (size_t)io_size;
1763 
1764 		if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL))) {
1765 			break;
1766 		}
1767 
1768 		if (io_size_tmp > io_size_wanted) {
1769 			io_size = io_size_wanted;
1770 		} else {
1771 			io_size = (u_int)io_size_tmp;
1772 		}
1773 
1774 		if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
1775 			real_bp->b_blkno = blkno;
1776 		}
1777 
1778 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
1779 		    (int)f_offset, (int)(blkno >> 32), (int)blkno, io_size, 0);
1780 
1781 		if (io_size == 0) {
1782 			/*
1783 			 * vnop_blockmap didn't return an error... however, it did
1784 			 * return an extent size of 0 which means we can't
1785 			 * make forward progress on this I/O... a hole in the
1786 			 * file would be returned as a blkno of -1 with a non-zero io_size
1787 			 * a real extent is returned with a blkno != -1 and a non-zero io_size
1788 			 */
1789 			error = EINVAL;
1790 			break;
1791 		}
1792 		if (!(flags & CL_READ) && blkno == -1) {
1793 			off_t   e_offset;
1794 			int     pageout_flags;
1795 
1796 			if (upl_get_internal_vectorupl(upl)) {
1797 				panic("Vector UPLs should not take this code-path");
1798 			}
1799 			/*
1800 			 * we're writing into a 'hole'
1801 			 */
1802 			if (flags & CL_PAGEOUT) {
1803 				/*
1804 				 * if we got here via cluster_pageout
1805 				 * then just error the request and return
1806 				 * the 'hole' should already have been covered
1807 				 */
1808 				error = EINVAL;
1809 				break;
1810 			}
1811 			/*
1812 			 * we can get here if the cluster code happens to
1813 			 * pick up a page that was dirtied via mmap vs
1814 			 * a 'write' and the page targets a 'hole'...
1815 			 * i.e. the writes to the cluster were sparse
1816 			 * and the file was being written for the first time
1817 			 *
1818 			 * we can also get here if the filesystem supports
1819 			 * 'holes' that are less than PAGE_SIZE.... because
1820 			 * we can't know if the range in the page that covers
1821 			 * the 'hole' has been dirtied via an mmap or not,
1822 			 * we have to assume the worst and try to push the
1823 			 * entire page to storage.
1824 			 *
1825 			 * Try paging out the page individually before
1826 			 * giving up entirely and dumping it (the pageout
1827 			 * path will insure that the zero extent accounting
1828 			 * has been taken care of before we get back into cluster_io)
1829 			 *
1830 			 * go direct to vnode_pageout so that we don't have to
1831 			 * unbusy the page from the UPL... we used to do this
1832 			 * so that we could call ubc_msync, but that results
1833 			 * in a potential deadlock if someone else races us to acquire
1834 			 * that page and wins and in addition needs one of the pages
1835 			 * we're continuing to hold in the UPL
1836 			 */
1837 			pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
1838 
1839 			if (!(flags & CL_ASYNC)) {
1840 				pageout_flags |= UPL_IOSYNC;
1841 			}
1842 			if (!(flags & CL_COMMIT)) {
1843 				pageout_flags |= UPL_NOCOMMIT;
1844 			}
1845 
1846 			if (cbp_head) {
1847 				buf_t prev_cbp;
1848 				uint32_t   bytes_in_last_page;
1849 
1850 				/*
1851 				 * first we have to wait for the the current outstanding I/Os
1852 				 * to complete... EOT hasn't been set yet on this transaction
1853 				 * so the pages won't be released
1854 				 */
1855 				cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1856 
1857 				bytes_in_last_page = cbp_head->b_uploffset & PAGE_MASK;
1858 				for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1859 					bytes_in_last_page += cbp->b_bcount;
1860 				}
1861 				bytes_in_last_page &= PAGE_MASK;
1862 
1863 				while (bytes_in_last_page) {
1864 					/*
1865 					 * we've got a transcation that
1866 					 * includes the page we're about to push out through vnode_pageout...
1867 					 * find the bp's in the list which intersect this page and either
1868 					 * remove them entirely from the transaction (there could be multiple bp's), or
1869 					 * round it's iosize down to the page boundary (there can only be one)...
1870 					 *
1871 					 * find the last bp in the list and act on it
1872 					 */
1873 					for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next) {
1874 						prev_cbp = cbp;
1875 					}
1876 
1877 					if (bytes_in_last_page >= cbp->b_bcount) {
1878 						/*
1879 						 * this buf no longer has any I/O associated with it
1880 						 */
1881 						bytes_in_last_page -= cbp->b_bcount;
1882 						cbp->b_bcount = 0;
1883 
1884 						free_io_buf(cbp);
1885 
1886 						if (cbp == cbp_head) {
1887 							assert(bytes_in_last_page == 0);
1888 							/*
1889 							 * the buf we just freed was the only buf in
1890 							 * this transaction... so there's no I/O to do
1891 							 */
1892 							cbp_head = NULL;
1893 							cbp_tail = NULL;
1894 						} else {
1895 							/*
1896 							 * remove the buf we just freed from
1897 							 * the transaction list
1898 							 */
1899 							prev_cbp->b_trans_next = NULL;
1900 							cbp_tail = prev_cbp;
1901 						}
1902 					} else {
1903 						/*
1904 						 * this is the last bp that has I/O
1905 						 * intersecting the page of interest
1906 						 * only some of the I/O is in the intersection
1907 						 * so clip the size but keep it in the transaction list
1908 						 */
1909 						cbp->b_bcount -= bytes_in_last_page;
1910 						cbp_tail = cbp;
1911 						bytes_in_last_page = 0;
1912 					}
1913 				}
1914 				if (cbp_head) {
1915 					/*
1916 					 * there was more to the current transaction
1917 					 * than just the page we are pushing out via vnode_pageout...
1918 					 * mark it as finished and complete it... we've already
1919 					 * waited for the I/Os to complete above in the call to cluster_wait_IO
1920 					 */
1921 					cluster_EOT(cbp_head, cbp_tail, 0, 0);
1922 
1923 					cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1924 
1925 					trans_count = 0;
1926 				}
1927 			}
1928 			if (vnode_pageout(vp, upl, (upl_offset_t)trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1929 				error = EINVAL;
1930 			}
1931 			e_offset = round_page_64(f_offset + 1);
1932 			io_size = (u_int)(e_offset - f_offset);
1933 
1934 			f_offset   += io_size;
1935 			upl_offset += io_size;
1936 
1937 			if (size >= io_size) {
1938 				size -= io_size;
1939 			} else {
1940 				size = 0;
1941 			}
1942 			/*
1943 			 * keep track of how much of the original request
1944 			 * that we've actually completed... non_rounded_size
1945 			 * may go negative due to us rounding the request
1946 			 * to a page size multiple (i.e.  size > non_rounded_size)
1947 			 */
1948 			non_rounded_size -= io_size;
1949 
1950 			if (non_rounded_size <= 0) {
1951 				/*
1952 				 * we've transferred all of the data in the original
1953 				 * request, but we were unable to complete the tail
1954 				 * of the last page because the file didn't have
1955 				 * an allocation to back that portion... this is ok.
1956 				 */
1957 				size = 0;
1958 			}
1959 			if (error) {
1960 				if (size == 0) {
1961 					flags &= ~CL_COMMIT;
1962 				}
1963 				break;
1964 			}
1965 			continue;
1966 		}
1967 
1968 		lblkno = (daddr64_t)(f_offset / CLUSTER_IO_BLOCK_SIZE);
1969 
1970 		/*
1971 		 * we have now figured out how much I/O we can do - this is in 'io_size'
1972 		 * pg_offset is the starting point in the first page for the I/O
1973 		 * pg_count is the number of full and partial pages that 'io_size' encompasses
1974 		 */
1975 		pg_offset = upl_offset & PAGE_MASK;
1976 
1977 		if (flags & CL_DEV_MEMORY) {
1978 			/*
1979 			 * treat physical requests as one 'giant' page
1980 			 */
1981 			pg_count = 1;
1982 		} else {
1983 			pg_count  = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
1984 		}
1985 
1986 		if ((flags & CL_READ) && blkno == -1) {
1987 			vm_offset_t  commit_offset;
1988 			int bytes_to_zero;
1989 			int complete_transaction_now = 0;
1990 
1991 			/*
1992 			 * if we're reading and blkno == -1, then we've got a
1993 			 * 'hole' in the file that we need to deal with by zeroing
1994 			 * out the affected area in the upl
1995 			 */
1996 			if (io_size >= (u_int)non_rounded_size) {
1997 				/*
1998 				 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1999 				 * than 'zero_offset' will be non-zero
2000 				 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
2001 				 * (indicated by the io_size finishing off the I/O request for this UPL)
2002 				 * than we're not going to issue an I/O for the
2003 				 * last page in this upl... we need to zero both the hole and the tail
2004 				 * of the page beyond the EOF, since the delayed zero-fill won't kick in
2005 				 */
2006 				bytes_to_zero = non_rounded_size;
2007 				if (!(flags & CL_NOZERO)) {
2008 					bytes_to_zero = (int)((((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset);
2009 				}
2010 
2011 				zero_offset = 0;
2012 			} else {
2013 				bytes_to_zero = io_size;
2014 			}
2015 
2016 			pg_count = 0;
2017 
2018 			cluster_zero(upl, (upl_offset_t)upl_offset, bytes_to_zero, real_bp);
2019 
2020 			if (cbp_head) {
2021 				int     pg_resid;
2022 
2023 				/*
2024 				 * if there is a current I/O chain pending
2025 				 * then the first page of the group we just zero'd
2026 				 * will be handled by the I/O completion if the zero
2027 				 * fill started in the middle of the page
2028 				 */
2029 				commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2030 
2031 				pg_resid = (int)(commit_offset - upl_offset);
2032 
2033 				if (bytes_to_zero >= pg_resid) {
2034 					/*
2035 					 * the last page of the current I/O
2036 					 * has been completed...
2037 					 * compute the number of fully zero'd
2038 					 * pages that are beyond it
2039 					 * plus the last page if its partial
2040 					 * and we have no more I/O to issue...
2041 					 * otherwise a partial page is left
2042 					 * to begin the next I/O
2043 					 */
2044 					if ((int)io_size >= non_rounded_size) {
2045 						pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
2046 					} else {
2047 						pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
2048 					}
2049 
2050 					complete_transaction_now = 1;
2051 				}
2052 			} else {
2053 				/*
2054 				 * no pending I/O to deal with
2055 				 * so, commit all of the fully zero'd pages
2056 				 * plus the last page if its partial
2057 				 * and we have no more I/O to issue...
2058 				 * otherwise a partial page is left
2059 				 * to begin the next I/O
2060 				 */
2061 				if ((int)io_size >= non_rounded_size) {
2062 					pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
2063 				} else {
2064 					pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
2065 				}
2066 
2067 				commit_offset = upl_offset & ~PAGE_MASK;
2068 			}
2069 
2070 			// Associated UPL is currently only used in the direct write path
2071 			assert(!upl_associated_upl(upl));
2072 
2073 			if ((flags & CL_COMMIT) && pg_count) {
2074 				ubc_upl_commit_range(upl, (upl_offset_t)commit_offset,
2075 				    pg_count * PAGE_SIZE,
2076 				    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
2077 			}
2078 			upl_offset += io_size;
2079 			f_offset   += io_size;
2080 			size       -= io_size;
2081 
2082 			/*
2083 			 * keep track of how much of the original request
2084 			 * that we've actually completed... non_rounded_size
2085 			 * may go negative due to us rounding the request
2086 			 * to a page size multiple (i.e.  size > non_rounded_size)
2087 			 */
2088 			non_rounded_size -= io_size;
2089 
2090 			if (non_rounded_size <= 0) {
2091 				/*
2092 				 * we've transferred all of the data in the original
2093 				 * request, but we were unable to complete the tail
2094 				 * of the last page because the file didn't have
2095 				 * an allocation to back that portion... this is ok.
2096 				 */
2097 				size = 0;
2098 			}
2099 			if (cbp_head && (complete_transaction_now || size == 0)) {
2100 				cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2101 
2102 				cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
2103 
2104 				cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
2105 
2106 				trans_count = 0;
2107 			}
2108 			continue;
2109 		}
2110 		if (pg_count > max_vectors) {
2111 			if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
2112 				io_size = PAGE_SIZE - pg_offset;
2113 				pg_count = 1;
2114 			} else {
2115 				io_size -= (pg_count - max_vectors) * PAGE_SIZE;
2116 				pg_count = max_vectors;
2117 			}
2118 		}
2119 		/*
2120 		 * If the transaction is going to reach the maximum number of
2121 		 * desired elements, truncate the i/o to the nearest page so
2122 		 * that the actual i/o is initiated after this buffer is
2123 		 * created and added to the i/o chain.
2124 		 *
2125 		 * I/O directed to physically contiguous memory
2126 		 * doesn't have a requirement to make sure we 'fill' a page
2127 		 */
2128 		if (!(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
2129 		    ((upl_offset + io_size) & PAGE_MASK)) {
2130 			vm_offset_t aligned_ofs;
2131 
2132 			aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
2133 			/*
2134 			 * If the io_size does not actually finish off even a
2135 			 * single page we have to keep adding buffers to the
2136 			 * transaction despite having reached the desired limit.
2137 			 *
2138 			 * Eventually we get here with the page being finished
2139 			 * off (and exceeded) and then we truncate the size of
2140 			 * this i/o request so that it is page aligned so that
2141 			 * we can finally issue the i/o on the transaction.
2142 			 */
2143 			if (aligned_ofs > upl_offset) {
2144 				io_size = (u_int)(aligned_ofs - upl_offset);
2145 				pg_count--;
2146 			}
2147 		}
2148 
2149 		if (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) {
2150 			/*
2151 			 * if we're not targeting a virtual device i.e. a disk image
2152 			 * it's safe to dip into the reserve pool since real devices
2153 			 * can complete this I/O request without requiring additional
2154 			 * bufs from the alloc_io_buf pool
2155 			 */
2156 			priv = 1;
2157 		} else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT) && !cbp_head) {
2158 			/*
2159 			 * Throttle the speculative IO
2160 			 *
2161 			 * We can only throttle this if it is the first iobuf
2162 			 * for the transaction. alloc_io_buf implements
2163 			 * additional restrictions for diskimages anyway.
2164 			 */
2165 			priv = 0;
2166 		} else {
2167 			priv = 1;
2168 		}
2169 
2170 		cbp = alloc_io_buf(vp, priv);
2171 
2172 		if (flags & CL_PAGEOUT) {
2173 			u_int i;
2174 
2175 			/*
2176 			 * since blocks are in offsets of CLUSTER_IO_BLOCK_SIZE, scale
2177 			 * iteration to (PAGE_SIZE * pg_count) of blks.
2178 			 */
2179 			for (i = 0; i < (PAGE_SIZE * pg_count) / CLUSTER_IO_BLOCK_SIZE; i++) {
2180 				if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY) {
2181 					panic("BUSY bp found in cluster_io");
2182 				}
2183 			}
2184 		}
2185 		if (flags & CL_ASYNC) {
2186 			if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg)) {
2187 				panic("buf_setcallback failed");
2188 			}
2189 		}
2190 		cbp->b_cliodone = (void *)callback;
2191 		cbp->b_flags |= io_flags;
2192 		if (flags & CL_NOCACHE) {
2193 			cbp->b_attr.ba_flags |= BA_NOCACHE;
2194 		}
2195 		if (verify_block_size) {
2196 			cbp->b_attr.ba_flags |= BA_WILL_VERIFY;
2197 		}
2198 
2199 		cbp->b_lblkno = lblkno;
2200 		cbp->b_clfoffset = f_offset;
2201 		cbp->b_blkno  = blkno;
2202 		cbp->b_bcount = io_size;
2203 
2204 		if (buf_setupl(cbp, upl, (uint32_t)upl_offset)) {
2205 			panic("buf_setupl failed");
2206 		}
2207 #if CONFIG_IOSCHED
2208 		upl_set_blkno(upl, upl_offset, io_size, blkno);
2209 #endif
2210 		cbp->b_trans_next = (buf_t)NULL;
2211 
2212 		if ((cbp->b_iostate = (void *)iostate)) {
2213 			/*
2214 			 * caller wants to track the state of this
2215 			 * io... bump the amount issued against this stream
2216 			 */
2217 			iostate->io_issued += io_size;
2218 		}
2219 
2220 		if (flags & CL_READ) {
2221 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
2222 			    (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
2223 		} else {
2224 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
2225 			    (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
2226 		}
2227 
2228 		if (cbp_head) {
2229 			cbp_tail->b_trans_next = cbp;
2230 			cbp_tail = cbp;
2231 		} else {
2232 			cbp_head = cbp;
2233 			cbp_tail = cbp;
2234 
2235 			if ((cbp_head->b_real_bp = real_bp)) {
2236 				real_bp = (buf_t)NULL;
2237 			}
2238 		}
2239 		*(buf_t *)(&cbp->b_trans_head) = cbp_head;
2240 
2241 		trans_count++;
2242 
2243 		upl_offset += io_size;
2244 		f_offset   += io_size;
2245 		size       -= io_size;
2246 		/*
2247 		 * keep track of how much of the original request
2248 		 * that we've actually completed... non_rounded_size
2249 		 * may go negative due to us rounding the request
2250 		 * to a page size multiple (i.e.  size > non_rounded_size)
2251 		 */
2252 		non_rounded_size -= io_size;
2253 
2254 		if (non_rounded_size <= 0) {
2255 			/*
2256 			 * we've transferred all of the data in the original
2257 			 * request, but we were unable to complete the tail
2258 			 * of the last page because the file didn't have
2259 			 * an allocation to back that portion... this is ok.
2260 			 */
2261 			size = 0;
2262 		}
2263 		if (size == 0) {
2264 			/*
2265 			 * we have no more I/O to issue, so go
2266 			 * finish the final transaction
2267 			 */
2268 			need_EOT = TRUE;
2269 		} else if (((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
2270 		    ((flags & CL_ASYNC) || trans_count > max_trans_count)) {
2271 			/*
2272 			 * I/O directed to physically contiguous memory...
2273 			 * which doesn't have a requirement to make sure we 'fill' a page
2274 			 * or...
2275 			 * the current I/O we've prepared fully
2276 			 * completes the last page in this request
2277 			 * and ...
2278 			 * it's either an ASYNC request or
2279 			 * we've already accumulated more than 8 I/O's into
2280 			 * this transaction so mark it as complete so that
2281 			 * it can finish asynchronously or via the cluster_complete_transaction
2282 			 * below if the request is synchronous
2283 			 */
2284 			need_EOT = TRUE;
2285 		}
2286 		if (need_EOT == TRUE) {
2287 			cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
2288 		}
2289 
2290 		if (flags & CL_THROTTLE) {
2291 			(void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
2292 		}
2293 
2294 		if (!(io_flags & B_READ)) {
2295 			vnode_startwrite(vp);
2296 		}
2297 
2298 		if (flags & CL_RAW_ENCRYPTED) {
2299 			/*
2300 			 * User requested raw encrypted bytes.
2301 			 * Twiddle the bit in the ba_flags for the buffer
2302 			 */
2303 			cbp->b_attr.ba_flags |= BA_RAW_ENCRYPTED_IO;
2304 		}
2305 
2306 		(void) VNOP_STRATEGY(cbp);
2307 
2308 		if (need_EOT == TRUE) {
2309 			if (!(flags & CL_ASYNC)) {
2310 				cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
2311 			}
2312 
2313 			need_EOT = FALSE;
2314 			trans_count = 0;
2315 			cbp_head = NULL;
2316 		}
2317 	}
2318 	if (error) {
2319 		int abort_size;
2320 
2321 		io_size = 0;
2322 
2323 		if (cbp_head) {
2324 			/*
2325 			 * Wait until all of the outstanding I/O
2326 			 * for this partial transaction has completed
2327 			 */
2328 			cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2329 
2330 			/*
2331 			 * Rewind the upl offset to the beginning of the
2332 			 * transaction.
2333 			 */
2334 			upl_offset = cbp_head->b_uploffset;
2335 		}
2336 
2337 		if (ISSET(flags, CL_COMMIT)) {
2338 			cluster_handle_associated_upl(iostate, upl,
2339 			    (upl_offset_t)upl_offset,
2340 			    (upl_size_t)(upl_end_offset - upl_offset),
2341 			    cbp_head ? cbp_head->b_clfoffset : f_offset);
2342 		}
2343 
2344 		// Free all the IO buffers in this transaction
2345 		for (cbp = cbp_head; cbp;) {
2346 			buf_t   cbp_next;
2347 
2348 			size       += cbp->b_bcount;
2349 			io_size    += cbp->b_bcount;
2350 
2351 			cbp_next = cbp->b_trans_next;
2352 			free_io_buf(cbp);
2353 			cbp = cbp_next;
2354 		}
2355 
2356 		if (iostate) {
2357 			int need_wakeup = 0;
2358 
2359 			/*
2360 			 * update the error condition for this stream
2361 			 * since we never really issued the io
2362 			 * just go ahead and adjust it back
2363 			 */
2364 			lck_mtx_lock_spin(&iostate->io_mtxp);
2365 
2366 			if (iostate->io_error == 0) {
2367 				iostate->io_error = error;
2368 			}
2369 			iostate->io_issued -= io_size;
2370 
2371 			if (iostate->io_wanted) {
2372 				/*
2373 				 * someone is waiting for the state of
2374 				 * this io stream to change
2375 				 */
2376 				iostate->io_wanted = 0;
2377 				need_wakeup = 1;
2378 			}
2379 			lck_mtx_unlock(&iostate->io_mtxp);
2380 
2381 			if (need_wakeup) {
2382 				wakeup((caddr_t)&iostate->io_wanted);
2383 			}
2384 		}
2385 
2386 		if (flags & CL_COMMIT) {
2387 			int     upl_flags;
2388 
2389 			pg_offset  = upl_offset & PAGE_MASK;
2390 			abort_size = (int)((upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK);
2391 
2392 			upl_flags = cluster_ioerror(upl, (int)(upl_offset - pg_offset),
2393 			    abort_size, error, io_flags, vp);
2394 
2395 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
2396 			    upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
2397 		}
2398 		if (retval == 0) {
2399 			retval = error;
2400 		}
2401 	} else if (cbp_head) {
2402 		panic("%s(): cbp_head is not NULL.", __FUNCTION__);
2403 	}
2404 
2405 	if (real_bp) {
2406 		/*
2407 		 * can get here if we either encountered an error
2408 		 * or we completely zero-filled the request and
2409 		 * no I/O was issued
2410 		 */
2411 		if (error) {
2412 			real_bp->b_flags |= B_ERROR;
2413 			real_bp->b_error = error;
2414 		}
2415 		buf_biodone(real_bp);
2416 	}
2417 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
2418 
2419 	return retval;
2420 }
2421 
2422 #define reset_vector_run_state()                                                                                \
2423 	issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
2424 
2425 static int
vector_cluster_io(vnode_t vp,upl_t vector_upl,vm_offset_t vector_upl_offset,off_t v_upl_uio_offset,int vector_upl_iosize,int io_flag,buf_t real_bp,struct clios * iostate,int (* callback)(buf_t,void *),void * callback_arg)2426 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
2427     int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
2428 {
2429 	vector_upl_set_pagelist(vector_upl);
2430 
2431 	if (io_flag & CL_READ) {
2432 		if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0)) {
2433 			io_flag &= ~CL_PRESERVE; /*don't zero fill*/
2434 		} else {
2435 			io_flag |= CL_PRESERVE; /*zero fill*/
2436 		}
2437 	}
2438 	return cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg);
2439 }
2440 
2441 static int
cluster_read_prefetch(vnode_t vp,off_t f_offset,u_int size,off_t filesize,int (* callback)(buf_t,void *),void * callback_arg,int bflag)2442 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
2443 {
2444 	int           pages_in_prefetch;
2445 
2446 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
2447 	    (int)f_offset, size, (int)filesize, 0, 0);
2448 
2449 	if (f_offset >= filesize) {
2450 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2451 		    (int)f_offset, 0, 0, 0, 0);
2452 		return 0;
2453 	}
2454 	if ((off_t)size > (filesize - f_offset)) {
2455 		size = (u_int)(filesize - f_offset);
2456 	}
2457 	pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
2458 
2459 	advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
2460 
2461 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2462 	    (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
2463 
2464 	return pages_in_prefetch;
2465 }
2466 
2467 
2468 
2469 static void
cluster_read_ahead(vnode_t vp,struct cl_extent * extent,off_t filesize,struct cl_readahead * rap,int (* callback)(buf_t,void *),void * callback_arg,int bflag)2470 cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
2471     int bflag)
2472 {
2473 	daddr64_t       r_addr;
2474 	off_t           f_offset;
2475 	int             size_of_prefetch;
2476 	u_int           max_prefetch;
2477 
2478 
2479 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
2480 	    (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
2481 
2482 	if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
2483 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2484 		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
2485 		return;
2486 	}
2487 	if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
2488 		rap->cl_ralen = 0;
2489 		rap->cl_maxra = 0;
2490 
2491 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2492 		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
2493 
2494 		return;
2495 	}
2496 
2497 	max_prefetch = cluster_max_prefetch(vp,
2498 	    cluster_max_io_size(vp->v_mount, CL_READ), speculative_prefetch_max);
2499 
2500 	if (max_prefetch <= PAGE_SIZE) {
2501 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2502 		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0);
2503 		return;
2504 	}
2505 	if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) {
2506 		if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) {
2507 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2508 			    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
2509 			return;
2510 		}
2511 	}
2512 	r_addr = MAX(extent->e_addr, rap->cl_maxra) + 1;
2513 	f_offset = (off_t)(r_addr * PAGE_SIZE_64);
2514 
2515 	size_of_prefetch = 0;
2516 
2517 	ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
2518 
2519 	if (size_of_prefetch) {
2520 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2521 		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
2522 		return;
2523 	}
2524 	if (f_offset < filesize) {
2525 		daddr64_t read_size;
2526 
2527 		rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
2528 
2529 		read_size = (extent->e_addr + 1) - extent->b_addr;
2530 
2531 		if (read_size > rap->cl_ralen) {
2532 			if (read_size > max_prefetch / PAGE_SIZE) {
2533 				rap->cl_ralen = max_prefetch / PAGE_SIZE;
2534 			} else {
2535 				rap->cl_ralen = (int)read_size;
2536 			}
2537 		}
2538 		size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
2539 
2540 		if (size_of_prefetch) {
2541 			rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
2542 		}
2543 	}
2544 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2545 	    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
2546 }
2547 
2548 
2549 int
cluster_pageout(vnode_t vp,upl_t upl,upl_offset_t upl_offset,off_t f_offset,int size,off_t filesize,int flags)2550 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2551     int size, off_t filesize, int flags)
2552 {
2553 	return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2554 }
2555 
2556 
2557 int
cluster_pageout_ext(vnode_t vp,upl_t upl,upl_offset_t upl_offset,off_t f_offset,int size,off_t filesize,int flags,int (* callback)(buf_t,void *),void * callback_arg)2558 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2559     int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2560 {
2561 	int           io_size;
2562 	int           rounded_size;
2563 	off_t         max_size;
2564 	int           local_flags;
2565 
2566 	local_flags = CL_PAGEOUT | CL_THROTTLE;
2567 
2568 	if ((flags & UPL_IOSYNC) == 0) {
2569 		local_flags |= CL_ASYNC;
2570 	}
2571 	if ((flags & UPL_NOCOMMIT) == 0) {
2572 		local_flags |= CL_COMMIT;
2573 	}
2574 	if ((flags & UPL_KEEPCACHED)) {
2575 		local_flags |= CL_KEEPCACHED;
2576 	}
2577 	if (flags & UPL_PAGING_ENCRYPTED) {
2578 		local_flags |= CL_ENCRYPTED;
2579 	}
2580 
2581 
2582 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
2583 	    (int)f_offset, size, (int)filesize, local_flags, 0);
2584 
2585 	/*
2586 	 * If they didn't specify any I/O, then we are done...
2587 	 * we can't issue an abort because we don't know how
2588 	 * big the upl really is
2589 	 */
2590 	if (size <= 0) {
2591 		return EINVAL;
2592 	}
2593 
2594 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
2595 		if (local_flags & CL_COMMIT) {
2596 			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2597 		}
2598 		return EROFS;
2599 	}
2600 	/*
2601 	 * can't page-in from a negative offset
2602 	 * or if we're starting beyond the EOF
2603 	 * or if the file offset isn't page aligned
2604 	 * or the size requested isn't a multiple of PAGE_SIZE
2605 	 */
2606 	if (f_offset < 0 || f_offset >= filesize ||
2607 	    (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
2608 		if (local_flags & CL_COMMIT) {
2609 			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2610 		}
2611 		return EINVAL;
2612 	}
2613 	max_size = filesize - f_offset;
2614 
2615 	if (size < max_size) {
2616 		io_size = size;
2617 	} else {
2618 		io_size = (int)max_size;
2619 	}
2620 
2621 	rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2622 
2623 	if (size > rounded_size) {
2624 		if (local_flags & CL_COMMIT) {
2625 			ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
2626 			    UPL_ABORT_FREE_ON_EMPTY);
2627 		}
2628 	}
2629 	return cluster_io(vp, upl, upl_offset, f_offset, io_size,
2630 	           local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2631 }
2632 
2633 
2634 int
cluster_pagein(vnode_t vp,upl_t upl,upl_offset_t upl_offset,off_t f_offset,int size,off_t filesize,int flags)2635 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2636     int size, off_t filesize, int flags)
2637 {
2638 	return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2639 }
2640 
2641 
2642 int
cluster_pagein_ext(vnode_t vp,upl_t upl,upl_offset_t upl_offset,off_t f_offset,int size,off_t filesize,int flags,int (* callback)(buf_t,void *),void * callback_arg)2643 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2644     int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2645 {
2646 	u_int         io_size;
2647 	int           rounded_size;
2648 	off_t         max_size;
2649 	int           retval;
2650 	int           local_flags = 0;
2651 
2652 	if (upl == NULL || size < 0) {
2653 		panic("cluster_pagein: NULL upl passed in");
2654 	}
2655 
2656 	if ((flags & UPL_IOSYNC) == 0) {
2657 		local_flags |= CL_ASYNC;
2658 	}
2659 	if ((flags & UPL_NOCOMMIT) == 0) {
2660 		local_flags |= CL_COMMIT;
2661 	}
2662 	if (flags & UPL_IOSTREAMING) {
2663 		local_flags |= CL_IOSTREAMING;
2664 	}
2665 	if (flags & UPL_PAGING_ENCRYPTED) {
2666 		local_flags |= CL_ENCRYPTED;
2667 	}
2668 
2669 
2670 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
2671 	    (int)f_offset, size, (int)filesize, local_flags, 0);
2672 
2673 	/*
2674 	 * can't page-in from a negative offset
2675 	 * or if we're starting beyond the EOF
2676 	 * or if the file offset isn't page aligned
2677 	 * or the size requested isn't a multiple of PAGE_SIZE
2678 	 */
2679 	if (f_offset < 0 || f_offset >= filesize ||
2680 	    (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
2681 		if (local_flags & CL_COMMIT) {
2682 			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2683 		}
2684 
2685 		if (f_offset >= filesize) {
2686 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CLUSTER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CL_PGIN_PAST_EOF), 0 /* arg */);
2687 		}
2688 
2689 		return EINVAL;
2690 	}
2691 	max_size = filesize - f_offset;
2692 
2693 	if (size < max_size) {
2694 		io_size = size;
2695 	} else {
2696 		io_size = (int)max_size;
2697 	}
2698 
2699 	rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2700 
2701 	if (size > rounded_size && (local_flags & CL_COMMIT)) {
2702 		ubc_upl_abort_range(upl, upl_offset + rounded_size,
2703 		    size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2704 	}
2705 
2706 	retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2707 	    local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2708 
2709 	return retval;
2710 }
2711 
2712 
2713 int
cluster_bp(buf_t bp)2714 cluster_bp(buf_t bp)
2715 {
2716 	return cluster_bp_ext(bp, NULL, NULL);
2717 }
2718 
2719 
2720 int
cluster_bp_ext(buf_t bp,int (* callback)(buf_t,void *),void * callback_arg)2721 cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
2722 {
2723 	off_t  f_offset;
2724 	int    flags;
2725 
2726 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
2727 	    bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
2728 
2729 	if (bp->b_flags & B_READ) {
2730 		flags = CL_ASYNC | CL_READ;
2731 	} else {
2732 		flags = CL_ASYNC;
2733 	}
2734 	if (bp->b_flags & B_PASSIVE) {
2735 		flags |= CL_PASSIVE;
2736 	}
2737 
2738 	f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
2739 
2740 	return cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg);
2741 }
2742 
2743 
2744 
2745 int
cluster_write(vnode_t vp,struct uio * uio,off_t oldEOF,off_t newEOF,off_t headOff,off_t tailOff,int xflags)2746 cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
2747 {
2748 	return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
2749 }
2750 
2751 
2752 int
cluster_write_ext(vnode_t vp,struct uio * uio,off_t oldEOF,off_t newEOF,off_t headOff,off_t tailOff,int xflags,int (* callback)(buf_t,void *),void * callback_arg)2753 cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
2754     int xflags, int (*callback)(buf_t, void *), void *callback_arg)
2755 {
2756 	user_ssize_t    cur_resid;
2757 	int             retval = 0;
2758 	int             flags;
2759 	int             zflags;
2760 	int             bflag;
2761 	int             write_type = IO_COPY;
2762 	u_int32_t       write_length;
2763 	uint32_t        min_direct_size = MIN_DIRECT_WRITE_SIZE;
2764 
2765 	flags = xflags;
2766 
2767 	if (flags & IO_PASSIVE) {
2768 		bflag = CL_PASSIVE;
2769 	} else {
2770 		bflag = 0;
2771 	}
2772 
2773 	if (vp->v_flag & VNOCACHE_DATA) {
2774 		flags |= IO_NOCACHE;
2775 		bflag |= CL_NOCACHE;
2776 	}
2777 	if (uio == NULL) {
2778 		/*
2779 		 * no user data...
2780 		 * this call is being made to zero-fill some range in the file
2781 		 */
2782 		retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
2783 
2784 		return retval;
2785 	}
2786 	/*
2787 	 * do a write through the cache if one of the following is true....
2788 	 *   NOCACHE is not true or NODIRECT is true
2789 	 *   the uio request doesn't target USERSPACE
2790 	 * otherwise, find out if we want the direct or contig variant for
2791 	 * the first vector in the uio request
2792 	 */
2793 	if (((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
2794 		if (flags & IO_NOCACHE_SWRITE) {
2795 			uint32_t fs_bsize = vp->v_mount->mnt_vfsstat.f_bsize;
2796 
2797 			if (fs_bsize && (fs_bsize < MIN_DIRECT_WRITE_SIZE) &&
2798 			    ((fs_bsize & (fs_bsize - 1)) == 0)) {
2799 				min_direct_size = fs_bsize;
2800 			}
2801 		}
2802 		retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
2803 	}
2804 
2805 	if ((flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT) {
2806 		/*
2807 		 * must go through the cached variant in this case
2808 		 */
2809 		write_type = IO_COPY;
2810 	}
2811 
2812 	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
2813 		switch (write_type) {
2814 		case IO_COPY:
2815 			/*
2816 			 * make sure the uio_resid isn't too big...
2817 			 * internally, we want to handle all of the I/O in
2818 			 * chunk sizes that fit in a 32 bit int
2819 			 */
2820 			if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
2821 				/*
2822 				 * we're going to have to call cluster_write_copy
2823 				 * more than once...
2824 				 *
2825 				 * only want the last call to cluster_write_copy to
2826 				 * have the IO_TAILZEROFILL flag set and only the
2827 				 * first call should have IO_HEADZEROFILL
2828 				 */
2829 				zflags = flags & ~IO_TAILZEROFILL;
2830 				flags &= ~IO_HEADZEROFILL;
2831 
2832 				write_length = MAX_IO_REQUEST_SIZE;
2833 			} else {
2834 				/*
2835 				 * last call to cluster_write_copy
2836 				 */
2837 				zflags = flags;
2838 
2839 				write_length = (u_int32_t)cur_resid;
2840 			}
2841 			retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
2842 			break;
2843 
2844 		case IO_CONTIG:
2845 			zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
2846 
2847 			if (flags & IO_HEADZEROFILL) {
2848 				/*
2849 				 * only do this once per request
2850 				 */
2851 				flags &= ~IO_HEADZEROFILL;
2852 
2853 				retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
2854 				    headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2855 				if (retval) {
2856 					break;
2857 				}
2858 			}
2859 			retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
2860 
2861 			if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
2862 				/*
2863 				 * we're done with the data from the user specified buffer(s)
2864 				 * and we've been requested to zero fill at the tail
2865 				 * treat this as an IO_HEADZEROFILL which doesn't require a uio
2866 				 * by rearranging the args and passing in IO_HEADZEROFILL
2867 				 */
2868 
2869 				/*
2870 				 * Update the oldEOF to reflect the current EOF. If the UPL page
2871 				 * to zero-fill is not valid (when F_NOCACHE is set), the
2872 				 * cluster_write_copy() will perform RMW on the UPL page when
2873 				 * the oldEOF is not aligned on page boundary due to unaligned
2874 				 * write.
2875 				 */
2876 				if (uio->uio_offset > oldEOF) {
2877 					oldEOF = uio->uio_offset;
2878 				}
2879 				retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)oldEOF, tailOff, uio->uio_offset,
2880 				    (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2881 			}
2882 			break;
2883 
2884 		case IO_DIRECT:
2885 			/*
2886 			 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
2887 			 */
2888 			retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg, min_direct_size);
2889 			break;
2890 
2891 		case IO_UNKNOWN:
2892 			retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
2893 			break;
2894 		}
2895 		/*
2896 		 * in case we end up calling cluster_write_copy (from cluster_write_direct)
2897 		 * multiple times to service a multi-vector request that is not aligned properly
2898 		 * we need to update the oldEOF so that we
2899 		 * don't zero-fill the head of a page if we've successfully written
2900 		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2901 		 * page that is beyond the oldEOF if the write is unaligned... we only
2902 		 * want that to happen for the very first page of the cluster_write,
2903 		 * NOT the first page of each vector making up a multi-vector write.
2904 		 */
2905 		if (uio->uio_offset > oldEOF) {
2906 			oldEOF = uio->uio_offset;
2907 		}
2908 	}
2909 	return retval;
2910 }
2911 
2912 
2913 static int
cluster_write_direct(vnode_t vp,struct uio * uio,off_t oldEOF,off_t newEOF,int * write_type,u_int32_t * write_length,int flags,int (* callback)(buf_t,void *),void * callback_arg,uint32_t min_io_size)2914 cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
2915     int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size)
2916 {
2917 	upl_t            upl = NULL;
2918 	upl_page_info_t  *pl;
2919 	vm_offset_t      upl_offset;
2920 	vm_offset_t      vector_upl_offset = 0;
2921 	u_int32_t        io_req_size;
2922 	u_int32_t        offset_in_file;
2923 	u_int32_t        offset_in_iovbase;
2924 	u_int32_t        io_size;
2925 	int              io_flag = 0;
2926 	upl_size_t       upl_size = 0, vector_upl_size = 0;
2927 	vm_size_t        upl_needed_size;
2928 	mach_msg_type_number_t  pages_in_pl = 0;
2929 	upl_control_flags_t upl_flags;
2930 	kern_return_t    kret = KERN_SUCCESS;
2931 	mach_msg_type_number_t  i = 0;
2932 	int              force_data_sync;
2933 	int              retval = 0;
2934 	int              first_IO = 1;
2935 	struct clios     iostate;
2936 	user_addr_t      iov_base;
2937 	u_int32_t        mem_alignment_mask;
2938 	u_int32_t        devblocksize;
2939 	u_int32_t        max_io_size;
2940 	u_int32_t        max_upl_size;
2941 	u_int32_t        max_vector_size;
2942 	u_int32_t        bytes_outstanding_limit;
2943 	boolean_t        io_throttled = FALSE;
2944 
2945 	u_int32_t        vector_upl_iosize = 0;
2946 	int              issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
2947 	off_t            v_upl_uio_offset = 0;
2948 	int              vector_upl_index = 0;
2949 	upl_t            vector_upl = NULL;
2950 
2951 	uint32_t         io_align_mask;
2952 
2953 	/*
2954 	 * When we enter this routine, we know
2955 	 *  -- the resid will not exceed iov_len
2956 	 */
2957 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
2958 	    (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2959 
2960 	assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
2961 
2962 	max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2963 
2964 	io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
2965 
2966 	if (flags & IO_PASSIVE) {
2967 		io_flag |= CL_PASSIVE;
2968 	}
2969 
2970 	if (flags & IO_NOCACHE) {
2971 		io_flag |= CL_NOCACHE;
2972 	}
2973 
2974 	if (flags & IO_SKIP_ENCRYPTION) {
2975 		io_flag |= CL_ENCRYPTED;
2976 	}
2977 
2978 	iostate.io_completed = 0;
2979 	iostate.io_issued = 0;
2980 	iostate.io_error = 0;
2981 	iostate.io_wanted = 0;
2982 
2983 	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
2984 
2985 	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2986 	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2987 
2988 	if (devblocksize == 1) {
2989 		/*
2990 		 * the AFP client advertises a devblocksize of 1
2991 		 * however, its BLOCKMAP routine maps to physical
2992 		 * blocks that are PAGE_SIZE in size...
2993 		 * therefore we can't ask for I/Os that aren't page aligned
2994 		 * or aren't multiples of PAGE_SIZE in size
2995 		 * by setting devblocksize to PAGE_SIZE, we re-instate
2996 		 * the old behavior we had before the mem_alignment_mask
2997 		 * changes went in...
2998 		 */
2999 		devblocksize = PAGE_SIZE;
3000 	}
3001 
3002 	io_align_mask = PAGE_MASK;
3003 	if (min_io_size < MIN_DIRECT_WRITE_SIZE) {
3004 		/* The process has opted into fs blocksize direct io writes */
3005 		assert((min_io_size & (min_io_size - 1)) == 0);
3006 		io_align_mask = min_io_size - 1;
3007 		io_flag |= CL_DIRECT_IO_FSBLKSZ;
3008 	}
3009 
3010 next_dwrite:
3011 	io_req_size = *write_length;
3012 	iov_base = uio_curriovbase(uio);
3013 
3014 	offset_in_file = (u_int32_t)(uio->uio_offset & io_align_mask);
3015 	offset_in_iovbase = (u_int32_t)(iov_base & mem_alignment_mask);
3016 
3017 	if (offset_in_file || offset_in_iovbase) {
3018 		/*
3019 		 * one of the 2 important offsets is misaligned
3020 		 * so fire an I/O through the cache for this entire vector
3021 		 */
3022 		goto wait_for_dwrites;
3023 	}
3024 	if (iov_base & (devblocksize - 1)) {
3025 		/*
3026 		 * the offset in memory must be on a device block boundary
3027 		 * so that we can guarantee that we can generate an
3028 		 * I/O that ends on a page boundary in cluster_io
3029 		 */
3030 		goto wait_for_dwrites;
3031 	}
3032 
3033 	task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp);
3034 	while ((io_req_size >= PAGE_SIZE || io_req_size >= min_io_size) && uio->uio_offset < newEOF && retval == 0) {
3035 		int     throttle_type;
3036 
3037 		if ((throttle_type = cluster_is_throttled(vp))) {
3038 			uint32_t max_throttle_size = calculate_max_throttle_size(vp);
3039 
3040 			/*
3041 			 * we're in the throttle window, at the very least
3042 			 * we want to limit the size of the I/O we're about
3043 			 * to issue
3044 			 */
3045 			if ((flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
3046 				/*
3047 				 * we're in the throttle window and at least 1 I/O
3048 				 * has already been issued by a throttleable thread
3049 				 * in this window, so return with EAGAIN to indicate
3050 				 * to the FS issuing the cluster_write call that it
3051 				 * should now throttle after dropping any locks
3052 				 */
3053 				throttle_info_update_by_mount(vp->v_mount);
3054 
3055 				io_throttled = TRUE;
3056 				goto wait_for_dwrites;
3057 			}
3058 			max_vector_size = max_throttle_size;
3059 			max_io_size = max_throttle_size;
3060 		} else {
3061 			max_vector_size = MAX_VECTOR_UPL_SIZE;
3062 			max_io_size = max_upl_size;
3063 		}
3064 
3065 		if (first_IO) {
3066 			cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
3067 			first_IO = 0;
3068 		}
3069 		io_size  = io_req_size & ~io_align_mask;
3070 		iov_base = uio_curriovbase(uio);
3071 
3072 		if (io_size > max_io_size) {
3073 			io_size = max_io_size;
3074 		}
3075 
3076 		if (useVectorUPL && (iov_base & PAGE_MASK)) {
3077 			/*
3078 			 * We have an iov_base that's not page-aligned.
3079 			 * Issue all I/O's that have been collected within
3080 			 * this Vectored UPL.
3081 			 */
3082 			if (vector_upl_index) {
3083 				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3084 				reset_vector_run_state();
3085 			}
3086 
3087 			/*
3088 			 * After this point, if we are using the Vector UPL path and the base is
3089 			 * not page-aligned then the UPL with that base will be the first in the vector UPL.
3090 			 */
3091 		}
3092 
3093 		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
3094 		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3095 
3096 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
3097 		    (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
3098 
3099 		vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
3100 		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
3101 			pages_in_pl = 0;
3102 			upl_size = (upl_size_t)upl_needed_size;
3103 			upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
3104 			    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
3105 
3106 			kret = vm_map_get_upl(map,
3107 			    (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
3108 			    &upl_size,
3109 			    &upl,
3110 			    NULL,
3111 			    &pages_in_pl,
3112 			    &upl_flags,
3113 			    VM_KERN_MEMORY_FILE,
3114 			    force_data_sync);
3115 
3116 			if (kret != KERN_SUCCESS) {
3117 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3118 				    0, 0, 0, kret, 0);
3119 				/*
3120 				 * failed to get pagelist
3121 				 *
3122 				 * we may have already spun some portion of this request
3123 				 * off as async requests... we need to wait for the I/O
3124 				 * to complete before returning
3125 				 */
3126 				goto wait_for_dwrites;
3127 			}
3128 			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
3129 			pages_in_pl = upl_size / PAGE_SIZE;
3130 
3131 			for (i = 0; i < pages_in_pl; i++) {
3132 				if (!upl_valid_page(pl, i)) {
3133 					break;
3134 				}
3135 			}
3136 			if (i == pages_in_pl) {
3137 				break;
3138 			}
3139 
3140 			/*
3141 			 * didn't get all the pages back that we
3142 			 * needed... release this upl and try again
3143 			 */
3144 			ubc_upl_abort(upl, 0);
3145 		}
3146 		if (force_data_sync >= 3) {
3147 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3148 			    i, pages_in_pl, upl_size, kret, 0);
3149 			/*
3150 			 * for some reason, we couldn't acquire a hold on all
3151 			 * the pages needed in the user's address space
3152 			 *
3153 			 * we may have already spun some portion of this request
3154 			 * off as async requests... we need to wait for the I/O
3155 			 * to complete before returning
3156 			 */
3157 			goto wait_for_dwrites;
3158 		}
3159 
3160 		/*
3161 		 * Consider the possibility that upl_size wasn't satisfied.
3162 		 */
3163 		if (upl_size < upl_needed_size) {
3164 			if (upl_size && upl_offset == 0) {
3165 				io_size = upl_size;
3166 			} else {
3167 				io_size = 0;
3168 			}
3169 		}
3170 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3171 		    (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
3172 
3173 		if (io_size == 0) {
3174 			ubc_upl_abort(upl, 0);
3175 			/*
3176 			 * we may have already spun some portion of this request
3177 			 * off as async requests... we need to wait for the I/O
3178 			 * to complete before returning
3179 			 */
3180 			goto wait_for_dwrites;
3181 		}
3182 
3183 		if (useVectorUPL) {
3184 			vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
3185 			if (end_off) {
3186 				issueVectorUPL = 1;
3187 			}
3188 			/*
3189 			 * After this point, if we are using a vector UPL, then
3190 			 * either all the UPL elements end on a page boundary OR
3191 			 * this UPL is the last element because it does not end
3192 			 * on a page boundary.
3193 			 */
3194 		}
3195 
3196 		/*
3197 		 * we want push out these writes asynchronously so that we can overlap
3198 		 * the preparation of the next I/O
3199 		 * if there are already too many outstanding writes
3200 		 * wait until some complete before issuing the next
3201 		 */
3202 		if (vp->v_mount->mnt_minsaturationbytecount) {
3203 			bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
3204 		} else {
3205 			if (__improbable(os_mul_overflow(max_upl_size, IO_SCALE(vp, 2),
3206 			    &bytes_outstanding_limit) ||
3207 			    (bytes_outstanding_limit > overlapping_write_max))) {
3208 				bytes_outstanding_limit = overlapping_write_max;
3209 			}
3210 		}
3211 
3212 		cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");
3213 
3214 		if (iostate.io_error) {
3215 			/*
3216 			 * one of the earlier writes we issued ran into a hard error
3217 			 * don't issue any more writes, cleanup the UPL
3218 			 * that was just created but not used, then
3219 			 * go wait for all writes that are part of this stream
3220 			 * to complete before returning the error to the caller
3221 			 */
3222 			ubc_upl_abort(upl, 0);
3223 
3224 			goto wait_for_dwrites;
3225 		}
3226 
3227 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
3228 		    (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
3229 
3230 		if (!useVectorUPL) {
3231 			retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
3232 			    io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3233 		} else {
3234 			if (!vector_upl_index) {
3235 				vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
3236 				v_upl_uio_offset = uio->uio_offset;
3237 				vector_upl_offset = upl_offset;
3238 			}
3239 
3240 			vector_upl_set_subupl(vector_upl, upl, upl_size);
3241 			vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
3242 			vector_upl_index++;
3243 			vector_upl_iosize += io_size;
3244 			vector_upl_size += upl_size;
3245 
3246 			if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
3247 				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3248 				reset_vector_run_state();
3249 			}
3250 		}
3251 
3252 		/*
3253 		 * update the uio structure to
3254 		 * reflect the I/O that we just issued
3255 		 */
3256 		uio_update(uio, (user_size_t)io_size);
3257 
3258 		/*
3259 		 * in case we end up calling through to cluster_write_copy to finish
3260 		 * the tail of this request, we need to update the oldEOF so that we
3261 		 * don't zero-fill the head of a page if we've successfully written
3262 		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
3263 		 * page that is beyond the oldEOF if the write is unaligned... we only
3264 		 * want that to happen for the very first page of the cluster_write,
3265 		 * NOT the first page of each vector making up a multi-vector write.
3266 		 */
3267 		if (uio->uio_offset > oldEOF) {
3268 			oldEOF = uio->uio_offset;
3269 		}
3270 
3271 		io_req_size -= io_size;
3272 
3273 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
3274 		    (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
3275 	} /* end while */
3276 
3277 	if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
3278 		retval = cluster_io_type(uio, write_type, write_length, min_io_size);
3279 
3280 		if (retval == 0 && *write_type == IO_DIRECT) {
3281 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
3282 			    (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
3283 
3284 			goto next_dwrite;
3285 		}
3286 	}
3287 
3288 wait_for_dwrites:
3289 
3290 	if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
3291 		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3292 		reset_vector_run_state();
3293 	}
3294 	/*
3295 	 * make sure all async writes issued as part of this stream
3296 	 * have completed before we return
3297 	 */
3298 	cluster_iostate_wait(&iostate, 0, "cluster_write_direct");
3299 
3300 	if (iostate.io_error) {
3301 		retval = iostate.io_error;
3302 	}
3303 
3304 	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3305 
3306 	if (io_throttled == TRUE && retval == 0) {
3307 		retval = EAGAIN;
3308 	}
3309 
3310 	if (io_req_size && retval == 0) {
3311 		/*
3312 		 * we couldn't handle the tail of this request in DIRECT mode
3313 		 * so fire it through the copy path
3314 		 *
3315 		 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
3316 		 * so we can just pass 0 in for the headOff and tailOff
3317 		 */
3318 		if (uio->uio_offset > oldEOF) {
3319 			oldEOF = uio->uio_offset;
3320 		}
3321 
3322 		retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
3323 
3324 		*write_type = IO_UNKNOWN;
3325 	}
3326 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
3327 	    (int)uio->uio_offset, io_req_size, retval, 4, 0);
3328 
3329 	return retval;
3330 }
3331 
3332 
3333 static int
cluster_write_contig(vnode_t vp,struct uio * uio,off_t newEOF,int * write_type,u_int32_t * write_length,int (* callback)(buf_t,void *),void * callback_arg,int bflag)3334 cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
3335     int (*callback)(buf_t, void *), void *callback_arg, int bflag)
3336 {
3337 	upl_page_info_t *pl;
3338 	addr64_t         src_paddr = 0;
3339 	upl_t            upl[MAX_VECTS];
3340 	vm_offset_t      upl_offset;
3341 	u_int32_t        tail_size = 0;
3342 	u_int32_t        io_size;
3343 	u_int32_t        xsize;
3344 	upl_size_t       upl_size;
3345 	vm_size_t        upl_needed_size;
3346 	mach_msg_type_number_t  pages_in_pl;
3347 	upl_control_flags_t upl_flags;
3348 	kern_return_t    kret;
3349 	struct clios     iostate;
3350 	int              error  = 0;
3351 	int              cur_upl = 0;
3352 	int              num_upl = 0;
3353 	int              n;
3354 	user_addr_t      iov_base;
3355 	u_int32_t        devblocksize;
3356 	u_int32_t        mem_alignment_mask;
3357 
3358 	/*
3359 	 * When we enter this routine, we know
3360 	 *  -- the io_req_size will not exceed iov_len
3361 	 *  -- the target address is physically contiguous
3362 	 */
3363 	cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
3364 
3365 	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
3366 	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
3367 
3368 	iostate.io_completed = 0;
3369 	iostate.io_issued = 0;
3370 	iostate.io_error = 0;
3371 	iostate.io_wanted = 0;
3372 
3373 	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
3374 
3375 next_cwrite:
3376 	io_size = *write_length;
3377 
3378 	iov_base = uio_curriovbase(uio);
3379 
3380 	upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
3381 	upl_needed_size = upl_offset + io_size;
3382 
3383 	pages_in_pl = 0;
3384 	upl_size = (upl_size_t)upl_needed_size;
3385 	upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
3386 	    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
3387 
3388 	vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
3389 	kret = vm_map_get_upl(map,
3390 	    vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
3391 	    &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
3392 
3393 	if (kret != KERN_SUCCESS) {
3394 		/*
3395 		 * failed to get pagelist
3396 		 */
3397 		error = EINVAL;
3398 		goto wait_for_cwrites;
3399 	}
3400 	num_upl++;
3401 
3402 	/*
3403 	 * Consider the possibility that upl_size wasn't satisfied.
3404 	 */
3405 	if (upl_size < upl_needed_size) {
3406 		/*
3407 		 * This is a failure in the physical memory case.
3408 		 */
3409 		error = EINVAL;
3410 		goto wait_for_cwrites;
3411 	}
3412 	pl = ubc_upl_pageinfo(upl[cur_upl]);
3413 
3414 	src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
3415 
3416 	while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
3417 		u_int32_t   head_size;
3418 
3419 		head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
3420 
3421 		if (head_size > io_size) {
3422 			head_size = io_size;
3423 		}
3424 
3425 		error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
3426 
3427 		if (error) {
3428 			goto wait_for_cwrites;
3429 		}
3430 
3431 		upl_offset += head_size;
3432 		src_paddr  += head_size;
3433 		io_size    -= head_size;
3434 
3435 		iov_base   += head_size;
3436 	}
3437 	if ((u_int32_t)iov_base & mem_alignment_mask) {
3438 		/*
3439 		 * request doesn't set up on a memory boundary
3440 		 * the underlying DMA engine can handle...
3441 		 * return an error instead of going through
3442 		 * the slow copy path since the intent of this
3443 		 * path is direct I/O from device memory
3444 		 */
3445 		error = EINVAL;
3446 		goto wait_for_cwrites;
3447 	}
3448 
3449 	tail_size = io_size & (devblocksize - 1);
3450 	io_size  -= tail_size;
3451 
3452 	while (io_size && error == 0) {
3453 		if (io_size > MAX_IO_CONTIG_SIZE) {
3454 			xsize = MAX_IO_CONTIG_SIZE;
3455 		} else {
3456 			xsize = io_size;
3457 		}
3458 		/*
3459 		 * request asynchronously so that we can overlap
3460 		 * the preparation of the next I/O... we'll do
3461 		 * the commit after all the I/O has completed
3462 		 * since its all issued against the same UPL
3463 		 * if there are already too many outstanding writes
3464 		 * wait until some have completed before issuing the next
3465 		 */
3466 		cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");
3467 
3468 		if (iostate.io_error) {
3469 			/*
3470 			 * one of the earlier writes we issued ran into a hard error
3471 			 * don't issue any more writes...
3472 			 * go wait for all writes that are part of this stream
3473 			 * to complete before returning the error to the caller
3474 			 */
3475 			goto wait_for_cwrites;
3476 		}
3477 		/*
3478 		 * issue an asynchronous write to cluster_io
3479 		 */
3480 		error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
3481 		    xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
3482 
3483 		if (error == 0) {
3484 			/*
3485 			 * The cluster_io write completed successfully,
3486 			 * update the uio structure
3487 			 */
3488 			uio_update(uio, (user_size_t)xsize);
3489 
3490 			upl_offset += xsize;
3491 			src_paddr  += xsize;
3492 			io_size    -= xsize;
3493 		}
3494 	}
3495 	if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
3496 		error = cluster_io_type(uio, write_type, write_length, 0);
3497 
3498 		if (error == 0 && *write_type == IO_CONTIG) {
3499 			cur_upl++;
3500 			goto next_cwrite;
3501 		}
3502 	} else {
3503 		*write_type = IO_UNKNOWN;
3504 	}
3505 
3506 wait_for_cwrites:
3507 	/*
3508 	 * make sure all async writes that are part of this stream
3509 	 * have completed before we proceed
3510 	 */
3511 	cluster_iostate_wait(&iostate, 0, "cluster_write_contig");
3512 
3513 	if (iostate.io_error) {
3514 		error = iostate.io_error;
3515 	}
3516 
3517 	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3518 
3519 	if (error == 0 && tail_size) {
3520 		error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
3521 	}
3522 
3523 	for (n = 0; n < num_upl; n++) {
3524 		/*
3525 		 * just release our hold on each physically contiguous
3526 		 * region without changing any state
3527 		 */
3528 		ubc_upl_abort(upl[n], 0);
3529 	}
3530 
3531 	return error;
3532 }
3533 
3534 
3535 /*
3536  * need to avoid a race between an msync of a range of pages dirtied via mmap
3537  * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
3538  * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
3539  *
3540  * we should never force-zero-fill pages that are already valid in the cache...
3541  * the entire page contains valid data (either from disk, zero-filled or dirtied
3542  * via an mmap) so we can only do damage by trying to zero-fill
3543  *
3544  */
3545 static int
cluster_zero_range(upl_t upl,upl_page_info_t * pl,int flags,int io_offset,off_t zero_off,off_t upl_f_offset,int bytes_to_zero)3546 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
3547 {
3548 	int zero_pg_index;
3549 	boolean_t need_cluster_zero = TRUE;
3550 
3551 	if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
3552 		bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
3553 		zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
3554 
3555 		if (upl_valid_page(pl, zero_pg_index)) {
3556 			/*
3557 			 * never force zero valid pages - dirty or clean
3558 			 * we'll leave these in the UPL for cluster_write_copy to deal with
3559 			 */
3560 			need_cluster_zero = FALSE;
3561 		}
3562 	}
3563 	if (need_cluster_zero == TRUE) {
3564 		cluster_zero(upl, io_offset, bytes_to_zero, NULL);
3565 	}
3566 
3567 	return bytes_to_zero;
3568 }
3569 
3570 
3571 void
cluster_update_state(vnode_t vp,vm_object_offset_t s_offset,vm_object_offset_t e_offset,boolean_t vm_initiated)3572 cluster_update_state(vnode_t vp, vm_object_offset_t s_offset, vm_object_offset_t e_offset, boolean_t vm_initiated)
3573 {
3574 	struct cl_extent cl;
3575 	boolean_t first_pass = TRUE;
3576 
3577 	assert(s_offset < e_offset);
3578 	assert((s_offset & PAGE_MASK_64) == 0);
3579 	assert((e_offset & PAGE_MASK_64) == 0);
3580 
3581 	cl.b_addr = (daddr64_t)(s_offset / PAGE_SIZE_64);
3582 	cl.e_addr = (daddr64_t)(e_offset / PAGE_SIZE_64);
3583 
3584 	cluster_update_state_internal(vp, &cl, 0, TRUE, &first_pass, s_offset, (int)(e_offset - s_offset),
3585 	    vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated);
3586 }
3587 
3588 
3589 static void
cluster_update_state_internal(vnode_t vp,struct cl_extent * cl,int flags,boolean_t defer_writes,boolean_t * first_pass,off_t write_off,int write_cnt,off_t newEOF,int (* callback)(buf_t,void *),void * callback_arg,boolean_t vm_initiated)3590 cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes,
3591     boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF,
3592     int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
3593 {
3594 	struct cl_writebehind *wbp;
3595 	int     cl_index;
3596 	int     ret_cluster_try_push;
3597 	u_int   max_cluster_pgcount;
3598 
3599 
3600 	max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
3601 
3602 	/*
3603 	 * take the lock to protect our accesses
3604 	 * of the writebehind and sparse cluster state
3605 	 */
3606 	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
3607 
3608 	if (wbp->cl_scmap) {
3609 		if (!(flags & IO_NOCACHE)) {
3610 			/*
3611 			 * we've fallen into the sparse
3612 			 * cluster method of delaying dirty pages
3613 			 */
3614 			sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);
3615 
3616 			lck_mtx_unlock(&wbp->cl_lockw);
3617 			return;
3618 		}
3619 		/*
3620 		 * must have done cached writes that fell into
3621 		 * the sparse cluster mechanism... we've switched
3622 		 * to uncached writes on the file, so go ahead
3623 		 * and push whatever's in the sparse map
3624 		 * and switch back to normal clustering
3625 		 */
3626 		wbp->cl_number = 0;
3627 
3628 		sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg, vm_initiated);
3629 		/*
3630 		 * no clusters of either type present at this point
3631 		 * so just go directly to start_new_cluster since
3632 		 * we know we need to delay this I/O since we've
3633 		 * already released the pages back into the cache
3634 		 * to avoid the deadlock with sparse_cluster_push
3635 		 */
3636 		goto start_new_cluster;
3637 	}
3638 	if (*first_pass == TRUE) {
3639 		if (write_off == wbp->cl_last_write) {
3640 			wbp->cl_seq_written += write_cnt;
3641 		} else {
3642 			wbp->cl_seq_written = write_cnt;
3643 		}
3644 
3645 		wbp->cl_last_write = write_off + write_cnt;
3646 
3647 		*first_pass = FALSE;
3648 	}
3649 	if (wbp->cl_number == 0) {
3650 		/*
3651 		 * no clusters currently present
3652 		 */
3653 		goto start_new_cluster;
3654 	}
3655 
3656 	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
3657 		/*
3658 		 * check each cluster that we currently hold
3659 		 * try to merge some or all of this write into
3660 		 * one or more of the existing clusters... if
3661 		 * any portion of the write remains, start a
3662 		 * new cluster
3663 		 */
3664 		if (cl->b_addr >= wbp->cl_clusters[cl_index].b_addr) {
3665 			/*
3666 			 * the current write starts at or after the current cluster
3667 			 */
3668 			if (cl->e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3669 				/*
3670 				 * we have a write that fits entirely
3671 				 * within the existing cluster limits
3672 				 */
3673 				if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
3674 					/*
3675 					 * update our idea of where the cluster ends
3676 					 */
3677 					wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
3678 				}
3679 				break;
3680 			}
3681 			if (cl->b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3682 				/*
3683 				 * we have a write that starts in the middle of the current cluster
3684 				 * but extends beyond the cluster's limit... we know this because
3685 				 * of the previous checks
3686 				 * we'll extend the current cluster to the max
3687 				 * and update the b_addr for the current write to reflect that
3688 				 * the head of it was absorbed into this cluster...
3689 				 * note that we'll always have a leftover tail in this case since
3690 				 * full absorbtion would have occurred in the clause above
3691 				 */
3692 				wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
3693 
3694 				cl->b_addr = wbp->cl_clusters[cl_index].e_addr;
3695 			}
3696 			/*
3697 			 * we come here for the case where the current write starts
3698 			 * beyond the limit of the existing cluster or we have a leftover
3699 			 * tail after a partial absorbtion
3700 			 *
3701 			 * in either case, we'll check the remaining clusters before
3702 			 * starting a new one
3703 			 */
3704 		} else {
3705 			/*
3706 			 * the current write starts in front of the cluster we're currently considering
3707 			 */
3708 			if ((wbp->cl_clusters[cl_index].e_addr - cl->b_addr) <= max_cluster_pgcount) {
3709 				/*
3710 				 * we can just merge the new request into
3711 				 * this cluster and leave it in the cache
3712 				 * since the resulting cluster is still
3713 				 * less than the maximum allowable size
3714 				 */
3715 				wbp->cl_clusters[cl_index].b_addr = cl->b_addr;
3716 
3717 				if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
3718 					/*
3719 					 * the current write completely
3720 					 * envelops the existing cluster and since
3721 					 * each write is limited to at most max_cluster_pgcount pages
3722 					 * we can just use the start and last blocknos of the write
3723 					 * to generate the cluster limits
3724 					 */
3725 					wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
3726 				}
3727 				break;
3728 			}
3729 			/*
3730 			 * if we were to combine this write with the current cluster
3731 			 * we would exceed the cluster size limit.... so,
3732 			 * let's see if there's any overlap of the new I/O with
3733 			 * the cluster we're currently considering... in fact, we'll
3734 			 * stretch the cluster out to it's full limit and see if we
3735 			 * get an intersection with the current write
3736 			 *
3737 			 */
3738 			if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
3739 				/*
3740 				 * the current write extends into the proposed cluster
3741 				 * clip the length of the current write after first combining it's
3742 				 * tail with the newly shaped cluster
3743 				 */
3744 				wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
3745 
3746 				cl->e_addr = wbp->cl_clusters[cl_index].b_addr;
3747 			}
3748 			/*
3749 			 * if we get here, there was no way to merge
3750 			 * any portion of this write with this cluster
3751 			 * or we could only merge part of it which
3752 			 * will leave a tail...
3753 			 * we'll check the remaining clusters before starting a new one
3754 			 */
3755 		}
3756 	}
3757 	if (cl_index < wbp->cl_number) {
3758 		/*
3759 		 * we found an existing cluster(s) that we
3760 		 * could entirely merge this I/O into
3761 		 */
3762 		goto delay_io;
3763 	}
3764 
3765 	if (defer_writes == FALSE &&
3766 	    wbp->cl_number == MAX_CLUSTERS &&
3767 	    wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) {
3768 		uint32_t        n;
3769 
3770 		if (vp->v_mount->mnt_minsaturationbytecount) {
3771 			n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp);
3772 
3773 			if (n > MAX_CLUSTERS) {
3774 				n = MAX_CLUSTERS;
3775 			}
3776 		} else {
3777 			n = 0;
3778 		}
3779 
3780 		if (n == 0) {
3781 			if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
3782 				n = WRITE_BEHIND_SSD;
3783 			} else {
3784 				n = WRITE_BEHIND;
3785 			}
3786 		}
3787 		while (n--) {
3788 			cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated);
3789 		}
3790 	}
3791 	if (wbp->cl_number < MAX_CLUSTERS) {
3792 		/*
3793 		 * we didn't find an existing cluster to
3794 		 * merge into, but there's room to start
3795 		 * a new one
3796 		 */
3797 		goto start_new_cluster;
3798 	}
3799 	/*
3800 	 * no exisitng cluster to merge with and no
3801 	 * room to start a new one... we'll try
3802 	 * pushing one of the existing ones... if none of
3803 	 * them are able to be pushed, we'll switch
3804 	 * to the sparse cluster mechanism
3805 	 * cluster_try_push updates cl_number to the
3806 	 * number of remaining clusters... and
3807 	 * returns the number of currently unused clusters
3808 	 */
3809 	ret_cluster_try_push = 0;
3810 
3811 	/*
3812 	 * if writes are not deferred, call cluster push immediately
3813 	 */
3814 	if (defer_writes == FALSE) {
3815 		ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated);
3816 	}
3817 	/*
3818 	 * execute following regardless of writes being deferred or not
3819 	 */
3820 	if (ret_cluster_try_push == 0) {
3821 		/*
3822 		 * no more room in the normal cluster mechanism
3823 		 * so let's switch to the more expansive but expensive
3824 		 * sparse mechanism....
3825 		 */
3826 		sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated);
3827 		sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);
3828 
3829 		lck_mtx_unlock(&wbp->cl_lockw);
3830 		return;
3831 	}
3832 start_new_cluster:
3833 	wbp->cl_clusters[wbp->cl_number].b_addr = cl->b_addr;
3834 	wbp->cl_clusters[wbp->cl_number].e_addr = cl->e_addr;
3835 
3836 	wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3837 
3838 	if (flags & IO_NOCACHE) {
3839 		wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3840 	}
3841 
3842 	if (flags & IO_PASSIVE) {
3843 		wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3844 	}
3845 
3846 	wbp->cl_number++;
3847 delay_io:
3848 	lck_mtx_unlock(&wbp->cl_lockw);
3849 	return;
3850 }
3851 
3852 
3853 static int
cluster_write_copy(vnode_t vp,struct uio * uio,u_int32_t io_req_size,off_t oldEOF,off_t newEOF,off_t headOff,off_t tailOff,int flags,int (* callback)(buf_t,void *),void * callback_arg)3854 cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
3855     off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3856 {
3857 	upl_page_info_t *pl;
3858 	upl_t            upl;
3859 	vm_offset_t      upl_offset = 0;
3860 	vm_size_t        upl_size;
3861 	off_t            upl_f_offset;
3862 	int              pages_in_upl;
3863 	int              start_offset;
3864 	int              xfer_resid;
3865 	int              io_size;
3866 	int              io_offset;
3867 	int              bytes_to_zero;
3868 	int              bytes_to_move;
3869 	kern_return_t    kret;
3870 	int              retval = 0;
3871 	int              io_resid;
3872 	long long        total_size;
3873 	long long        zero_cnt;
3874 	off_t            zero_off;
3875 	long long        zero_cnt1;
3876 	off_t            zero_off1;
3877 	off_t            write_off = 0;
3878 	int              write_cnt = 0;
3879 	boolean_t        first_pass = FALSE;
3880 	struct cl_extent cl;
3881 	int              bflag;
3882 	u_int            max_io_size;
3883 
3884 	if (uio) {
3885 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
3886 		    (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
3887 
3888 		io_resid = io_req_size;
3889 	} else {
3890 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
3891 		    0, 0, (int)oldEOF, (int)newEOF, 0);
3892 
3893 		io_resid = 0;
3894 	}
3895 	if (flags & IO_PASSIVE) {
3896 		bflag = CL_PASSIVE;
3897 	} else {
3898 		bflag = 0;
3899 	}
3900 	if (flags & IO_NOCACHE) {
3901 		bflag |= CL_NOCACHE;
3902 	}
3903 
3904 	if (flags & IO_SKIP_ENCRYPTION) {
3905 		bflag |= CL_ENCRYPTED;
3906 	}
3907 
3908 	zero_cnt  = 0;
3909 	zero_cnt1 = 0;
3910 	zero_off  = 0;
3911 	zero_off1 = 0;
3912 
3913 	max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
3914 
3915 	if (flags & IO_HEADZEROFILL) {
3916 		/*
3917 		 * some filesystems (HFS is one) don't support unallocated holes within a file...
3918 		 * so we zero fill the intervening space between the old EOF and the offset
3919 		 * where the next chunk of real data begins.... ftruncate will also use this
3920 		 * routine to zero fill to the new EOF when growing a file... in this case, the
3921 		 * uio structure will not be provided
3922 		 */
3923 		if (uio) {
3924 			if (headOff < uio->uio_offset) {
3925 				zero_cnt = uio->uio_offset - headOff;
3926 				zero_off = headOff;
3927 			}
3928 		} else if (headOff < newEOF) {
3929 			zero_cnt = newEOF - headOff;
3930 			zero_off = headOff;
3931 		}
3932 	} else {
3933 		if (uio && uio->uio_offset > oldEOF) {
3934 			zero_off = uio->uio_offset & ~PAGE_MASK_64;
3935 
3936 			if (zero_off >= oldEOF) {
3937 				zero_cnt = uio->uio_offset - zero_off;
3938 
3939 				flags |= IO_HEADZEROFILL;
3940 			}
3941 		}
3942 	}
3943 	if (flags & IO_TAILZEROFILL) {
3944 		if (uio) {
3945 			zero_off1 = uio->uio_offset + io_req_size;
3946 
3947 			if (zero_off1 < tailOff) {
3948 				zero_cnt1 = tailOff - zero_off1;
3949 			}
3950 		}
3951 	} else {
3952 		if (uio && newEOF > oldEOF) {
3953 			zero_off1 = uio->uio_offset + io_req_size;
3954 
3955 			if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
3956 				zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
3957 
3958 				flags |= IO_TAILZEROFILL;
3959 			}
3960 		}
3961 	}
3962 	if (zero_cnt == 0 && uio == (struct uio *) 0) {
3963 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
3964 		    retval, 0, 0, 0, 0);
3965 		return 0;
3966 	}
3967 	if (uio) {
3968 		write_off = uio->uio_offset;
3969 		write_cnt = (int)uio_resid(uio);
3970 		/*
3971 		 * delay updating the sequential write info
3972 		 * in the control block until we've obtained
3973 		 * the lock for it
3974 		 */
3975 		first_pass = TRUE;
3976 	}
3977 	while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
3978 		/*
3979 		 * for this iteration of the loop, figure out where our starting point is
3980 		 */
3981 		if (zero_cnt) {
3982 			start_offset = (int)(zero_off & PAGE_MASK_64);
3983 			upl_f_offset = zero_off - start_offset;
3984 		} else if (io_resid) {
3985 			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3986 			upl_f_offset = uio->uio_offset - start_offset;
3987 		} else {
3988 			start_offset = (int)(zero_off1 & PAGE_MASK_64);
3989 			upl_f_offset = zero_off1 - start_offset;
3990 		}
3991 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
3992 		    (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
3993 
3994 		if (total_size > max_io_size) {
3995 			total_size = max_io_size;
3996 		}
3997 
3998 		cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
3999 
4000 		if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
4001 			/*
4002 			 * assumption... total_size <= io_resid
4003 			 * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
4004 			 */
4005 			if ((start_offset + total_size) > max_io_size) {
4006 				total_size = max_io_size - start_offset;
4007 			}
4008 			xfer_resid = (int)total_size;
4009 
4010 			retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
4011 
4012 			if (retval) {
4013 				break;
4014 			}
4015 
4016 			io_resid    -= (total_size - xfer_resid);
4017 			total_size   = xfer_resid;
4018 			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4019 			upl_f_offset = uio->uio_offset - start_offset;
4020 
4021 			if (total_size == 0) {
4022 				if (start_offset) {
4023 					/*
4024 					 * the write did not finish on a page boundary
4025 					 * which will leave upl_f_offset pointing to the
4026 					 * beginning of the last page written instead of
4027 					 * the page beyond it... bump it in this case
4028 					 * so that the cluster code records the last page
4029 					 * written as dirty
4030 					 */
4031 					upl_f_offset += PAGE_SIZE_64;
4032 				}
4033 				upl_size = 0;
4034 
4035 				goto check_cluster;
4036 			}
4037 		}
4038 		/*
4039 		 * compute the size of the upl needed to encompass
4040 		 * the requested write... limit each call to cluster_io
4041 		 * to the maximum UPL size... cluster_io will clip if
4042 		 * this exceeds the maximum io_size for the device,
4043 		 * make sure to account for
4044 		 * a starting offset that's not page aligned
4045 		 */
4046 		upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4047 
4048 		if (upl_size > max_io_size) {
4049 			upl_size = max_io_size;
4050 		}
4051 
4052 		pages_in_upl = (int)(upl_size / PAGE_SIZE);
4053 		io_size      = (int)(upl_size - start_offset);
4054 
4055 		if ((long long)io_size > total_size) {
4056 			io_size = (int)total_size;
4057 		}
4058 
4059 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
4060 
4061 
4062 		/*
4063 		 * Gather the pages from the buffer cache.
4064 		 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
4065 		 * that we intend to modify these pages.
4066 		 */
4067 		kret = ubc_create_upl_kernel(vp,
4068 		    upl_f_offset,
4069 		    (int)upl_size,
4070 		    &upl,
4071 		    &pl,
4072 		    UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY),
4073 		    VM_KERN_MEMORY_FILE);
4074 		if (kret != KERN_SUCCESS) {
4075 			panic("cluster_write_copy: failed to get pagelist");
4076 		}
4077 
4078 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
4079 		    upl, (int)upl_f_offset, start_offset, 0, 0);
4080 
4081 		if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
4082 			int   read_size;
4083 
4084 			/*
4085 			 * we're starting in the middle of the first page of the upl
4086 			 * and the page isn't currently valid, so we're going to have
4087 			 * to read it in first... this is a synchronous operation
4088 			 */
4089 			read_size = PAGE_SIZE;
4090 
4091 			if ((upl_f_offset + read_size) > oldEOF) {
4092 				read_size = (int)(oldEOF - upl_f_offset);
4093 			}
4094 
4095 			retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
4096 			    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4097 			if (retval) {
4098 				/*
4099 				 * we had an error during the read which causes us to abort
4100 				 * the current cluster_write request... before we do, we need
4101 				 * to release the rest of the pages in the upl without modifying
4102 				 * there state and mark the failed page in error
4103 				 */
4104 				ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4105 
4106 				if (upl_size > PAGE_SIZE) {
4107 					ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size,
4108 					    UPL_ABORT_FREE_ON_EMPTY);
4109 				}
4110 
4111 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4112 				    upl, 0, 0, retval, 0);
4113 				break;
4114 			}
4115 		}
4116 		if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
4117 			/*
4118 			 * the last offset we're writing to in this upl does not end on a page
4119 			 * boundary... if it's not beyond the old EOF, then we'll also need to
4120 			 * pre-read this page in if it isn't already valid
4121 			 */
4122 			upl_offset = upl_size - PAGE_SIZE;
4123 
4124 			if ((upl_f_offset + start_offset + io_size) < oldEOF &&
4125 			    !upl_valid_page(pl, (int)(upl_offset / PAGE_SIZE))) {
4126 				int   read_size;
4127 
4128 				read_size = PAGE_SIZE;
4129 
4130 				if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) {
4131 					read_size = (int)(oldEOF - (upl_f_offset + upl_offset));
4132 				}
4133 
4134 				retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
4135 				    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4136 				if (retval) {
4137 					/*
4138 					 * we had an error during the read which causes us to abort
4139 					 * the current cluster_write request... before we do, we
4140 					 * need to release the rest of the pages in the upl without
4141 					 * modifying there state and mark the failed page in error
4142 					 */
4143 					ubc_upl_abort_range(upl, (upl_offset_t)upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4144 
4145 					if (upl_size > PAGE_SIZE) {
4146 						ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
4147 					}
4148 
4149 					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4150 					    upl, 0, 0, retval, 0);
4151 					break;
4152 				}
4153 			}
4154 		}
4155 		xfer_resid = io_size;
4156 		io_offset = start_offset;
4157 
4158 		while (zero_cnt && xfer_resid) {
4159 			if (zero_cnt < (long long)xfer_resid) {
4160 				bytes_to_zero = (int)zero_cnt;
4161 			} else {
4162 				bytes_to_zero = xfer_resid;
4163 			}
4164 
4165 			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
4166 
4167 			xfer_resid -= bytes_to_zero;
4168 			zero_cnt   -= bytes_to_zero;
4169 			zero_off   += bytes_to_zero;
4170 			io_offset  += bytes_to_zero;
4171 		}
4172 		if (xfer_resid && io_resid) {
4173 			u_int32_t  io_requested;
4174 
4175 			bytes_to_move = min(io_resid, xfer_resid);
4176 			io_requested = bytes_to_move;
4177 
4178 			retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
4179 
4180 			if (retval) {
4181 				ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
4182 
4183 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4184 				    upl, 0, 0, retval, 0);
4185 			} else {
4186 				io_resid   -= bytes_to_move;
4187 				xfer_resid -= bytes_to_move;
4188 				io_offset  += bytes_to_move;
4189 			}
4190 		}
4191 		while (xfer_resid && zero_cnt1 && retval == 0) {
4192 			if (zero_cnt1 < (long long)xfer_resid) {
4193 				bytes_to_zero = (int)zero_cnt1;
4194 			} else {
4195 				bytes_to_zero = xfer_resid;
4196 			}
4197 
4198 			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
4199 
4200 			xfer_resid -= bytes_to_zero;
4201 			zero_cnt1  -= bytes_to_zero;
4202 			zero_off1  += bytes_to_zero;
4203 			io_offset  += bytes_to_zero;
4204 		}
4205 		if (retval == 0) {
4206 			int do_zeroing = 1;
4207 
4208 			io_size += start_offset;
4209 
4210 			/* Force more restrictive zeroing behavior only on APFS */
4211 			if ((vnode_tag(vp) == VT_APFS) && (newEOF < oldEOF)) {
4212 				do_zeroing = 0;
4213 			}
4214 
4215 			if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
4216 				/*
4217 				 * if we're extending the file with this write
4218 				 * we'll zero fill the rest of the page so that
4219 				 * if the file gets extended again in such a way as to leave a
4220 				 * hole starting at this EOF, we'll have zero's in the correct spot
4221 				 */
4222 				cluster_zero(upl, io_size, (int)(upl_size - io_size), NULL);
4223 			}
4224 			/*
4225 			 * release the upl now if we hold one since...
4226 			 * 1) pages in it may be present in the sparse cluster map
4227 			 *    and may span 2 separate buckets there... if they do and
4228 			 *    we happen to have to flush a bucket to make room and it intersects
4229 			 *    this upl, a deadlock may result on page BUSY
4230 			 * 2) we're delaying the I/O... from this point forward we're just updating
4231 			 *    the cluster state... no need to hold the pages, so commit them
4232 			 * 3) IO_SYNC is set...
4233 			 *    because we had to ask for a UPL that provides currenty non-present pages, the
4234 			 *    UPL has been automatically set to clear the dirty flags (both software and hardware)
4235 			 *    upon committing it... this is not the behavior we want since it's possible for
4236 			 *    pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
4237 			 *    we'll pick these pages back up later with the correct behavior specified.
4238 			 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
4239 			 *    of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
4240 			 *    we hold since the flushing context is holding the cluster lock.
4241 			 */
4242 			ubc_upl_commit_range(upl, 0, (upl_size_t)upl_size,
4243 			    UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
4244 check_cluster:
4245 			/*
4246 			 * calculate the last logical block number
4247 			 * that this delayed I/O encompassed
4248 			 */
4249 			cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
4250 
4251 			if (flags & IO_SYNC) {
4252 				/*
4253 				 * if the IO_SYNC flag is set than we need to bypass
4254 				 * any clustering and immediately issue the I/O
4255 				 *
4256 				 * we don't hold the lock at this point
4257 				 *
4258 				 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
4259 				 * so that we correctly deal with a change in state of the hardware modify bit...
4260 				 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
4261 				 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
4262 				 * responsible for generating the correct sized I/O(s)
4263 				 */
4264 				retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE);
4265 			} else {
4266 				boolean_t defer_writes = FALSE;
4267 
4268 				if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) {
4269 					defer_writes = TRUE;
4270 				}
4271 
4272 				cluster_update_state_internal(vp, &cl, flags, defer_writes, &first_pass,
4273 				    write_off, write_cnt, newEOF, callback, callback_arg, FALSE);
4274 			}
4275 		}
4276 	}
4277 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
4278 
4279 	return retval;
4280 }
4281 
4282 
4283 
4284 int
cluster_read(vnode_t vp,struct uio * uio,off_t filesize,int xflags)4285 cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
4286 {
4287 	return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
4288 }
4289 
4290 
4291 int
cluster_read_ext(vnode_t vp,struct uio * uio,off_t filesize,int xflags,int (* callback)(buf_t,void *),void * callback_arg)4292 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
4293 {
4294 	int             retval = 0;
4295 	int             flags;
4296 	user_ssize_t    cur_resid;
4297 	u_int32_t       io_size;
4298 	u_int32_t       read_length = 0;
4299 	int             read_type = IO_COPY;
4300 	bool            check_io_type;
4301 
4302 	flags = xflags;
4303 
4304 	if (vp->v_flag & VNOCACHE_DATA) {
4305 		flags |= IO_NOCACHE;
4306 	}
4307 	if ((vp->v_flag & VRAOFF) || speculative_reads_disabled) {
4308 		flags |= IO_RAOFF;
4309 	}
4310 
4311 	if (flags & IO_SKIP_ENCRYPTION) {
4312 		flags |= IO_ENCRYPTED;
4313 	}
4314 
4315 	/*
4316 	 * do a read through the cache if one of the following is true....
4317 	 *   NOCACHE is not true
4318 	 *   the uio request doesn't target USERSPACE (unless IO_NOCACHE_SYSSPACE is also set)
4319 	 * Alternatively, if IO_ENCRYPTED is set, then we want to bypass the cache as well.
4320 	 * Reading encrypted data from a CP filesystem should never result in the data touching
4321 	 * the UBC.
4322 	 *
4323 	 * otherwise, find out if we want the direct or contig variant for
4324 	 * the first vector in the uio request
4325 	 */
4326 	check_io_type = false;
4327 	if (flags & IO_NOCACHE) {
4328 		if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
4329 			/*
4330 			 * no-cache to user-space: ok to consider IO_DIRECT.
4331 			 */
4332 			check_io_type = true;
4333 		} else if (uio->uio_segflg == UIO_SYSSPACE &&
4334 		    (flags & IO_NOCACHE_SYSSPACE)) {
4335 			/*
4336 			 * no-cache to kernel-space but w/ IO_NOCACHE_SYSSPACE:
4337 			 * ok to consider IO_DIRECT.
4338 			 * The caller should make sure to target kernel buffer
4339 			 * that is backed by regular anonymous memory (i.e.
4340 			 * not backed by the kernel object or an external
4341 			 * memory manager like device memory or a file).
4342 			 */
4343 			check_io_type = true;
4344 		}
4345 	} else if (flags & IO_ENCRYPTED) {
4346 		check_io_type = true;
4347 	}
4348 	if (check_io_type) {
4349 		retval = cluster_io_type(uio, &read_type, &read_length, 0);
4350 	}
4351 
4352 	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
4353 		switch (read_type) {
4354 		case IO_COPY:
4355 			/*
4356 			 * make sure the uio_resid isn't too big...
4357 			 * internally, we want to handle all of the I/O in
4358 			 * chunk sizes that fit in a 32 bit int
4359 			 */
4360 			if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
4361 				io_size = MAX_IO_REQUEST_SIZE;
4362 			} else {
4363 				io_size = (u_int32_t)cur_resid;
4364 			}
4365 
4366 			retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
4367 			break;
4368 
4369 		case IO_DIRECT:
4370 			retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
4371 			break;
4372 
4373 		case IO_CONTIG:
4374 			retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
4375 			break;
4376 
4377 		case IO_UNKNOWN:
4378 			retval = cluster_io_type(uio, &read_type, &read_length, 0);
4379 			break;
4380 		}
4381 	}
4382 	return retval;
4383 }
4384 
4385 
4386 
4387 static void
cluster_read_upl_release(upl_t upl,int start_pg,int last_pg,int take_reference)4388 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
4389 {
4390 	int range;
4391 	int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
4392 
4393 	if ((range = last_pg - start_pg)) {
4394 		if (take_reference) {
4395 			abort_flags |= UPL_ABORT_REFERENCE;
4396 		}
4397 
4398 		ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
4399 	}
4400 }
4401 
4402 
4403 static int
cluster_read_copy(vnode_t vp,struct uio * uio,u_int32_t io_req_size,off_t filesize,int flags,int (* callback)(buf_t,void *),void * callback_arg)4404 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
4405 {
4406 	upl_page_info_t *pl;
4407 	upl_t            upl = NULL;
4408 	vm_offset_t      upl_offset;
4409 	u_int32_t        upl_size;
4410 	off_t            upl_f_offset;
4411 	int              start_offset;
4412 	int              start_pg;
4413 	int              last_pg;
4414 	int              uio_last = 0;
4415 	int              pages_in_upl;
4416 	off_t            max_size;
4417 	off_t            last_ioread_offset;
4418 	off_t            last_request_offset;
4419 	kern_return_t    kret;
4420 	int              error  = 0;
4421 	int              retval = 0;
4422 	u_int32_t        size_of_prefetch;
4423 	u_int32_t        xsize;
4424 	u_int32_t        io_size;
4425 	u_int32_t        max_rd_size;
4426 	u_int32_t        max_io_size;
4427 	u_int32_t        max_prefetch;
4428 	u_int            rd_ahead_enabled = 1;
4429 	u_int            prefetch_enabled = 1;
4430 	struct cl_readahead *   rap;
4431 	struct clios            iostate;
4432 	struct cl_extent        extent;
4433 	int              bflag;
4434 	int              take_reference = 1;
4435 	int              policy = IOPOL_DEFAULT;
4436 	boolean_t        iolock_inited = FALSE;
4437 
4438 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
4439 	    (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
4440 
4441 	if (flags & IO_ENCRYPTED) {
4442 		panic("encrypted blocks will hit UBC!");
4443 	}
4444 
4445 	policy = throttle_get_io_policy(NULL);
4446 
4447 	if (policy == THROTTLE_LEVEL_TIER3 || policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE)) {
4448 		take_reference = 0;
4449 	}
4450 
4451 	if (flags & IO_PASSIVE) {
4452 		bflag = CL_PASSIVE;
4453 	} else {
4454 		bflag = 0;
4455 	}
4456 
4457 	if (flags & IO_NOCACHE) {
4458 		bflag |= CL_NOCACHE;
4459 	}
4460 
4461 	if (flags & IO_SKIP_ENCRYPTION) {
4462 		bflag |= CL_ENCRYPTED;
4463 	}
4464 
4465 	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
4466 	max_prefetch = cluster_max_prefetch(vp, max_io_size, prefetch_max);
4467 	max_rd_size = max_prefetch;
4468 
4469 	last_request_offset = uio->uio_offset + io_req_size;
4470 
4471 	if (last_request_offset > filesize) {
4472 		last_request_offset = filesize;
4473 	}
4474 
4475 	if ((flags & (IO_RAOFF | IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
4476 		rd_ahead_enabled = 0;
4477 		rap = NULL;
4478 	} else {
4479 		if (cluster_is_throttled(vp)) {
4480 			/*
4481 			 * we're in the throttle window, at the very least
4482 			 * we want to limit the size of the I/O we're about
4483 			 * to issue
4484 			 */
4485 			rd_ahead_enabled = 0;
4486 			prefetch_enabled = 0;
4487 
4488 			max_rd_size = calculate_max_throttle_size(vp);
4489 		}
4490 		if ((rap = cluster_get_rap(vp)) == NULL) {
4491 			rd_ahead_enabled = 0;
4492 		} else {
4493 			extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
4494 			extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
4495 		}
4496 	}
4497 	if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
4498 		/*
4499 		 * determine if we already have a read-ahead in the pipe courtesy of the
4500 		 * last read systemcall that was issued...
4501 		 * if so, pick up it's extent to determine where we should start
4502 		 * with respect to any read-ahead that might be necessary to
4503 		 * garner all the data needed to complete this read systemcall
4504 		 */
4505 		last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
4506 
4507 		if (last_ioread_offset < uio->uio_offset) {
4508 			last_ioread_offset = (off_t)0;
4509 		} else if (last_ioread_offset > last_request_offset) {
4510 			last_ioread_offset = last_request_offset;
4511 		}
4512 	} else {
4513 		last_ioread_offset = (off_t)0;
4514 	}
4515 
4516 	while (io_req_size && uio->uio_offset < filesize && retval == 0) {
4517 		max_size = filesize - uio->uio_offset;
4518 		bool leftover_upl_aborted = false;
4519 
4520 		if ((off_t)(io_req_size) < max_size) {
4521 			io_size = io_req_size;
4522 		} else {
4523 			io_size = (u_int32_t)max_size;
4524 		}
4525 
4526 		if (!(flags & IO_NOCACHE)) {
4527 			while (io_size) {
4528 				u_int32_t io_resid;
4529 				u_int32_t io_requested;
4530 
4531 				/*
4532 				 * if we keep finding the pages we need already in the cache, then
4533 				 * don't bother to call cluster_read_prefetch since it costs CPU cycles
4534 				 * to determine that we have all the pages we need... once we miss in
4535 				 * the cache and have issued an I/O, than we'll assume that we're likely
4536 				 * to continue to miss in the cache and it's to our advantage to try and prefetch
4537 				 */
4538 				if (last_request_offset && last_ioread_offset && (size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset))) {
4539 					if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
4540 						/*
4541 						 * we've already issued I/O for this request and
4542 						 * there's still work to do and
4543 						 * our prefetch stream is running dry, so issue a
4544 						 * pre-fetch I/O... the I/O latency will overlap
4545 						 * with the copying of the data
4546 						 */
4547 						if (size_of_prefetch > max_rd_size) {
4548 							size_of_prefetch = max_rd_size;
4549 						}
4550 
4551 						size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4552 
4553 						last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4554 
4555 						if (last_ioread_offset > last_request_offset) {
4556 							last_ioread_offset = last_request_offset;
4557 						}
4558 					}
4559 				}
4560 				/*
4561 				 * limit the size of the copy we're about to do so that
4562 				 * we can notice that our I/O pipe is running dry and
4563 				 * get the next I/O issued before it does go dry
4564 				 */
4565 				if (last_ioread_offset && io_size > (max_io_size / 4)) {
4566 					io_resid = (max_io_size / 4);
4567 				} else {
4568 					io_resid = io_size;
4569 				}
4570 
4571 				io_requested = io_resid;
4572 
4573 				retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);
4574 
4575 				xsize = io_requested - io_resid;
4576 
4577 				io_size -= xsize;
4578 				io_req_size -= xsize;
4579 
4580 				if (retval || io_resid) {
4581 					/*
4582 					 * if we run into a real error or
4583 					 * a page that is not in the cache
4584 					 * we need to leave streaming mode
4585 					 */
4586 					break;
4587 				}
4588 
4589 				if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
4590 					/*
4591 					 * we're already finished the I/O for this read request
4592 					 * let's see if we should do a read-ahead
4593 					 */
4594 					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4595 				}
4596 			}
4597 			if (retval) {
4598 				break;
4599 			}
4600 			if (io_size == 0) {
4601 				if (rap != NULL) {
4602 					if (extent.e_addr < rap->cl_lastr) {
4603 						rap->cl_maxra = 0;
4604 					}
4605 					rap->cl_lastr = extent.e_addr;
4606 				}
4607 				break;
4608 			}
4609 			/*
4610 			 * recompute max_size since cluster_copy_ubc_data_internal
4611 			 * may have advanced uio->uio_offset
4612 			 */
4613 			max_size = filesize - uio->uio_offset;
4614 		}
4615 
4616 		iostate.io_completed = 0;
4617 		iostate.io_issued = 0;
4618 		iostate.io_error = 0;
4619 		iostate.io_wanted = 0;
4620 
4621 		if ((flags & IO_RETURN_ON_THROTTLE)) {
4622 			if (cluster_is_throttled(vp) == THROTTLE_NOW) {
4623 				if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
4624 					/*
4625 					 * we're in the throttle window and at least 1 I/O
4626 					 * has already been issued by a throttleable thread
4627 					 * in this window, so return with EAGAIN to indicate
4628 					 * to the FS issuing the cluster_read call that it
4629 					 * should now throttle after dropping any locks
4630 					 */
4631 					throttle_info_update_by_mount(vp->v_mount);
4632 
4633 					retval = EAGAIN;
4634 					break;
4635 				}
4636 			}
4637 		}
4638 
4639 		/*
4640 		 * compute the size of the upl needed to encompass
4641 		 * the requested read... limit each call to cluster_io
4642 		 * to the maximum UPL size... cluster_io will clip if
4643 		 * this exceeds the maximum io_size for the device,
4644 		 * make sure to account for
4645 		 * a starting offset that's not page aligned
4646 		 */
4647 		start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4648 		upl_f_offset = uio->uio_offset - (off_t)start_offset;
4649 
4650 		if (io_size > max_rd_size) {
4651 			io_size = max_rd_size;
4652 		}
4653 
4654 		upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4655 
4656 		if (flags & IO_NOCACHE) {
4657 			if (upl_size > max_io_size) {
4658 				upl_size = max_io_size;
4659 			}
4660 		} else {
4661 			if (upl_size > max_io_size / 4) {
4662 				upl_size = max_io_size / 4;
4663 				upl_size &= ~PAGE_MASK;
4664 
4665 				if (upl_size == 0) {
4666 					upl_size = PAGE_SIZE;
4667 				}
4668 			}
4669 		}
4670 		pages_in_upl = upl_size / PAGE_SIZE;
4671 
4672 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
4673 		    upl, (int)upl_f_offset, upl_size, start_offset, 0);
4674 
4675 		kret = ubc_create_upl_kernel(vp,
4676 		    upl_f_offset,
4677 		    upl_size,
4678 		    &upl,
4679 		    &pl,
4680 		    UPL_FILE_IO | UPL_SET_LITE,
4681 		    VM_KERN_MEMORY_FILE);
4682 		if (kret != KERN_SUCCESS) {
4683 			panic("cluster_read_copy: failed to get pagelist");
4684 		}
4685 
4686 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
4687 		    upl, (int)upl_f_offset, upl_size, start_offset, 0);
4688 
4689 		/*
4690 		 * scan from the beginning of the upl looking for the first
4691 		 * non-valid page.... this will become the first page in
4692 		 * the request we're going to make to 'cluster_io'... if all
4693 		 * of the pages are valid, we won't call through to 'cluster_io'
4694 		 */
4695 		for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
4696 			if (!upl_valid_page(pl, start_pg)) {
4697 				break;
4698 			}
4699 		}
4700 
4701 		/*
4702 		 * scan from the starting invalid page looking for a valid
4703 		 * page before the end of the upl is reached, if we
4704 		 * find one, then it will be the last page of the request to
4705 		 * 'cluster_io'
4706 		 */
4707 		for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
4708 			if (upl_valid_page(pl, last_pg)) {
4709 				break;
4710 			}
4711 		}
4712 
4713 		if (start_pg < last_pg) {
4714 			/*
4715 			 * we found a range of 'invalid' pages that must be filled
4716 			 * if the last page in this range is the last page of the file
4717 			 * we may have to clip the size of it to keep from reading past
4718 			 * the end of the last physical block associated with the file
4719 			 */
4720 			if (iolock_inited == FALSE) {
4721 				lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
4722 
4723 				iolock_inited = TRUE;
4724 			}
4725 			upl_offset = start_pg * PAGE_SIZE;
4726 			io_size    = (last_pg - start_pg) * PAGE_SIZE;
4727 
4728 			if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
4729 				io_size = (u_int32_t)(filesize - (upl_f_offset + upl_offset));
4730 			}
4731 
4732 			/*
4733 			 * Find out if this needs verification, we'll have to manage the UPL
4734 			 * diffrently if so. Note that this call only lets us know if
4735 			 * verification is enabled on this mount point, the actual verification
4736 			 * is performed in the File system.
4737 			 */
4738 			size_t verify_block_size = 0;
4739 			if ((VNOP_VERIFY(vp, start_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL) == 0) /* && verify_block_size */) {
4740 				for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4741 					if (!upl_valid_page(pl, uio_last)) {
4742 						break;
4743 					}
4744 				}
4745 				if (uio_last < pages_in_upl) {
4746 					/*
4747 					 * there were some invalid pages beyond the valid pages
4748 					 * that we didn't issue an I/O for, just release them
4749 					 * unchanged now, so that any prefetch/readahed can
4750 					 * include them
4751 					 */
4752 					ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4753 					    (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4754 					leftover_upl_aborted = true;
4755 				}
4756 			}
4757 
4758 			/*
4759 			 * issue an asynchronous read to cluster_io
4760 			 */
4761 
4762 			error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
4763 			    io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
4764 
4765 			if (rap) {
4766 				if (extent.e_addr < rap->cl_maxra) {
4767 					/*
4768 					 * we've just issued a read for a block that should have been
4769 					 * in the cache courtesy of the read-ahead engine... something
4770 					 * has gone wrong with the pipeline, so reset the read-ahead
4771 					 * logic which will cause us to restart from scratch
4772 					 */
4773 					rap->cl_maxra = 0;
4774 				}
4775 			}
4776 		}
4777 		if (error == 0) {
4778 			/*
4779 			 * if the read completed successfully, or there was no I/O request
4780 			 * issued, than copy the data into user land via 'cluster_upl_copy_data'
4781 			 * we'll first add on any 'valid'
4782 			 * pages that were present in the upl when we acquired it.
4783 			 */
4784 			u_int  val_size;
4785 
4786 			if (!leftover_upl_aborted) {
4787 				for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4788 					if (!upl_valid_page(pl, uio_last)) {
4789 						break;
4790 					}
4791 				}
4792 				if (uio_last < pages_in_upl) {
4793 					/*
4794 					 * there were some invalid pages beyond the valid pages
4795 					 * that we didn't issue an I/O for, just release them
4796 					 * unchanged now, so that any prefetch/readahed can
4797 					 * include them
4798 					 */
4799 					ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4800 					    (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4801 				}
4802 			}
4803 
4804 			/*
4805 			 * compute size to transfer this round,  if io_req_size is
4806 			 * still non-zero after this attempt, we'll loop around and
4807 			 * set up for another I/O.
4808 			 */
4809 			val_size = (uio_last * PAGE_SIZE) - start_offset;
4810 
4811 			if (val_size > max_size) {
4812 				val_size = (u_int)max_size;
4813 			}
4814 
4815 			if (val_size > io_req_size) {
4816 				val_size = io_req_size;
4817 			}
4818 
4819 			if ((uio->uio_offset + val_size) > last_ioread_offset) {
4820 				last_ioread_offset = uio->uio_offset + val_size;
4821 			}
4822 
4823 			if ((size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset)) && prefetch_enabled) {
4824 				if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
4825 					/*
4826 					 * if there's still I/O left to do for this request, and...
4827 					 * we're not in hard throttle mode, and...
4828 					 * we're close to using up the previous prefetch, then issue a
4829 					 * new pre-fetch I/O... the I/O latency will overlap
4830 					 * with the copying of the data
4831 					 */
4832 					if (size_of_prefetch > max_rd_size) {
4833 						size_of_prefetch = max_rd_size;
4834 					}
4835 
4836 					size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4837 
4838 					last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4839 
4840 					if (last_ioread_offset > last_request_offset) {
4841 						last_ioread_offset = last_request_offset;
4842 					}
4843 				}
4844 			} else if ((uio->uio_offset + val_size) == last_request_offset) {
4845 				/*
4846 				 * this transfer will finish this request, so...
4847 				 * let's try to read ahead if we're in
4848 				 * a sequential access pattern and we haven't
4849 				 * explicitly disabled it
4850 				 */
4851 				if (rd_ahead_enabled) {
4852 					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4853 				}
4854 
4855 				if (rap != NULL) {
4856 					if (extent.e_addr < rap->cl_lastr) {
4857 						rap->cl_maxra = 0;
4858 					}
4859 					rap->cl_lastr = extent.e_addr;
4860 				}
4861 			}
4862 			if (iolock_inited == TRUE) {
4863 				cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4864 			}
4865 
4866 			if (iostate.io_error) {
4867 				error = iostate.io_error;
4868 			} else {
4869 				u_int32_t io_requested;
4870 
4871 				io_requested = val_size;
4872 
4873 				retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
4874 
4875 				io_req_size -= (val_size - io_requested);
4876 			}
4877 		} else {
4878 			if (iolock_inited == TRUE) {
4879 				cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4880 			}
4881 		}
4882 		if (start_pg < last_pg) {
4883 			/*
4884 			 * compute the range of pages that we actually issued an I/O for
4885 			 * and either commit them as valid if the I/O succeeded
4886 			 * or abort them if the I/O failed or we're not supposed to
4887 			 * keep them in the cache
4888 			 */
4889 			io_size = (last_pg - start_pg) * PAGE_SIZE;
4890 
4891 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4892 
4893 			if (error || (flags & IO_NOCACHE)) {
4894 				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
4895 				    UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4896 			} else {
4897 				int     commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;
4898 
4899 				if (take_reference) {
4900 					commit_flags |= UPL_COMMIT_INACTIVATE;
4901 				} else {
4902 					commit_flags |= UPL_COMMIT_SPECULATE;
4903 				}
4904 
4905 				ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
4906 			}
4907 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4908 		}
4909 		if ((last_pg - start_pg) < pages_in_upl) {
4910 			/*
4911 			 * the set of pages that we issued an I/O for did not encompass
4912 			 * the entire upl... so just release these without modifying
4913 			 * their state
4914 			 */
4915 			if (error) {
4916 				if (leftover_upl_aborted) {
4917 					ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, (uio_last - start_pg) * PAGE_SIZE,
4918 					    UPL_ABORT_FREE_ON_EMPTY);
4919 				} else {
4920 					ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
4921 				}
4922 			} else {
4923 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
4924 				    upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
4925 
4926 				/*
4927 				 * handle any valid pages at the beginning of
4928 				 * the upl... release these appropriately
4929 				 */
4930 				cluster_read_upl_release(upl, 0, start_pg, take_reference);
4931 
4932 				/*
4933 				 * handle any valid pages immediately after the
4934 				 * pages we issued I/O for... ... release these appropriately
4935 				 */
4936 				cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
4937 
4938 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
4939 			}
4940 		}
4941 		if (retval == 0) {
4942 			retval = error;
4943 		}
4944 
4945 		if (io_req_size) {
4946 			uint32_t max_throttle_size = calculate_max_throttle_size(vp);
4947 
4948 			if (cluster_is_throttled(vp)) {
4949 				/*
4950 				 * we're in the throttle window, at the very least
4951 				 * we want to limit the size of the I/O we're about
4952 				 * to issue
4953 				 */
4954 				rd_ahead_enabled = 0;
4955 				prefetch_enabled = 0;
4956 				max_rd_size = max_throttle_size;
4957 			} else {
4958 				if (max_rd_size == max_throttle_size) {
4959 					/*
4960 					 * coming out of throttled state
4961 					 */
4962 					if (policy != THROTTLE_LEVEL_TIER3 && policy != THROTTLE_LEVEL_TIER2) {
4963 						if (rap != NULL) {
4964 							rd_ahead_enabled = 1;
4965 						}
4966 						prefetch_enabled = 1;
4967 					}
4968 					max_rd_size = max_prefetch;
4969 					last_ioread_offset = 0;
4970 				}
4971 			}
4972 		}
4973 	}
4974 	if (iolock_inited == TRUE) {
4975 		/*
4976 		 * cluster_io returned an error after it
4977 		 * had already issued some I/O.  we need
4978 		 * to wait for that I/O to complete before
4979 		 * we can destroy the iostate mutex...
4980 		 * 'retval' already contains the early error
4981 		 * so no need to pick it up from iostate.io_error
4982 		 */
4983 		cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4984 
4985 		lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
4986 	}
4987 	if (rap != NULL) {
4988 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
4989 		    (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
4990 
4991 		lck_mtx_unlock(&rap->cl_lockr);
4992 	} else {
4993 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
4994 		    (int)uio->uio_offset, io_req_size, 0, retval, 0);
4995 	}
4996 
4997 	return retval;
4998 }
4999 
5000 /*
5001  * We don't want another read/write lock for every vnode in the system
5002  * so we keep a hash of them here.  There should never be very many of
5003  * these around at any point in time.
5004  */
5005 cl_direct_read_lock_t *
cluster_lock_direct_read(vnode_t vp,lck_rw_type_t type)5006 cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type)
5007 {
5008 	struct cl_direct_read_locks *head
5009 	        = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp)
5010 	    % CL_DIRECT_READ_LOCK_BUCKETS];
5011 
5012 	struct cl_direct_read_lock *lck, *new_lck = NULL;
5013 
5014 	for (;;) {
5015 		lck_spin_lock(&cl_direct_read_spin_lock);
5016 
5017 		LIST_FOREACH(lck, head, chain) {
5018 			if (lck->vp == vp) {
5019 				++lck->ref_count;
5020 				lck_spin_unlock(&cl_direct_read_spin_lock);
5021 				if (new_lck) {
5022 					// Someone beat us to it, ditch the allocation
5023 					lck_rw_destroy(&new_lck->rw_lock, &cl_mtx_grp);
5024 					kfree_type(cl_direct_read_lock_t, new_lck);
5025 				}
5026 				lck_rw_lock(&lck->rw_lock, type);
5027 				return lck;
5028 			}
5029 		}
5030 
5031 		if (new_lck) {
5032 			// Use the lock we allocated
5033 			LIST_INSERT_HEAD(head, new_lck, chain);
5034 			lck_spin_unlock(&cl_direct_read_spin_lock);
5035 			lck_rw_lock(&new_lck->rw_lock, type);
5036 			return new_lck;
5037 		}
5038 
5039 		lck_spin_unlock(&cl_direct_read_spin_lock);
5040 
5041 		// Allocate a new lock
5042 		new_lck = kalloc_type(cl_direct_read_lock_t, Z_WAITOK);
5043 		lck_rw_init(&new_lck->rw_lock, &cl_mtx_grp, LCK_ATTR_NULL);
5044 		new_lck->vp = vp;
5045 		new_lck->ref_count = 1;
5046 
5047 		// Got to go round again
5048 	}
5049 }
5050 
5051 void
cluster_unlock_direct_read(cl_direct_read_lock_t * lck)5052 cluster_unlock_direct_read(cl_direct_read_lock_t *lck)
5053 {
5054 	lck_rw_done(&lck->rw_lock);
5055 
5056 	lck_spin_lock(&cl_direct_read_spin_lock);
5057 	if (lck->ref_count == 1) {
5058 		LIST_REMOVE(lck, chain);
5059 		lck_spin_unlock(&cl_direct_read_spin_lock);
5060 		lck_rw_destroy(&lck->rw_lock, &cl_mtx_grp);
5061 		kfree_type(cl_direct_read_lock_t, lck);
5062 	} else {
5063 		--lck->ref_count;
5064 		lck_spin_unlock(&cl_direct_read_spin_lock);
5065 	}
5066 }
5067 
5068 static int
cluster_read_direct(vnode_t vp,struct uio * uio,off_t filesize,int * read_type,u_int32_t * read_length,int flags,int (* callback)(buf_t,void *),void * callback_arg)5069 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5070     int flags, int (*callback)(buf_t, void *), void *callback_arg)
5071 {
5072 	upl_t            upl = NULL;
5073 	upl_page_info_t  *pl;
5074 	off_t            max_io_size;
5075 	vm_offset_t      upl_offset, vector_upl_offset = 0;
5076 	upl_size_t       upl_size = 0, vector_upl_size = 0;
5077 	vm_size_t        upl_needed_size;
5078 	unsigned int     pages_in_pl;
5079 	upl_control_flags_t upl_flags;
5080 	kern_return_t    kret = KERN_SUCCESS;
5081 	unsigned int     i;
5082 	int              force_data_sync;
5083 	int              retval = 0;
5084 	int              no_zero_fill = 0;
5085 	int              io_flag = 0;
5086 	int              misaligned = 0;
5087 	struct clios     iostate;
5088 	user_addr_t      iov_base;
5089 	u_int32_t        io_req_size;
5090 	u_int32_t        offset_in_file;
5091 	u_int32_t        offset_in_iovbase;
5092 	u_int32_t        io_size;
5093 	u_int32_t        io_min;
5094 	u_int32_t        xsize;
5095 	u_int32_t        devblocksize;
5096 	u_int32_t        mem_alignment_mask;
5097 	u_int32_t        max_upl_size;
5098 	u_int32_t        max_rd_size;
5099 	u_int32_t        max_rd_ahead;
5100 	u_int32_t        max_vector_size;
5101 	boolean_t        io_throttled = FALSE;
5102 
5103 	u_int32_t        vector_upl_iosize = 0;
5104 	int              issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
5105 	off_t            v_upl_uio_offset = 0;
5106 	int              vector_upl_index = 0;
5107 	upl_t            vector_upl = NULL;
5108 	cl_direct_read_lock_t *lock = NULL;
5109 
5110 	assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
5111 
5112 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
5113 	    (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5114 
5115 	max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
5116 
5117 	max_rd_size = max_upl_size;
5118 
5119 	if (__improbable(os_mul_overflow(max_rd_size, IO_SCALE(vp, 2),
5120 	    &max_rd_ahead) || (max_rd_ahead > overlapping_read_max))) {
5121 		max_rd_ahead = overlapping_read_max;
5122 	}
5123 
5124 	io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
5125 
5126 	if (flags & IO_PASSIVE) {
5127 		io_flag |= CL_PASSIVE;
5128 	}
5129 
5130 	if (flags & IO_ENCRYPTED) {
5131 		io_flag |= CL_RAW_ENCRYPTED;
5132 	}
5133 
5134 	if (flags & IO_NOCACHE) {
5135 		io_flag |= CL_NOCACHE;
5136 	}
5137 
5138 	if (flags & IO_SKIP_ENCRYPTION) {
5139 		io_flag |= CL_ENCRYPTED;
5140 	}
5141 
5142 	iostate.io_completed = 0;
5143 	iostate.io_issued = 0;
5144 	iostate.io_error = 0;
5145 	iostate.io_wanted = 0;
5146 
5147 	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5148 
5149 	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5150 	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5151 
5152 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5153 	    (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
5154 
5155 	if (devblocksize == 1) {
5156 		/*
5157 		 * the AFP client advertises a devblocksize of 1
5158 		 * however, its BLOCKMAP routine maps to physical
5159 		 * blocks that are PAGE_SIZE in size...
5160 		 * therefore we can't ask for I/Os that aren't page aligned
5161 		 * or aren't multiples of PAGE_SIZE in size
5162 		 * by setting devblocksize to PAGE_SIZE, we re-instate
5163 		 * the old behavior we had before the mem_alignment_mask
5164 		 * changes went in...
5165 		 */
5166 		devblocksize = PAGE_SIZE;
5167 	}
5168 
5169 	/*
5170 	 * We are going to need this uio for the prefaulting later
5171 	 * especially for the cases where multiple non-contiguous
5172 	 * iovs are passed into this routine.
5173 	 *
5174 	 * Note that we only want to prefault for direct IOs to userspace buffers,
5175 	 * not kernel buffers.
5176 	 */
5177 	uio_t uio_acct = NULL;
5178 	if (uio->uio_segflg != UIO_SYSSPACE) {
5179 		uio_acct = uio_duplicate(uio);
5180 	}
5181 
5182 next_dread:
5183 	io_req_size = *read_length;
5184 	iov_base = uio_curriovbase(uio);
5185 
5186 	offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
5187 	offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
5188 
5189 	if (vm_map_page_mask(current_map()) < PAGE_MASK) {
5190 		/*
5191 		 * XXX TODO4K
5192 		 * Direct I/O might not work as expected from a 16k kernel space
5193 		 * to a 4k user space because each 4k chunk might point to
5194 		 * a different 16k physical page...
5195 		 * Let's go the "misaligned" way.
5196 		 */
5197 		if (!misaligned) {
5198 			DEBUG4K_VFS("forcing misaligned\n");
5199 		}
5200 		misaligned = 1;
5201 	}
5202 
5203 	if (offset_in_file || offset_in_iovbase) {
5204 		/*
5205 		 * one of the 2 important offsets is misaligned
5206 		 * so fire an I/O through the cache for this entire vector
5207 		 */
5208 		misaligned = 1;
5209 	}
5210 	if (iov_base & (devblocksize - 1)) {
5211 		/*
5212 		 * the offset in memory must be on a device block boundary
5213 		 * so that we can guarantee that we can generate an
5214 		 * I/O that ends on a page boundary in cluster_io
5215 		 */
5216 		misaligned = 1;
5217 	}
5218 
5219 	max_io_size = filesize - uio->uio_offset;
5220 
5221 	/*
5222 	 * The user must request IO in aligned chunks.  If the
5223 	 * offset into the file is bad, or the userland pointer
5224 	 * is non-aligned, then we cannot service the encrypted IO request.
5225 	 */
5226 	if (flags & IO_ENCRYPTED) {
5227 		if (misaligned || (io_req_size & (devblocksize - 1))) {
5228 			retval = EINVAL;
5229 		}
5230 
5231 		max_io_size = roundup(max_io_size, devblocksize);
5232 	}
5233 
5234 	if ((off_t)io_req_size > max_io_size) {
5235 		io_req_size = (u_int32_t)max_io_size;
5236 	}
5237 
5238 	/*
5239 	 * When we get to this point, we know...
5240 	 *  -- the offset into the file is on a devblocksize boundary
5241 	 */
5242 
5243 	while (io_req_size && retval == 0) {
5244 		u_int32_t io_start;
5245 
5246 		if (cluster_is_throttled(vp)) {
5247 			uint32_t max_throttle_size = calculate_max_throttle_size(vp);
5248 
5249 			/*
5250 			 * we're in the throttle window, at the very least
5251 			 * we want to limit the size of the I/O we're about
5252 			 * to issue
5253 			 */
5254 			max_rd_size  = max_throttle_size;
5255 			max_rd_ahead = max_throttle_size - 1;
5256 			max_vector_size = max_throttle_size;
5257 		} else {
5258 			max_rd_size  = max_upl_size;
5259 			max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
5260 			max_vector_size = MAX_VECTOR_UPL_SIZE;
5261 		}
5262 		io_start = io_size = io_req_size;
5263 
5264 		/*
5265 		 * First look for pages already in the cache
5266 		 * and move them to user space.  But only do this
5267 		 * check if we are not retrieving encrypted data directly
5268 		 * from the filesystem;  those blocks should never
5269 		 * be in the UBC.
5270 		 *
5271 		 * cluster_copy_ubc_data returns the resid
5272 		 * in io_size
5273 		 */
5274 		if ((flags & IO_ENCRYPTED) == 0) {
5275 			retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
5276 		}
5277 		/*
5278 		 * calculate the number of bytes actually copied
5279 		 * starting size - residual
5280 		 */
5281 		xsize = io_start - io_size;
5282 
5283 		io_req_size -= xsize;
5284 
5285 		if (useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
5286 			/*
5287 			 * We found something in the cache or we have an iov_base that's not
5288 			 * page-aligned.
5289 			 *
5290 			 * Issue all I/O's that have been collected within this Vectored UPL.
5291 			 */
5292 			if (vector_upl_index) {
5293 				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5294 				reset_vector_run_state();
5295 			}
5296 
5297 			if (xsize) {
5298 				useVectorUPL = 0;
5299 			}
5300 
5301 			/*
5302 			 * After this point, if we are using the Vector UPL path and the base is
5303 			 * not page-aligned then the UPL with that base will be the first in the vector UPL.
5304 			 */
5305 		}
5306 
5307 		/*
5308 		 * check to see if we are finished with this request.
5309 		 *
5310 		 * If we satisfied this IO already, then io_req_size will be 0.
5311 		 * Otherwise, see if the IO was mis-aligned and needs to go through
5312 		 * the UBC to deal with the 'tail'.
5313 		 *
5314 		 */
5315 		if (io_req_size == 0 || (misaligned)) {
5316 			/*
5317 			 * see if there's another uio vector to
5318 			 * process that's of type IO_DIRECT
5319 			 *
5320 			 * break out of while loop to get there
5321 			 */
5322 			break;
5323 		}
5324 		/*
5325 		 * assume the request ends on a device block boundary
5326 		 */
5327 		io_min = devblocksize;
5328 
5329 		/*
5330 		 * we can handle I/O's in multiples of the device block size
5331 		 * however, if io_size isn't a multiple of devblocksize we
5332 		 * want to clip it back to the nearest page boundary since
5333 		 * we are going to have to go through cluster_read_copy to
5334 		 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
5335 		 * multiple, we avoid asking the drive for the same physical
5336 		 * blocks twice.. once for the partial page at the end of the
5337 		 * request and a 2nd time for the page we read into the cache
5338 		 * (which overlaps the end of the direct read) in order to
5339 		 * get at the overhang bytes
5340 		 */
5341 		if (io_size & (devblocksize - 1)) {
5342 			assert(!(flags & IO_ENCRYPTED));
5343 			/*
5344 			 * Clip the request to the previous page size boundary
5345 			 * since request does NOT end on a device block boundary
5346 			 */
5347 			io_size &= ~PAGE_MASK;
5348 			io_min = PAGE_SIZE;
5349 		}
5350 		if (retval || io_size < io_min) {
5351 			/*
5352 			 * either an error or we only have the tail left to
5353 			 * complete via the copy path...
5354 			 * we may have already spun some portion of this request
5355 			 * off as async requests... we need to wait for the I/O
5356 			 * to complete before returning
5357 			 */
5358 			goto wait_for_dreads;
5359 		}
5360 
5361 		/*
5362 		 * Don't re-check the UBC data if we are looking for uncached IO
5363 		 * or asking for encrypted blocks.
5364 		 */
5365 		if ((flags & IO_ENCRYPTED) == 0) {
5366 			if ((xsize = io_size) > max_rd_size) {
5367 				xsize = max_rd_size;
5368 			}
5369 
5370 			io_size = 0;
5371 
5372 			if (!lock) {
5373 				/*
5374 				 * We hold a lock here between the time we check the
5375 				 * cache and the time we issue I/O.  This saves us
5376 				 * from having to lock the pages in the cache.  Not
5377 				 * all clients will care about this lock but some
5378 				 * clients may want to guarantee stability between
5379 				 * here and when the I/O is issued in which case they
5380 				 * will take the lock exclusively.
5381 				 */
5382 				lock = cluster_lock_direct_read(vp, LCK_RW_TYPE_SHARED);
5383 			}
5384 
5385 			ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
5386 
5387 			if (io_size == 0) {
5388 				/*
5389 				 * a page must have just come into the cache
5390 				 * since the first page in this range is no
5391 				 * longer absent, go back and re-evaluate
5392 				 */
5393 				continue;
5394 			}
5395 		}
5396 		if ((flags & IO_RETURN_ON_THROTTLE)) {
5397 			if (cluster_is_throttled(vp) == THROTTLE_NOW) {
5398 				if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
5399 					/*
5400 					 * we're in the throttle window and at least 1 I/O
5401 					 * has already been issued by a throttleable thread
5402 					 * in this window, so return with EAGAIN to indicate
5403 					 * to the FS issuing the cluster_read call that it
5404 					 * should now throttle after dropping any locks
5405 					 */
5406 					throttle_info_update_by_mount(vp->v_mount);
5407 
5408 					io_throttled = TRUE;
5409 					goto wait_for_dreads;
5410 				}
5411 			}
5412 		}
5413 		if (io_size > max_rd_size) {
5414 			io_size = max_rd_size;
5415 		}
5416 
5417 		iov_base = uio_curriovbase(uio);
5418 
5419 		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5420 		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5421 
5422 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
5423 		    (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
5424 
5425 		if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
5426 			no_zero_fill = 1;
5427 		} else {
5428 			no_zero_fill = 0;
5429 		}
5430 
5431 		vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5432 		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
5433 			pages_in_pl = 0;
5434 			upl_size = (upl_size_t)upl_needed_size;
5435 			upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5436 			if (no_zero_fill) {
5437 				upl_flags |= UPL_NOZEROFILL;
5438 			}
5439 			if (force_data_sync) {
5440 				upl_flags |= UPL_FORCE_DATA_SYNC;
5441 			}
5442 
5443 			kret = vm_map_create_upl(map,
5444 			    (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
5445 			    &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE);
5446 
5447 			if (kret != KERN_SUCCESS) {
5448 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5449 				    (int)upl_offset, upl_size, io_size, kret, 0);
5450 				/*
5451 				 * failed to get pagelist
5452 				 *
5453 				 * we may have already spun some portion of this request
5454 				 * off as async requests... we need to wait for the I/O
5455 				 * to complete before returning
5456 				 */
5457 				goto wait_for_dreads;
5458 			}
5459 			pages_in_pl = upl_size / PAGE_SIZE;
5460 			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
5461 
5462 			for (i = 0; i < pages_in_pl; i++) {
5463 				if (!upl_page_present(pl, i)) {
5464 					break;
5465 				}
5466 			}
5467 			if (i == pages_in_pl) {
5468 				break;
5469 			}
5470 
5471 			ubc_upl_abort(upl, 0);
5472 		}
5473 		if (force_data_sync >= 3) {
5474 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5475 			    (int)upl_offset, upl_size, io_size, kret, 0);
5476 
5477 			goto wait_for_dreads;
5478 		}
5479 		/*
5480 		 * Consider the possibility that upl_size wasn't satisfied.
5481 		 */
5482 		if (upl_size < upl_needed_size) {
5483 			if (upl_size && upl_offset == 0) {
5484 				io_size = upl_size;
5485 			} else {
5486 				io_size = 0;
5487 			}
5488 		}
5489 		if (io_size == 0) {
5490 			ubc_upl_abort(upl, 0);
5491 			goto wait_for_dreads;
5492 		}
5493 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5494 		    (int)upl_offset, upl_size, io_size, kret, 0);
5495 
5496 		if (useVectorUPL) {
5497 			vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
5498 			if (end_off) {
5499 				issueVectorUPL = 1;
5500 			}
5501 			/*
5502 			 * After this point, if we are using a vector UPL, then
5503 			 * either all the UPL elements end on a page boundary OR
5504 			 * this UPL is the last element because it does not end
5505 			 * on a page boundary.
5506 			 */
5507 		}
5508 
5509 		/*
5510 		 * request asynchronously so that we can overlap
5511 		 * the preparation of the next I/O
5512 		 * if there are already too many outstanding reads
5513 		 * wait until some have completed before issuing the next read
5514 		 */
5515 		cluster_iostate_wait(&iostate, max_rd_ahead, "cluster_read_direct");
5516 
5517 		if (iostate.io_error) {
5518 			/*
5519 			 * one of the earlier reads we issued ran into a hard error
5520 			 * don't issue any more reads, cleanup the UPL
5521 			 * that was just created but not used, then
5522 			 * go wait for any other reads to complete before
5523 			 * returning the error to the caller
5524 			 */
5525 			ubc_upl_abort(upl, 0);
5526 
5527 			goto wait_for_dreads;
5528 		}
5529 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
5530 		    upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
5531 
5532 		if (!useVectorUPL) {
5533 			if (no_zero_fill) {
5534 				io_flag &= ~CL_PRESERVE;
5535 			} else {
5536 				io_flag |= CL_PRESERVE;
5537 			}
5538 
5539 			retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5540 		} else {
5541 			if (!vector_upl_index) {
5542 				vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
5543 				v_upl_uio_offset = uio->uio_offset;
5544 				vector_upl_offset = upl_offset;
5545 			}
5546 
5547 			vector_upl_set_subupl(vector_upl, upl, upl_size);
5548 			vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
5549 			vector_upl_index++;
5550 			vector_upl_size += upl_size;
5551 			vector_upl_iosize += io_size;
5552 
5553 			if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
5554 				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5555 				reset_vector_run_state();
5556 			}
5557 		}
5558 
5559 		if (lock) {
5560 			// We don't need to wait for the I/O to complete
5561 			cluster_unlock_direct_read(lock);
5562 			lock = NULL;
5563 		}
5564 
5565 		/*
5566 		 * update the uio structure
5567 		 */
5568 		if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
5569 			uio_update(uio, (user_size_t)max_io_size);
5570 		} else {
5571 			uio_update(uio, (user_size_t)io_size);
5572 		}
5573 
5574 		io_req_size -= io_size;
5575 
5576 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
5577 		    upl, (int)uio->uio_offset, io_req_size, retval, 0);
5578 	} /* end while */
5579 
5580 	if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
5581 		retval = cluster_io_type(uio, read_type, read_length, 0);
5582 
5583 		if (retval == 0 && *read_type == IO_DIRECT) {
5584 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5585 			    (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5586 
5587 			goto next_dread;
5588 		}
5589 	}
5590 
5591 wait_for_dreads:
5592 
5593 	if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
5594 		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5595 		reset_vector_run_state();
5596 	}
5597 
5598 	// We don't need to wait for the I/O to complete
5599 	if (lock) {
5600 		cluster_unlock_direct_read(lock);
5601 	}
5602 
5603 	/*
5604 	 * make sure all async reads that are part of this stream
5605 	 * have completed before we return
5606 	 */
5607 	cluster_iostate_wait(&iostate, 0, "cluster_read_direct");
5608 
5609 	if (iostate.io_error) {
5610 		retval = iostate.io_error;
5611 	}
5612 
5613 	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5614 
5615 	if (io_throttled == TRUE && retval == 0) {
5616 		retval = EAGAIN;
5617 	}
5618 
5619 	vm_map_offset_t current_page_size, current_page_mask;
5620 	current_page_size = vm_map_page_size(current_map());
5621 	current_page_mask = vm_map_page_mask(current_map());
5622 	if (uio_acct) {
5623 		assert(uio_acct->uio_segflg != UIO_SYSSPACE);
5624 		off_t bytes_to_prefault = 0, bytes_prefaulted = 0;
5625 		user_addr_t curr_iov_base = 0;
5626 		user_addr_t curr_iov_end = 0;
5627 		user_size_t curr_iov_len = 0;
5628 
5629 		bytes_to_prefault = uio_offset(uio) - uio_offset(uio_acct);
5630 
5631 		for (; bytes_prefaulted < bytes_to_prefault;) {
5632 			curr_iov_base = uio_curriovbase(uio_acct);
5633 			curr_iov_len = MIN(uio_curriovlen(uio_acct), bytes_to_prefault - bytes_prefaulted);
5634 			curr_iov_end = curr_iov_base + curr_iov_len;
5635 
5636 			for (; curr_iov_base < curr_iov_end;) {
5637 				/*
5638 				 * This is specifically done for pmap accounting purposes.
5639 				 * vm_pre_fault() will call vm_fault() to enter the page into
5640 				 * the pmap if there isn't _a_ physical page for that VA already.
5641 				 */
5642 				vm_pre_fault(vm_map_trunc_page(curr_iov_base, current_page_mask), VM_PROT_READ);
5643 				curr_iov_base += current_page_size;
5644 				bytes_prefaulted += current_page_size;
5645 			}
5646 			/*
5647 			 * Use update instead of advance so we can see how many iovs we processed.
5648 			 */
5649 			uio_update(uio_acct, curr_iov_len);
5650 		}
5651 		uio_free(uio_acct);
5652 		uio_acct = NULL;
5653 	}
5654 
5655 	if (io_req_size && retval == 0) {
5656 		/*
5657 		 * we couldn't handle the tail of this request in DIRECT mode
5658 		 * so fire it through the copy path
5659 		 */
5660 		if (flags & IO_ENCRYPTED) {
5661 			/*
5662 			 * We cannot fall back to the copy path for encrypted I/O. If this
5663 			 * happens, there is something wrong with the user buffer passed
5664 			 * down.
5665 			 */
5666 			retval = EFAULT;
5667 		} else {
5668 			retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
5669 		}
5670 
5671 		*read_type = IO_UNKNOWN;
5672 	}
5673 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
5674 	    (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
5675 
5676 	return retval;
5677 }
5678 
5679 
5680 static int
cluster_read_contig(vnode_t vp,struct uio * uio,off_t filesize,int * read_type,u_int32_t * read_length,int (* callback)(buf_t,void *),void * callback_arg,int flags)5681 cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5682     int (*callback)(buf_t, void *), void *callback_arg, int flags)
5683 {
5684 	upl_page_info_t *pl;
5685 	upl_t            upl[MAX_VECTS];
5686 	vm_offset_t      upl_offset;
5687 	addr64_t         dst_paddr = 0;
5688 	user_addr_t      iov_base;
5689 	off_t            max_size;
5690 	upl_size_t       upl_size;
5691 	vm_size_t        upl_needed_size;
5692 	mach_msg_type_number_t  pages_in_pl;
5693 	upl_control_flags_t upl_flags;
5694 	kern_return_t    kret;
5695 	struct clios     iostate;
5696 	int              error = 0;
5697 	int              cur_upl = 0;
5698 	int              num_upl = 0;
5699 	int              n;
5700 	u_int32_t        xsize;
5701 	u_int32_t        io_size;
5702 	u_int32_t        devblocksize;
5703 	u_int32_t        mem_alignment_mask;
5704 	u_int32_t        tail_size = 0;
5705 	int              bflag;
5706 
5707 	if (flags & IO_PASSIVE) {
5708 		bflag = CL_PASSIVE;
5709 	} else {
5710 		bflag = 0;
5711 	}
5712 
5713 	if (flags & IO_NOCACHE) {
5714 		bflag |= CL_NOCACHE;
5715 	}
5716 
5717 	/*
5718 	 * When we enter this routine, we know
5719 	 *  -- the read_length will not exceed the current iov_len
5720 	 *  -- the target address is physically contiguous for read_length
5721 	 */
5722 	cluster_syncup(vp, filesize, callback, callback_arg, PUSH_SYNC);
5723 
5724 	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5725 	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5726 
5727 	iostate.io_completed = 0;
5728 	iostate.io_issued = 0;
5729 	iostate.io_error = 0;
5730 	iostate.io_wanted = 0;
5731 
5732 	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5733 
5734 next_cread:
5735 	io_size = *read_length;
5736 
5737 	max_size = filesize - uio->uio_offset;
5738 
5739 	if (io_size > max_size) {
5740 		io_size = (u_int32_t)max_size;
5741 	}
5742 
5743 	iov_base = uio_curriovbase(uio);
5744 
5745 	upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5746 	upl_needed_size = upl_offset + io_size;
5747 
5748 	pages_in_pl = 0;
5749 	upl_size = (upl_size_t)upl_needed_size;
5750 	upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5751 
5752 
5753 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
5754 	    (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
5755 
5756 	vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5757 	kret = vm_map_get_upl(map,
5758 	    vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
5759 	    &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
5760 
5761 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
5762 	    (int)upl_offset, upl_size, io_size, kret, 0);
5763 
5764 	if (kret != KERN_SUCCESS) {
5765 		/*
5766 		 * failed to get pagelist
5767 		 */
5768 		error = EINVAL;
5769 		goto wait_for_creads;
5770 	}
5771 	num_upl++;
5772 
5773 	if (upl_size < upl_needed_size) {
5774 		/*
5775 		 * The upl_size wasn't satisfied.
5776 		 */
5777 		error = EINVAL;
5778 		goto wait_for_creads;
5779 	}
5780 	pl = ubc_upl_pageinfo(upl[cur_upl]);
5781 
5782 	dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
5783 
5784 	while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
5785 		u_int32_t   head_size;
5786 
5787 		head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
5788 
5789 		if (head_size > io_size) {
5790 			head_size = io_size;
5791 		}
5792 
5793 		error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
5794 
5795 		if (error) {
5796 			goto wait_for_creads;
5797 		}
5798 
5799 		upl_offset += head_size;
5800 		dst_paddr  += head_size;
5801 		io_size    -= head_size;
5802 
5803 		iov_base   += head_size;
5804 	}
5805 	if ((u_int32_t)iov_base & mem_alignment_mask) {
5806 		/*
5807 		 * request doesn't set up on a memory boundary
5808 		 * the underlying DMA engine can handle...
5809 		 * return an error instead of going through
5810 		 * the slow copy path since the intent of this
5811 		 * path is direct I/O to device memory
5812 		 */
5813 		error = EINVAL;
5814 		goto wait_for_creads;
5815 	}
5816 
5817 	tail_size = io_size & (devblocksize - 1);
5818 
5819 	io_size  -= tail_size;
5820 
5821 	while (io_size && error == 0) {
5822 		if (io_size > MAX_IO_CONTIG_SIZE) {
5823 			xsize = MAX_IO_CONTIG_SIZE;
5824 		} else {
5825 			xsize = io_size;
5826 		}
5827 		/*
5828 		 * request asynchronously so that we can overlap
5829 		 * the preparation of the next I/O... we'll do
5830 		 * the commit after all the I/O has completed
5831 		 * since its all issued against the same UPL
5832 		 * if there are already too many outstanding reads
5833 		 * wait until some have completed before issuing the next
5834 		 */
5835 		cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig");
5836 
5837 		if (iostate.io_error) {
5838 			/*
5839 			 * one of the earlier reads we issued ran into a hard error
5840 			 * don't issue any more reads...
5841 			 * go wait for any other reads to complete before
5842 			 * returning the error to the caller
5843 			 */
5844 			goto wait_for_creads;
5845 		}
5846 		error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
5847 		    CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
5848 		    (buf_t)NULL, &iostate, callback, callback_arg);
5849 		/*
5850 		 * The cluster_io read was issued successfully,
5851 		 * update the uio structure
5852 		 */
5853 		if (error == 0) {
5854 			uio_update(uio, (user_size_t)xsize);
5855 
5856 			dst_paddr  += xsize;
5857 			upl_offset += xsize;
5858 			io_size    -= xsize;
5859 		}
5860 	}
5861 	if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
5862 		error = cluster_io_type(uio, read_type, read_length, 0);
5863 
5864 		if (error == 0 && *read_type == IO_CONTIG) {
5865 			cur_upl++;
5866 			goto next_cread;
5867 		}
5868 	} else {
5869 		*read_type = IO_UNKNOWN;
5870 	}
5871 
5872 wait_for_creads:
5873 	/*
5874 	 * make sure all async reads that are part of this stream
5875 	 * have completed before we proceed
5876 	 */
5877 	cluster_iostate_wait(&iostate, 0, "cluster_read_contig");
5878 
5879 	if (iostate.io_error) {
5880 		error = iostate.io_error;
5881 	}
5882 
5883 	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5884 
5885 	if (error == 0 && tail_size) {
5886 		error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
5887 	}
5888 
5889 	for (n = 0; n < num_upl; n++) {
5890 		/*
5891 		 * just release our hold on each physically contiguous
5892 		 * region without changing any state
5893 		 */
5894 		ubc_upl_abort(upl[n], 0);
5895 	}
5896 
5897 	return error;
5898 }
5899 
5900 
5901 static int
cluster_io_type(struct uio * uio,int * io_type,u_int32_t * io_length,u_int32_t min_length)5902 cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
5903 {
5904 	user_size_t      iov_len;
5905 	user_addr_t      iov_base = 0;
5906 	upl_t            upl;
5907 	upl_size_t       upl_size;
5908 	upl_control_flags_t upl_flags;
5909 	int              retval = 0;
5910 
5911 	/*
5912 	 * skip over any emtpy vectors
5913 	 */
5914 	uio_update(uio, (user_size_t)0);
5915 
5916 	iov_len = uio_curriovlen(uio);
5917 
5918 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);
5919 
5920 	if (iov_len) {
5921 		iov_base = uio_curriovbase(uio);
5922 		/*
5923 		 * make sure the size of the vector isn't too big...
5924 		 * internally, we want to handle all of the I/O in
5925 		 * chunk sizes that fit in a 32 bit int
5926 		 */
5927 		if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE) {
5928 			upl_size = MAX_IO_REQUEST_SIZE;
5929 		} else {
5930 			upl_size = (u_int32_t)iov_len;
5931 		}
5932 
5933 		upl_flags = UPL_QUERY_OBJECT_TYPE;
5934 
5935 		vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5936 		if ((vm_map_get_upl(map,
5937 		    vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
5938 		    &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) {
5939 			/*
5940 			 * the user app must have passed in an invalid address
5941 			 */
5942 			retval = EFAULT;
5943 		}
5944 		if (upl_size == 0) {
5945 			retval = EFAULT;
5946 		}
5947 
5948 		*io_length = upl_size;
5949 
5950 		if (upl_flags & UPL_PHYS_CONTIG) {
5951 			*io_type = IO_CONTIG;
5952 		} else if (iov_len >= min_length) {
5953 			*io_type = IO_DIRECT;
5954 		} else {
5955 			*io_type = IO_COPY;
5956 		}
5957 	} else {
5958 		/*
5959 		 * nothing left to do for this uio
5960 		 */
5961 		*io_length = 0;
5962 		*io_type   = IO_UNKNOWN;
5963 	}
5964 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);
5965 
5966 	if (*io_type == IO_DIRECT &&
5967 	    vm_map_page_shift(current_map()) < PAGE_SHIFT) {
5968 		/* no direct I/O for sub-page-size address spaces */
5969 		DEBUG4K_VFS("io_type IO_DIRECT -> IO_COPY\n");
5970 		*io_type = IO_COPY;
5971 	}
5972 
5973 	return retval;
5974 }
5975 
5976 
5977 /*
5978  * generate advisory I/O's in the largest chunks possible
5979  * the completed pages will be released into the VM cache
5980  */
5981 int
advisory_read(vnode_t vp,off_t filesize,off_t f_offset,int resid)5982 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
5983 {
5984 	return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
5985 }
5986 
5987 int
advisory_read_ext(vnode_t vp,off_t filesize,off_t f_offset,int resid,int (* callback)(buf_t,void *),void * callback_arg,int bflag)5988 advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
5989 {
5990 	upl_page_info_t *pl;
5991 	upl_t            upl = NULL;
5992 	vm_offset_t      upl_offset;
5993 	int              upl_size;
5994 	off_t            upl_f_offset;
5995 	int              start_offset;
5996 	int              start_pg;
5997 	int              last_pg;
5998 	int              pages_in_upl;
5999 	off_t            max_size;
6000 	int              io_size;
6001 	kern_return_t    kret;
6002 	int              retval = 0;
6003 	int              issued_io;
6004 	int              skip_range;
6005 	uint32_t         max_io_size;
6006 
6007 
6008 	if (!UBCINFOEXISTS(vp)) {
6009 		return EINVAL;
6010 	}
6011 
6012 	if (f_offset < 0 || resid < 0) {
6013 		return EINVAL;
6014 	}
6015 
6016 	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
6017 
6018 	if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
6019 		if (max_io_size > speculative_prefetch_max_iosize) {
6020 			max_io_size = speculative_prefetch_max_iosize;
6021 		}
6022 	}
6023 
6024 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
6025 	    (int)f_offset, resid, (int)filesize, 0, 0);
6026 
6027 	while (resid && f_offset < filesize && retval == 0) {
6028 		/*
6029 		 * compute the size of the upl needed to encompass
6030 		 * the requested read... limit each call to cluster_io
6031 		 * to the maximum UPL size... cluster_io will clip if
6032 		 * this exceeds the maximum io_size for the device,
6033 		 * make sure to account for
6034 		 * a starting offset that's not page aligned
6035 		 */
6036 		start_offset = (int)(f_offset & PAGE_MASK_64);
6037 		upl_f_offset = f_offset - (off_t)start_offset;
6038 		max_size     = filesize - f_offset;
6039 
6040 		if (resid < max_size) {
6041 			io_size = resid;
6042 		} else {
6043 			io_size = (int)max_size;
6044 		}
6045 
6046 		upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
6047 		if ((uint32_t)upl_size > max_io_size) {
6048 			upl_size = max_io_size;
6049 		}
6050 
6051 		skip_range = 0;
6052 		/*
6053 		 * return the number of contiguously present pages in the cache
6054 		 * starting at upl_f_offset within the file
6055 		 */
6056 		ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
6057 
6058 		if (skip_range) {
6059 			/*
6060 			 * skip over pages already present in the cache
6061 			 */
6062 			io_size = skip_range - start_offset;
6063 
6064 			f_offset += io_size;
6065 			resid    -= io_size;
6066 
6067 			if (skip_range == upl_size) {
6068 				continue;
6069 			}
6070 			/*
6071 			 * have to issue some real I/O
6072 			 * at this point, we know it's starting on a page boundary
6073 			 * because we've skipped over at least the first page in the request
6074 			 */
6075 			start_offset = 0;
6076 			upl_f_offset += skip_range;
6077 			upl_size     -= skip_range;
6078 		}
6079 		pages_in_upl = upl_size / PAGE_SIZE;
6080 
6081 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
6082 		    upl, (int)upl_f_offset, upl_size, start_offset, 0);
6083 
6084 		kret = ubc_create_upl_kernel(vp,
6085 		    upl_f_offset,
6086 		    upl_size,
6087 		    &upl,
6088 		    &pl,
6089 		    UPL_RET_ONLY_ABSENT | UPL_SET_LITE,
6090 		    VM_KERN_MEMORY_FILE);
6091 		if (kret != KERN_SUCCESS) {
6092 			return retval;
6093 		}
6094 		issued_io = 0;
6095 
6096 		/*
6097 		 * before we start marching forward, we must make sure we end on
6098 		 * a present page, otherwise we will be working with a freed
6099 		 * upl
6100 		 */
6101 		for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
6102 			if (upl_page_present(pl, last_pg)) {
6103 				break;
6104 			}
6105 		}
6106 		pages_in_upl = last_pg + 1;
6107 
6108 
6109 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
6110 		    upl, (int)upl_f_offset, upl_size, start_offset, 0);
6111 
6112 
6113 		for (last_pg = 0; last_pg < pages_in_upl;) {
6114 			/*
6115 			 * scan from the beginning of the upl looking for the first
6116 			 * page that is present.... this will become the first page in
6117 			 * the request we're going to make to 'cluster_io'... if all
6118 			 * of the pages are absent, we won't call through to 'cluster_io'
6119 			 */
6120 			for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
6121 				if (upl_page_present(pl, start_pg)) {
6122 					break;
6123 				}
6124 			}
6125 
6126 			/*
6127 			 * scan from the starting present page looking for an absent
6128 			 * page before the end of the upl is reached, if we
6129 			 * find one, then it will terminate the range of pages being
6130 			 * presented to 'cluster_io'
6131 			 */
6132 			for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
6133 				if (!upl_page_present(pl, last_pg)) {
6134 					break;
6135 				}
6136 			}
6137 
6138 			if (last_pg > start_pg) {
6139 				/*
6140 				 * we found a range of pages that must be filled
6141 				 * if the last page in this range is the last page of the file
6142 				 * we may have to clip the size of it to keep from reading past
6143 				 * the end of the last physical block associated with the file
6144 				 */
6145 				upl_offset = start_pg * PAGE_SIZE;
6146 				io_size    = (last_pg - start_pg) * PAGE_SIZE;
6147 
6148 				if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
6149 					io_size = (int)(filesize - (upl_f_offset + upl_offset));
6150 				}
6151 
6152 				/*
6153 				 * issue an asynchronous read to cluster_io
6154 				 */
6155 				retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
6156 				    CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6157 
6158 				issued_io = 1;
6159 			}
6160 		}
6161 		if (issued_io == 0) {
6162 			ubc_upl_abort(upl, 0);
6163 		}
6164 
6165 		io_size = upl_size - start_offset;
6166 
6167 		if (io_size > resid) {
6168 			io_size = resid;
6169 		}
6170 		f_offset += io_size;
6171 		resid    -= io_size;
6172 	}
6173 
6174 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
6175 	    (int)f_offset, resid, retval, 0, 0);
6176 
6177 	return retval;
6178 }
6179 
6180 
6181 int
cluster_push(vnode_t vp,int flags)6182 cluster_push(vnode_t vp, int flags)
6183 {
6184 	return cluster_push_ext(vp, flags, NULL, NULL);
6185 }
6186 
6187 
6188 int
cluster_push_ext(vnode_t vp,int flags,int (* callback)(buf_t,void *),void * callback_arg)6189 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
6190 {
6191 	return cluster_push_err(vp, flags, callback, callback_arg, NULL);
6192 }
6193 
6194 /* write errors via err, but return the number of clusters written */
6195 extern uint32_t system_inshutdown;
6196 uint32_t cl_sparse_push_error = 0;
6197 int
cluster_push_err(vnode_t vp,int flags,int (* callback)(buf_t,void *),void * callback_arg,int * err)6198 cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg, int *err)
6199 {
6200 	int     retval;
6201 	int     my_sparse_wait = 0;
6202 	struct  cl_writebehind *wbp;
6203 	int     local_err = 0;
6204 
6205 	if (err) {
6206 		*err = 0;
6207 	}
6208 
6209 	if (!UBCINFOEXISTS(vp)) {
6210 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0);
6211 		return 0;
6212 	}
6213 	/* return if deferred write is set */
6214 	if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
6215 		return 0;
6216 	}
6217 	if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
6218 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0);
6219 		return 0;
6220 	}
6221 	if (!ISSET(flags, IO_SYNC) && wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
6222 		lck_mtx_unlock(&wbp->cl_lockw);
6223 
6224 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0);
6225 		return 0;
6226 	}
6227 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
6228 	    wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
6229 
6230 	/*
6231 	 * if we have an fsync in progress, we don't want to allow any additional
6232 	 * sync/fsync/close(s) to occur until it finishes.
6233 	 * note that its possible for writes to continue to occur to this file
6234 	 * while we're waiting and also once the fsync starts to clean if we're
6235 	 * in the sparse map case
6236 	 */
6237 	while (wbp->cl_sparse_wait) {
6238 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
6239 
6240 		msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
6241 
6242 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
6243 	}
6244 	if (flags & IO_SYNC) {
6245 		my_sparse_wait = 1;
6246 		wbp->cl_sparse_wait = 1;
6247 
6248 		/*
6249 		 * this is an fsync (or equivalent)... we must wait for any existing async
6250 		 * cleaning operations to complete before we evaulate the current state
6251 		 * and finish cleaning... this insures that all writes issued before this
6252 		 * fsync actually get cleaned to the disk before this fsync returns
6253 		 */
6254 		while (wbp->cl_sparse_pushes) {
6255 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
6256 
6257 			msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
6258 
6259 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
6260 		}
6261 	}
6262 	if (wbp->cl_scmap) {
6263 		void    *scmap;
6264 
6265 		if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
6266 			scmap = wbp->cl_scmap;
6267 			wbp->cl_scmap = NULL;
6268 
6269 			wbp->cl_sparse_pushes++;
6270 
6271 			lck_mtx_unlock(&wbp->cl_lockw);
6272 
6273 			retval = sparse_cluster_push(wbp, &scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
6274 
6275 			lck_mtx_lock(&wbp->cl_lockw);
6276 
6277 			wbp->cl_sparse_pushes--;
6278 
6279 			if (retval) {
6280 				if (wbp->cl_scmap != NULL) {
6281 					/*
6282 					 * panic("cluster_push_err: Expected NULL cl_scmap\n");
6283 					 *
6284 					 * This can happen if we get an error from the underlying FS
6285 					 * e.g. ENOSPC, EPERM or EIO etc. We hope that these errors
6286 					 * are transient and the I/Os will succeed at a later point.
6287 					 *
6288 					 * The tricky part here is that a new sparse cluster has been
6289 					 * allocated and tracking a different set of dirty pages. So these
6290 					 * pages are not going to be pushed out with the next sparse_cluster_push.
6291 					 * An explicit msync or file close will, however, push the pages out.
6292 					 *
6293 					 * What if those calls still don't work? And so, during shutdown we keep
6294 					 * trying till we succeed...
6295 					 */
6296 
6297 					if (system_inshutdown) {
6298 						if ((retval == ENOSPC) && (vp->v_mount->mnt_flag & (MNT_LOCAL | MNT_REMOVABLE)) == MNT_LOCAL) {
6299 							os_atomic_inc(&cl_sparse_push_error, relaxed);
6300 						}
6301 					} else {
6302 						vfs_drt_control(&scmap, 0); /* emit stats and free this memory. Dirty pages stay intact. */
6303 						scmap = NULL;
6304 					}
6305 				} else {
6306 					wbp->cl_scmap = scmap;
6307 				}
6308 			}
6309 
6310 			if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0) {
6311 				wakeup((caddr_t)&wbp->cl_sparse_pushes);
6312 			}
6313 		} else {
6314 			retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
6315 		}
6316 
6317 		local_err = retval;
6318 
6319 		if (err) {
6320 			*err = retval;
6321 		}
6322 		retval = 1;
6323 	} else {
6324 		retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, &local_err, FALSE);
6325 		if (err) {
6326 			*err = local_err;
6327 		}
6328 	}
6329 	lck_mtx_unlock(&wbp->cl_lockw);
6330 
6331 	if (flags & IO_SYNC) {
6332 		(void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
6333 	}
6334 
6335 	if (my_sparse_wait) {
6336 		/*
6337 		 * I'm the owner of the serialization token
6338 		 * clear it and wakeup anyone that is waiting
6339 		 * for me to finish
6340 		 */
6341 		lck_mtx_lock(&wbp->cl_lockw);
6342 
6343 		wbp->cl_sparse_wait = 0;
6344 		wakeup((caddr_t)&wbp->cl_sparse_wait);
6345 
6346 		lck_mtx_unlock(&wbp->cl_lockw);
6347 	}
6348 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
6349 	    wbp->cl_scmap, wbp->cl_number, retval, local_err, 0);
6350 
6351 	return retval;
6352 }
6353 
6354 
6355 __private_extern__ void
cluster_release(struct ubc_info * ubc)6356 cluster_release(struct ubc_info *ubc)
6357 {
6358 	struct cl_writebehind *wbp;
6359 	struct cl_readahead   *rap;
6360 
6361 	if ((wbp = ubc->cl_wbehind)) {
6362 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
6363 
6364 		if (wbp->cl_scmap) {
6365 			vfs_drt_control(&(wbp->cl_scmap), 0);
6366 		}
6367 		lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
6368 		zfree(cl_wr_zone, wbp);
6369 		ubc->cl_wbehind = NULL;
6370 	} else {
6371 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
6372 	}
6373 
6374 	if ((rap = ubc->cl_rahead)) {
6375 		lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
6376 		zfree(cl_rd_zone, rap);
6377 		ubc->cl_rahead  = NULL;
6378 	}
6379 
6380 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
6381 }
6382 
6383 
6384 static int
cluster_try_push(struct cl_writebehind * wbp,vnode_t vp,off_t EOF,int push_flag,int io_flags,int (* callback)(buf_t,void *),void * callback_arg,int * err,boolean_t vm_initiated)6385 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err, boolean_t vm_initiated)
6386 {
6387 	int cl_index;
6388 	int cl_index1;
6389 	int min_index;
6390 	int cl_len;
6391 	int cl_pushed = 0;
6392 	struct cl_wextent l_clusters[MAX_CLUSTERS];
6393 	u_int  max_cluster_pgcount;
6394 	int error = 0;
6395 
6396 	max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
6397 	/*
6398 	 * the write behind context exists and has
6399 	 * already been locked...
6400 	 */
6401 	if (wbp->cl_number == 0) {
6402 		/*
6403 		 * no clusters to push
6404 		 * return number of empty slots
6405 		 */
6406 		return MAX_CLUSTERS;
6407 	}
6408 
6409 	/*
6410 	 * make a local 'sorted' copy of the clusters
6411 	 * and clear wbp->cl_number so that new clusters can
6412 	 * be developed
6413 	 */
6414 	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
6415 		for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
6416 			if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr) {
6417 				continue;
6418 			}
6419 			if (min_index == -1) {
6420 				min_index = cl_index1;
6421 			} else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr) {
6422 				min_index = cl_index1;
6423 			}
6424 		}
6425 		if (min_index == -1) {
6426 			break;
6427 		}
6428 
6429 		l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
6430 		l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
6431 		l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
6432 
6433 		wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
6434 	}
6435 	wbp->cl_number = 0;
6436 
6437 	cl_len = cl_index;
6438 
6439 	/* skip switching to the sparse cluster mechanism if on diskimage */
6440 	if (((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS) &&
6441 	    !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)) {
6442 		int   i;
6443 
6444 		/*
6445 		 * determine if we appear to be writing the file sequentially
6446 		 * if not, by returning without having pushed any clusters
6447 		 * we will cause this vnode to be pushed into the sparse cluster mechanism
6448 		 * used for managing more random I/O patterns
6449 		 *
6450 		 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
6451 		 * that's why we're in try_push with PUSH_DELAY...
6452 		 *
6453 		 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
6454 		 * is adjacent to the next (i.e. we're looking for sequential writes) they were sorted above
6455 		 * so we can just make a simple pass through, up to, but not including the last one...
6456 		 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
6457 		 * are sequential
6458 		 *
6459 		 * we let the last one be partial as long as it was adjacent to the previous one...
6460 		 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
6461 		 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
6462 		 */
6463 		for (i = 0; i < MAX_CLUSTERS - 1; i++) {
6464 			if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount) {
6465 				goto dont_try;
6466 			}
6467 			if (l_clusters[i].e_addr != l_clusters[i + 1].b_addr) {
6468 				goto dont_try;
6469 			}
6470 		}
6471 	}
6472 	if (vm_initiated == TRUE) {
6473 		lck_mtx_unlock(&wbp->cl_lockw);
6474 	}
6475 
6476 	for (cl_index = 0; cl_index < cl_len; cl_index++) {
6477 		int     flags;
6478 		struct  cl_extent cl;
6479 		int retval;
6480 
6481 		flags = io_flags & (IO_PASSIVE | IO_CLOSE);
6482 
6483 		/*
6484 		 * try to push each cluster in turn...
6485 		 */
6486 		if (l_clusters[cl_index].io_flags & CLW_IONOCACHE) {
6487 			flags |= IO_NOCACHE;
6488 		}
6489 
6490 		if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE) {
6491 			flags |= IO_PASSIVE;
6492 		}
6493 
6494 		if (push_flag & PUSH_SYNC) {
6495 			flags |= IO_SYNC;
6496 		}
6497 
6498 		cl.b_addr = l_clusters[cl_index].b_addr;
6499 		cl.e_addr = l_clusters[cl_index].e_addr;
6500 
6501 		retval = cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg, vm_initiated);
6502 
6503 		if (retval == 0) {
6504 			cl_pushed++;
6505 
6506 			l_clusters[cl_index].b_addr = 0;
6507 			l_clusters[cl_index].e_addr = 0;
6508 		} else if (error == 0) {
6509 			error = retval;
6510 		}
6511 
6512 		if (!(push_flag & PUSH_ALL)) {
6513 			break;
6514 		}
6515 	}
6516 	if (vm_initiated == TRUE) {
6517 		lck_mtx_lock(&wbp->cl_lockw);
6518 	}
6519 
6520 	if (err) {
6521 		*err = error;
6522 	}
6523 
6524 dont_try:
6525 	if (cl_len > cl_pushed) {
6526 		/*
6527 		 * we didn't push all of the clusters, so
6528 		 * lets try to merge them back in to the vnode
6529 		 */
6530 		if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
6531 			/*
6532 			 * we picked up some new clusters while we were trying to
6533 			 * push the old ones... this can happen because I've dropped
6534 			 * the vnode lock... the sum of the
6535 			 * leftovers plus the new cluster count exceeds our ability
6536 			 * to represent them, so switch to the sparse cluster mechanism
6537 			 *
6538 			 * collect the active public clusters...
6539 			 */
6540 			sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
6541 
6542 			for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
6543 				if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
6544 					continue;
6545 				}
6546 				wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
6547 				wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
6548 				wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
6549 
6550 				cl_index1++;
6551 			}
6552 			/*
6553 			 * update the cluster count
6554 			 */
6555 			wbp->cl_number = cl_index1;
6556 
6557 			/*
6558 			 * and collect the original clusters that were moved into the
6559 			 * local storage for sorting purposes
6560 			 */
6561 			sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
6562 		} else {
6563 			/*
6564 			 * we've got room to merge the leftovers back in
6565 			 * just append them starting at the next 'hole'
6566 			 * represented by wbp->cl_number
6567 			 */
6568 			for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
6569 				if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
6570 					continue;
6571 				}
6572 
6573 				wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
6574 				wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
6575 				wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
6576 
6577 				cl_index1++;
6578 			}
6579 			/*
6580 			 * update the cluster count
6581 			 */
6582 			wbp->cl_number = cl_index1;
6583 		}
6584 	}
6585 	return MAX_CLUSTERS - wbp->cl_number;
6586 }
6587 
6588 
6589 
6590 static int
cluster_push_now(vnode_t vp,struct cl_extent * cl,off_t EOF,int flags,int (* callback)(buf_t,void *),void * callback_arg,boolean_t vm_initiated)6591 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags,
6592     int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6593 {
6594 	upl_page_info_t *pl;
6595 	upl_t            upl;
6596 	vm_offset_t      upl_offset;
6597 	int              upl_size;
6598 	off_t            upl_f_offset;
6599 	int              pages_in_upl;
6600 	int              start_pg;
6601 	int              last_pg;
6602 	int              io_size;
6603 	int              io_flags;
6604 	int              upl_flags;
6605 	int              bflag;
6606 	int              size;
6607 	int              error = 0;
6608 	int              retval;
6609 	kern_return_t    kret;
6610 
6611 	if (flags & IO_PASSIVE) {
6612 		bflag = CL_PASSIVE;
6613 	} else {
6614 		bflag = 0;
6615 	}
6616 
6617 	if (flags & IO_SKIP_ENCRYPTION) {
6618 		bflag |= CL_ENCRYPTED;
6619 	}
6620 
6621 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
6622 	    (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
6623 
6624 	if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
6625 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
6626 
6627 		return 0;
6628 	}
6629 	upl_size = pages_in_upl * PAGE_SIZE;
6630 	upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
6631 
6632 	if (upl_f_offset + upl_size >= EOF) {
6633 		if (upl_f_offset >= EOF) {
6634 			/*
6635 			 * must have truncated the file and missed
6636 			 * clearing a dangling cluster (i.e. it's completely
6637 			 * beyond the new EOF
6638 			 */
6639 			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
6640 
6641 			return 0;
6642 		}
6643 		size = (int)(EOF - upl_f_offset);
6644 
6645 		upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
6646 		pages_in_upl = upl_size / PAGE_SIZE;
6647 	} else {
6648 		size = upl_size;
6649 	}
6650 
6651 
6652 	if (vm_initiated) {
6653 		vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size,
6654 		    UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error);
6655 
6656 		return error;
6657 	}
6658 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
6659 
6660 	/*
6661 	 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
6662 	 *
6663 	 * - only pages that are currently dirty are returned... these are the ones we need to clean
6664 	 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
6665 	 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
6666 	 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
6667 	 *   someone dirties this page while the I/O is in progress, we don't lose track of the new state
6668 	 *
6669 	 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
6670 	 */
6671 
6672 	if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE)) {
6673 		upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
6674 	} else {
6675 		upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
6676 	}
6677 
6678 	kret = ubc_create_upl_kernel(vp,
6679 	    upl_f_offset,
6680 	    upl_size,
6681 	    &upl,
6682 	    &pl,
6683 	    upl_flags,
6684 	    VM_KERN_MEMORY_FILE);
6685 	if (kret != KERN_SUCCESS) {
6686 		panic("cluster_push: failed to get pagelist");
6687 	}
6688 
6689 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
6690 
6691 	/*
6692 	 * since we only asked for the dirty pages back
6693 	 * it's possible that we may only get a few or even none, so...
6694 	 * before we start marching forward, we must make sure we know
6695 	 * where the last present page is in the UPL, otherwise we could
6696 	 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
6697 	 * employed by commit_range and abort_range.
6698 	 */
6699 	for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
6700 		if (upl_page_present(pl, last_pg)) {
6701 			break;
6702 		}
6703 	}
6704 	pages_in_upl = last_pg + 1;
6705 
6706 	if (pages_in_upl == 0) {
6707 		ubc_upl_abort(upl, 0);
6708 
6709 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
6710 		return 0;
6711 	}
6712 
6713 	for (last_pg = 0; last_pg < pages_in_upl;) {
6714 		/*
6715 		 * find the next dirty page in the UPL
6716 		 * this will become the first page in the
6717 		 * next I/O to generate
6718 		 */
6719 		for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
6720 			if (upl_dirty_page(pl, start_pg)) {
6721 				break;
6722 			}
6723 			if (upl_page_present(pl, start_pg)) {
6724 				/*
6725 				 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
6726 				 * just release these unchanged since we're not going
6727 				 * to steal them or change their state
6728 				 */
6729 				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
6730 			}
6731 		}
6732 		if (start_pg >= pages_in_upl) {
6733 			/*
6734 			 * done... no more dirty pages to push
6735 			 */
6736 			break;
6737 		}
6738 		if (start_pg > last_pg) {
6739 			/*
6740 			 * skipped over some non-dirty pages
6741 			 */
6742 			size -= ((start_pg - last_pg) * PAGE_SIZE);
6743 		}
6744 
6745 		/*
6746 		 * find a range of dirty pages to write
6747 		 */
6748 		for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
6749 			if (!upl_dirty_page(pl, last_pg)) {
6750 				break;
6751 			}
6752 		}
6753 		upl_offset = start_pg * PAGE_SIZE;
6754 
6755 		io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
6756 
6757 		io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
6758 
6759 		if (!(flags & IO_SYNC)) {
6760 			io_flags |= CL_ASYNC;
6761 		}
6762 
6763 		if (flags & IO_CLOSE) {
6764 			io_flags |= CL_CLOSE;
6765 		}
6766 
6767 		if (flags & IO_NOCACHE) {
6768 			io_flags |= CL_NOCACHE;
6769 		}
6770 
6771 		retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
6772 		    io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6773 
6774 		if (error == 0 && retval) {
6775 			error = retval;
6776 		}
6777 
6778 		size -= io_size;
6779 	}
6780 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, error, 0, 0);
6781 
6782 	return error;
6783 }
6784 
6785 
6786 /*
6787  * sparse_cluster_switch is called with the write behind lock held
6788  */
6789 static int
sparse_cluster_switch(struct cl_writebehind * wbp,vnode_t vp,off_t EOF,int (* callback)(buf_t,void *),void * callback_arg,boolean_t vm_initiated)6790 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6791 {
6792 	int     cl_index;
6793 	int     error = 0;
6794 
6795 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, 0, 0);
6796 
6797 	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
6798 		int       flags;
6799 		struct cl_extent cl;
6800 
6801 		for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
6802 			if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
6803 				if (flags & UPL_POP_DIRTY) {
6804 					cl.e_addr = cl.b_addr + 1;
6805 
6806 					error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated);
6807 
6808 					if (error) {
6809 						break;
6810 					}
6811 				}
6812 			}
6813 		}
6814 	}
6815 	wbp->cl_number -= cl_index;
6816 
6817 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, error, 0);
6818 
6819 	return error;
6820 }
6821 
6822 
6823 /*
6824  * sparse_cluster_push must be called with the write-behind lock held if the scmap is
6825  * still associated with the write-behind context... however, if the scmap has been disassociated
6826  * from the write-behind context (the cluster_push case), the wb lock is not held
6827  */
6828 static int
sparse_cluster_push(struct cl_writebehind * wbp,void ** scmap,vnode_t vp,off_t EOF,int push_flag,int io_flags,int (* callback)(buf_t,void *),void * callback_arg,boolean_t vm_initiated)6829 sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t EOF, int push_flag,
6830     int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6831 {
6832 	struct cl_extent cl;
6833 	off_t           offset;
6834 	u_int           length;
6835 	void            *l_scmap;
6836 	int error = 0;
6837 
6838 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, kdebug_vnode(vp), (*scmap), 0, push_flag, 0);
6839 
6840 	if (push_flag & PUSH_ALL) {
6841 		vfs_drt_control(scmap, 1);
6842 	}
6843 
6844 	l_scmap = *scmap;
6845 
6846 	for (;;) {
6847 		int retval;
6848 
6849 		if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS) {
6850 			/*
6851 			 * Not finding anything to push will return KERN_FAILURE.
6852 			 * Confusing since it isn't really a failure. But that's the
6853 			 * reason we don't set 'error' here like we do below.
6854 			 */
6855 			break;
6856 		}
6857 
6858 		if (vm_initiated == TRUE) {
6859 			lck_mtx_unlock(&wbp->cl_lockw);
6860 		}
6861 
6862 		cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
6863 		cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
6864 
6865 		retval = cluster_push_now(vp, &cl, EOF, io_flags, callback, callback_arg, vm_initiated);
6866 		if (error == 0 && retval) {
6867 			error = retval;
6868 		}
6869 
6870 		if (vm_initiated == TRUE) {
6871 			lck_mtx_lock(&wbp->cl_lockw);
6872 
6873 			if (*scmap != l_scmap) {
6874 				break;
6875 			}
6876 		}
6877 
6878 		if (error) {
6879 			if (vfs_drt_mark_pages(scmap, offset, length, NULL) != KERN_SUCCESS) {
6880 				panic("Failed to restore dirty state on failure");
6881 			}
6882 
6883 			break;
6884 		}
6885 
6886 		if (!(push_flag & PUSH_ALL)) {
6887 			break;
6888 		}
6889 	}
6890 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);
6891 
6892 	return error;
6893 }
6894 
6895 
6896 /*
6897  * sparse_cluster_add is called with the write behind lock held
6898  */
6899 static int
sparse_cluster_add(struct cl_writebehind * wbp,void ** scmap,vnode_t vp,struct cl_extent * cl,off_t EOF,int (* callback)(buf_t,void *),void * callback_arg,boolean_t vm_initiated)6900 sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF,
6901     int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6902 {
6903 	u_int   new_dirty;
6904 	u_int   length;
6905 	off_t   offset;
6906 	int     error = 0;
6907 	int     push_flag = 0; /* Is this a valid value? */
6908 
6909 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);
6910 
6911 	offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
6912 	length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
6913 
6914 	while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
6915 		/*
6916 		 * no room left in the map
6917 		 * only a partial update was done
6918 		 * push out some pages and try again
6919 		 */
6920 
6921 		if (vfs_get_scmap_push_behavior_internal(scmap, &push_flag)) {
6922 			push_flag = 0;
6923 		}
6924 
6925 		error = sparse_cluster_push(wbp, scmap, vp, EOF, push_flag, 0, callback, callback_arg, vm_initiated);
6926 
6927 		if (error) {
6928 			break;
6929 		}
6930 
6931 		offset += (new_dirty * PAGE_SIZE_64);
6932 		length -= (new_dirty * PAGE_SIZE);
6933 	}
6934 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);
6935 
6936 	return error;
6937 }
6938 
6939 
6940 static int
cluster_align_phys_io(vnode_t vp,struct uio * uio,addr64_t usr_paddr,u_int32_t xsize,int flags,int (* callback)(buf_t,void *),void * callback_arg)6941 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
6942 {
6943 	upl_page_info_t  *pl;
6944 	upl_t            upl;
6945 	addr64_t         ubc_paddr;
6946 	kern_return_t    kret;
6947 	int              error = 0;
6948 	int              did_read = 0;
6949 	int              abort_flags;
6950 	int              upl_flags;
6951 	int              bflag;
6952 
6953 	if (flags & IO_PASSIVE) {
6954 		bflag = CL_PASSIVE;
6955 	} else {
6956 		bflag = 0;
6957 	}
6958 
6959 	if (flags & IO_NOCACHE) {
6960 		bflag |= CL_NOCACHE;
6961 	}
6962 
6963 	upl_flags = UPL_SET_LITE;
6964 
6965 	if (!(flags & CL_READ)) {
6966 		/*
6967 		 * "write" operation:  let the UPL subsystem know
6968 		 * that we intend to modify the buffer cache pages
6969 		 * we're gathering.
6970 		 */
6971 		upl_flags |= UPL_WILL_MODIFY;
6972 	} else {
6973 		/*
6974 		 * indicate that there is no need to pull the
6975 		 * mapping for this page... we're only going
6976 		 * to read from it, not modify it.
6977 		 */
6978 		upl_flags |= UPL_FILE_IO;
6979 	}
6980 	kret = ubc_create_upl_kernel(vp,
6981 	    uio->uio_offset & ~PAGE_MASK_64,
6982 	    PAGE_SIZE,
6983 	    &upl,
6984 	    &pl,
6985 	    upl_flags,
6986 	    VM_KERN_MEMORY_FILE);
6987 
6988 	if (kret != KERN_SUCCESS) {
6989 		return EINVAL;
6990 	}
6991 
6992 	if (!upl_valid_page(pl, 0)) {
6993 		/*
6994 		 * issue a synchronous read to cluster_io
6995 		 */
6996 		error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
6997 		    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6998 		if (error) {
6999 			ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
7000 
7001 			return error;
7002 		}
7003 		did_read = 1;
7004 	}
7005 	ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
7006 
7007 /*
7008  *	NOTE:  There is no prototype for the following in BSD. It, and the definitions
7009  *	of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
7010  *	osfmk/ppc/mappings.h.  They are not included here because there appears to be no
7011  *	way to do so without exporting them to kexts as well.
7012  */
7013 	if (flags & CL_READ) {
7014 //		copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk);	/* Copy physical to physical and flush the destination */
7015 		copypv(ubc_paddr, usr_paddr, xsize, 2 |        1 |        4);           /* Copy physical to physical and flush the destination */
7016 	} else {
7017 //		copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc);	/* Copy physical to physical and flush the source */
7018 		copypv(usr_paddr, ubc_paddr, xsize, 2 |        1 |        8);           /* Copy physical to physical and flush the source */
7019 	}
7020 	if (!(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
7021 		/*
7022 		 * issue a synchronous write to cluster_io
7023 		 */
7024 		error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
7025 		    bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
7026 	}
7027 	if (error == 0) {
7028 		uio_update(uio, (user_size_t)xsize);
7029 	}
7030 
7031 	if (did_read) {
7032 		abort_flags = UPL_ABORT_FREE_ON_EMPTY;
7033 	} else {
7034 		abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
7035 	}
7036 
7037 	ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
7038 
7039 	return error;
7040 }
7041 
7042 int
cluster_copy_upl_data(struct uio * uio,upl_t upl,int upl_offset,int * io_resid)7043 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
7044 {
7045 	int       pg_offset;
7046 	int       pg_index;
7047 	int       csize;
7048 	int       segflg;
7049 	int       retval = 0;
7050 	int       xsize;
7051 	upl_page_info_t *pl;
7052 	int       dirty_count;
7053 
7054 	xsize = *io_resid;
7055 
7056 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
7057 	    (int)uio->uio_offset, upl_offset, xsize, 0, 0);
7058 
7059 	segflg = uio->uio_segflg;
7060 
7061 	switch (segflg) {
7062 	case UIO_USERSPACE32:
7063 	case UIO_USERISPACE32:
7064 		uio->uio_segflg = UIO_PHYS_USERSPACE32;
7065 		break;
7066 
7067 	case UIO_USERSPACE:
7068 	case UIO_USERISPACE:
7069 		uio->uio_segflg = UIO_PHYS_USERSPACE;
7070 		break;
7071 
7072 	case UIO_USERSPACE64:
7073 	case UIO_USERISPACE64:
7074 		uio->uio_segflg = UIO_PHYS_USERSPACE64;
7075 		break;
7076 
7077 	case UIO_SYSSPACE:
7078 		uio->uio_segflg = UIO_PHYS_SYSSPACE;
7079 		break;
7080 	}
7081 	pl = ubc_upl_pageinfo(upl);
7082 
7083 	pg_index  = upl_offset / PAGE_SIZE;
7084 	pg_offset = upl_offset & PAGE_MASK;
7085 	csize     = min(PAGE_SIZE - pg_offset, xsize);
7086 
7087 	dirty_count = 0;
7088 	while (xsize && retval == 0) {
7089 		addr64_t  paddr;
7090 
7091 		paddr = ((addr64_t)upl_phys_page(pl, pg_index) << PAGE_SHIFT) + pg_offset;
7092 		if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE)) {
7093 			dirty_count++;
7094 		}
7095 
7096 		retval = uiomove64(paddr, csize, uio);
7097 
7098 		pg_index += 1;
7099 		pg_offset = 0;
7100 		xsize    -= csize;
7101 		csize     = min(PAGE_SIZE, xsize);
7102 	}
7103 	*io_resid = xsize;
7104 
7105 	uio->uio_segflg = segflg;
7106 
7107 	if (dirty_count) {
7108 		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, upl_lookup_vnode(upl));
7109 	}
7110 
7111 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7112 	    (int)uio->uio_offset, xsize, retval, segflg, 0);
7113 
7114 	return retval;
7115 }
7116 
7117 
7118 int
cluster_copy_ubc_data(vnode_t vp,struct uio * uio,int * io_resid,int mark_dirty)7119 cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
7120 {
7121 	return cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1);
7122 }
7123 
7124 
7125 static int
cluster_copy_ubc_data_internal(vnode_t vp,struct uio * uio,int * io_resid,int mark_dirty,int take_reference)7126 cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
7127 {
7128 	int       segflg;
7129 	int       io_size;
7130 	int       xsize;
7131 	int       start_offset;
7132 	int       retval = 0;
7133 	memory_object_control_t  control;
7134 
7135 	io_size = *io_resid;
7136 
7137 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
7138 	    (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);
7139 
7140 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
7141 
7142 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
7143 		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7144 		    (int)uio->uio_offset, io_size, retval, 3, 0);
7145 
7146 		return 0;
7147 	}
7148 	segflg = uio->uio_segflg;
7149 
7150 	switch (segflg) {
7151 	case UIO_USERSPACE32:
7152 	case UIO_USERISPACE32:
7153 		uio->uio_segflg = UIO_PHYS_USERSPACE32;
7154 		break;
7155 
7156 	case UIO_USERSPACE64:
7157 	case UIO_USERISPACE64:
7158 		uio->uio_segflg = UIO_PHYS_USERSPACE64;
7159 		break;
7160 
7161 	case UIO_USERSPACE:
7162 	case UIO_USERISPACE:
7163 		uio->uio_segflg = UIO_PHYS_USERSPACE;
7164 		break;
7165 
7166 	case UIO_SYSSPACE:
7167 		uio->uio_segflg = UIO_PHYS_SYSSPACE;
7168 		break;
7169 	}
7170 
7171 	if ((io_size = *io_resid)) {
7172 		start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
7173 		xsize = (int)uio_resid(uio);
7174 
7175 		retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
7176 		    start_offset, io_size, mark_dirty, take_reference);
7177 		xsize -= uio_resid(uio);
7178 
7179 		int num_bytes_copied = xsize;
7180 		if (num_bytes_copied && uio_rw(uio)) {
7181 			task_update_logical_writes(current_task(), num_bytes_copied, TASK_WRITE_DEFERRED, vp);
7182 		}
7183 		io_size -= xsize;
7184 	}
7185 	uio->uio_segflg = segflg;
7186 	*io_resid       = io_size;
7187 
7188 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7189 	    (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
7190 
7191 	return retval;
7192 }
7193 
7194 
7195 int
is_file_clean(vnode_t vp,off_t filesize)7196 is_file_clean(vnode_t vp, off_t filesize)
7197 {
7198 	off_t f_offset;
7199 	int   flags;
7200 	int   total_dirty = 0;
7201 
7202 	for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
7203 		if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
7204 			if (flags & UPL_POP_DIRTY) {
7205 				total_dirty++;
7206 			}
7207 		}
7208 	}
7209 	if (total_dirty) {
7210 		return EINVAL;
7211 	}
7212 
7213 	return 0;
7214 }
7215 
7216 
7217 
7218 /*
7219  * Dirty region tracking/clustering mechanism.
7220  *
7221  * This code (vfs_drt_*) provides a mechanism for tracking and clustering
7222  * dirty regions within a larger space (file).  It is primarily intended to
7223  * support clustering in large files with many dirty areas.
7224  *
7225  * The implementation assumes that the dirty regions are pages.
7226  *
7227  * To represent dirty pages within the file, we store bit vectors in a
7228  * variable-size circular hash.
7229  */
7230 
7231 /*
7232  * Bitvector size.  This determines the number of pages we group in a
7233  * single hashtable entry.  Each hashtable entry is aligned to this
7234  * size within the file.
7235  */
7236 #define DRT_BITVECTOR_PAGES             ((1024 * 256) / PAGE_SIZE)
7237 
7238 /*
7239  * File offset handling.
7240  *
7241  * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
7242  * the correct formula is  (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7243  */
7244 #define DRT_ADDRESS_MASK                (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7245 #define DRT_ALIGN_ADDRESS(addr)         ((addr) & DRT_ADDRESS_MASK)
7246 
7247 /*
7248  * Hashtable address field handling.
7249  *
7250  * The low-order bits of the hashtable address are used to conserve
7251  * space.
7252  *
7253  * DRT_HASH_COUNT_MASK must be large enough to store the range
7254  * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
7255  * to indicate that the bucket is actually unoccupied.
7256  */
7257 #define DRT_HASH_GET_ADDRESS(scm, i)    ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
7258 #define DRT_HASH_SET_ADDRESS(scm, i, a)                                                                 \
7259 	do {                                                                                            \
7260 	        (scm)->scm_hashtable[(i)].dhe_control =                                                 \
7261 	            ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
7262 	} while (0)
7263 #define DRT_HASH_COUNT_MASK             0x1ff
7264 #define DRT_HASH_GET_COUNT(scm, i)      ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
7265 #define DRT_HASH_SET_COUNT(scm, i, c)                                                                                   \
7266 	do {                                                                                                            \
7267 	        (scm)->scm_hashtable[(i)].dhe_control =                                                                 \
7268 	            ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK);       \
7269 	} while (0)
7270 #define DRT_HASH_CLEAR(scm, i)                                                                                          \
7271 	do {                                                                                                            \
7272 	        (scm)->scm_hashtable[(i)].dhe_control =	0;                                                              \
7273 	} while (0)
7274 #define DRT_HASH_VACATE(scm, i)         DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
7275 #define DRT_HASH_VACANT(scm, i)         (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
7276 #define DRT_HASH_COPY(oscm, oi, scm, i)                                                                 \
7277 	do {                                                                                            \
7278 	        (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control;        \
7279 	        DRT_BITVECTOR_COPY(oscm, oi, scm, i);                                                   \
7280 	} while(0);
7281 
7282 
7283 #if !defined(XNU_TARGET_OS_OSX)
7284 /*
7285  * Hash table moduli.
7286  *
7287  * Since the hashtable entry's size is dependent on the size of
7288  * the bitvector, and since the hashtable size is constrained to
7289  * both being prime and fitting within the desired allocation
7290  * size, these values need to be manually determined.
7291  *
7292  * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes.
7293  *
7294  * The small hashtable allocation is 4096 bytes, so the modulus is 251.
7295  * The large hashtable allocation is 32768 bytes, so the modulus is 2039.
7296  * The xlarge hashtable allocation is 131072 bytes, so the modulus is 8179.
7297  */
7298 
7299 #define DRT_HASH_SMALL_MODULUS  251
7300 #define DRT_HASH_LARGE_MODULUS  2039
7301 #define DRT_HASH_XLARGE_MODULUS  8179
7302 
7303 /*
7304  * Physical memory required before the large hash modulus is permitted.
7305  *
7306  * On small memory systems, the large hash modulus can lead to phsyical
7307  * memory starvation, so we avoid using it there.
7308  */
7309 #define DRT_HASH_LARGE_MEMORY_REQUIRED  (1024LL * 1024LL * 1024LL)      /* 1GiB */
7310 #define DRT_HASH_XLARGE_MEMORY_REQUIRED  (8 * 1024LL * 1024LL * 1024LL)  /* 8GiB */
7311 
7312 #define DRT_SMALL_ALLOCATION    4096    /* 80 bytes spare */
7313 #define DRT_LARGE_ALLOCATION    32768   /* 144 bytes spare */
7314 #define DRT_XLARGE_ALLOCATION    131072  /* 208 bytes spare */
7315 
7316 #else /* XNU_TARGET_OS_OSX */
7317 /*
7318  * Hash table moduli.
7319  *
7320  * Since the hashtable entry's size is dependent on the size of
7321  * the bitvector, and since the hashtable size is constrained to
7322  * both being prime and fitting within the desired allocation
7323  * size, these values need to be manually determined.
7324  *
7325  * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes.
7326  *
7327  * The small hashtable allocation is 16384 bytes, so the modulus is 1019.
7328  * The large hashtable allocation is 131072 bytes, so the modulus is 8179.
7329  * The xlarge hashtable allocation is 524288 bytes, so the modulus is 32749.
7330  */
7331 
7332 #define DRT_HASH_SMALL_MODULUS  1019
7333 #define DRT_HASH_LARGE_MODULUS  8179
7334 #define DRT_HASH_XLARGE_MODULUS  32749
7335 
7336 /*
7337  * Physical memory required before the large hash modulus is permitted.
7338  *
7339  * On small memory systems, the large hash modulus can lead to phsyical
7340  * memory starvation, so we avoid using it there.
7341  */
7342 #define DRT_HASH_LARGE_MEMORY_REQUIRED  (4 * 1024LL * 1024LL * 1024LL)  /* 4GiB */
7343 #define DRT_HASH_XLARGE_MEMORY_REQUIRED  (32 * 1024LL * 1024LL * 1024LL)  /* 32GiB */
7344 
7345 #define DRT_SMALL_ALLOCATION    16384   /* 80 bytes spare */
7346 #define DRT_LARGE_ALLOCATION    131072  /* 208 bytes spare */
7347 #define DRT_XLARGE_ALLOCATION   524288  /* 304 bytes spare */
7348 
7349 #endif /* ! XNU_TARGET_OS_OSX */
7350 
7351 /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
7352 
7353 /*
7354  * Hashtable entry.
7355  */
7356 struct vfs_drt_hashentry {
7357 	u_int64_t       dhe_control;
7358 /*
7359  * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
7360  * DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE)
7361  * Since PAGE_SIZE is only known at boot time,
7362  *	-define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k)
7363  *	-declare dhe_bitvector array for largest possible length
7364  */
7365 #define MAX_DRT_BITVECTOR_PAGES (1024 * 256)/( 4 * 1024)
7366 	u_int32_t       dhe_bitvector[MAX_DRT_BITVECTOR_PAGES / 32];
7367 };
7368 
7369 /*
7370  * Hashtable bitvector handling.
7371  *
7372  * Bitvector fields are 32 bits long.
7373  */
7374 
7375 #define DRT_HASH_SET_BIT(scm, i, bit)                           \
7376 	(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
7377 
7378 #define DRT_HASH_CLEAR_BIT(scm, i, bit)                         \
7379 	(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
7380 
7381 #define DRT_HASH_TEST_BIT(scm, i, bit)                          \
7382 	((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
7383 
7384 #define DRT_BITVECTOR_CLEAR(scm, i)                             \
7385 	bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
7386 
7387 #define DRT_BITVECTOR_COPY(oscm, oi, scm, i)                    \
7388 	bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0],    \
7389 	    &(scm)->scm_hashtable[(i)].dhe_bitvector[0],        \
7390 	    (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
7391 
7392 /*
7393  * Dirty Region Tracking structure.
7394  *
7395  * The hashtable is allocated entirely inside the DRT structure.
7396  *
7397  * The hash is a simple circular prime modulus arrangement, the structure
7398  * is resized from small to large if it overflows.
7399  */
7400 
7401 struct vfs_drt_clustermap {
7402 	u_int32_t               scm_magic;      /* sanity/detection */
7403 #define DRT_SCM_MAGIC           0x12020003
7404 	u_int32_t               scm_modulus;    /* current ring size */
7405 	u_int32_t               scm_buckets;    /* number of occupied buckets */
7406 	u_int32_t               scm_lastclean;  /* last entry we cleaned */
7407 	u_int32_t               scm_iskips;     /* number of slot skips */
7408 
7409 	struct vfs_drt_hashentry scm_hashtable[0];
7410 };
7411 
7412 
7413 #define DRT_HASH(scm, addr)             ((addr) % (scm)->scm_modulus)
7414 #define DRT_HASH_NEXT(scm, addr)        (((addr) + 1) % (scm)->scm_modulus)
7415 
7416 /*
7417  * Debugging codes and arguments.
7418  */
7419 #define DRT_DEBUG_EMPTYFREE     (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
7420 #define DRT_DEBUG_RETCLUSTER    (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
7421 #define DRT_DEBUG_ALLOC         (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
7422 #define DRT_DEBUG_INSERT        (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
7423 #define DRT_DEBUG_MARK          (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
7424 	                                                    * dirty */
7425                                                            /* 0, setcount */
7426                                                            /* 1 (clean, no map) */
7427                                                            /* 2 (map alloc fail) */
7428                                                            /* 3, resid (partial) */
7429 #define DRT_DEBUG_6             (FSDBG_CODE(DBG_FSRW, 87))
7430 #define DRT_DEBUG_SCMDATA       (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
7431 	                                                    * lastclean, iskips */
7432 
7433 
7434 static kern_return_t    vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
7435 static kern_return_t    vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
7436 static kern_return_t    vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
7437     u_int64_t offset, int *indexp);
7438 static kern_return_t    vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
7439     u_int64_t offset,
7440     int *indexp,
7441     int recursed);
7442 static kern_return_t    vfs_drt_do_mark_pages(
7443 	void            **cmapp,
7444 	u_int64_t       offset,
7445 	u_int           length,
7446 	u_int           *setcountp,
7447 	int             dirty);
7448 static void             vfs_drt_trace(
7449 	struct vfs_drt_clustermap *cmap,
7450 	int code,
7451 	int arg1,
7452 	int arg2,
7453 	int arg3,
7454 	int arg4);
7455 
7456 
7457 /*
7458  * Allocate and initialise a sparse cluster map.
7459  *
7460  * Will allocate a new map, resize or compact an existing map.
7461  *
7462  * XXX we should probably have at least one intermediate map size,
7463  * as the 1:16 ratio seems a bit drastic.
7464  */
7465 static kern_return_t
vfs_drt_alloc_map(struct vfs_drt_clustermap ** cmapp)7466 vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
7467 {
7468 	struct vfs_drt_clustermap *cmap = NULL, *ocmap = NULL;
7469 	kern_return_t   kret = KERN_SUCCESS;
7470 	u_int64_t       offset = 0;
7471 	u_int32_t       i = 0;
7472 	int             modulus_size = 0, map_size = 0, active_buckets = 0, index = 0, copycount = 0;
7473 
7474 	ocmap = NULL;
7475 	if (cmapp != NULL) {
7476 		ocmap = *cmapp;
7477 	}
7478 
7479 	/*
7480 	 * Decide on the size of the new map.
7481 	 */
7482 	if (ocmap == NULL) {
7483 		modulus_size = DRT_HASH_SMALL_MODULUS;
7484 		map_size = DRT_SMALL_ALLOCATION;
7485 	} else {
7486 		/* count the number of active buckets in the old map */
7487 		active_buckets = 0;
7488 		for (i = 0; i < ocmap->scm_modulus; i++) {
7489 			if (!DRT_HASH_VACANT(ocmap, i) &&
7490 			    (DRT_HASH_GET_COUNT(ocmap, i) != 0)) {
7491 				active_buckets++;
7492 			}
7493 		}
7494 		/*
7495 		 * If we're currently using the small allocation, check to
7496 		 * see whether we should grow to the large one.
7497 		 */
7498 		if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7499 			/*
7500 			 * If the ring is nearly full and we are allowed to
7501 			 * use the large modulus, upgrade.
7502 			 */
7503 			if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
7504 			    (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
7505 				modulus_size = DRT_HASH_LARGE_MODULUS;
7506 				map_size = DRT_LARGE_ALLOCATION;
7507 			} else {
7508 				modulus_size = DRT_HASH_SMALL_MODULUS;
7509 				map_size = DRT_SMALL_ALLOCATION;
7510 			}
7511 		} else if (ocmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7512 			if ((active_buckets > (DRT_HASH_LARGE_MODULUS - 5)) &&
7513 			    (max_mem >= DRT_HASH_XLARGE_MEMORY_REQUIRED)) {
7514 				modulus_size = DRT_HASH_XLARGE_MODULUS;
7515 				map_size = DRT_XLARGE_ALLOCATION;
7516 			} else {
7517 				/*
7518 				 * If the ring is completely full and we can't
7519 				 * expand, there's nothing useful for us to do.
7520 				 * Behave as though we had compacted into the new
7521 				 * array and return.
7522 				 */
7523 				return KERN_SUCCESS;
7524 			}
7525 		} else {
7526 			/* already using the xlarge modulus */
7527 			modulus_size = DRT_HASH_XLARGE_MODULUS;
7528 			map_size = DRT_XLARGE_ALLOCATION;
7529 
7530 			/*
7531 			 * If the ring is completely full, there's
7532 			 * nothing useful for us to do.  Behave as
7533 			 * though we had compacted into the new
7534 			 * array and return.
7535 			 */
7536 			if (active_buckets >= DRT_HASH_XLARGE_MODULUS) {
7537 				return KERN_SUCCESS;
7538 			}
7539 		}
7540 	}
7541 
7542 	/*
7543 	 * Allocate and initialise the new map.
7544 	 */
7545 
7546 	kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, map_size,
7547 	    KMA_DATA, VM_KERN_MEMORY_FILE);
7548 	if (kret != KERN_SUCCESS) {
7549 		return kret;
7550 	}
7551 	cmap->scm_magic = DRT_SCM_MAGIC;
7552 	cmap->scm_modulus = modulus_size;
7553 	cmap->scm_buckets = 0;
7554 	cmap->scm_lastclean = 0;
7555 	cmap->scm_iskips = 0;
7556 	for (i = 0; i < cmap->scm_modulus; i++) {
7557 		DRT_HASH_CLEAR(cmap, i);
7558 		DRT_HASH_VACATE(cmap, i);
7559 		DRT_BITVECTOR_CLEAR(cmap, i);
7560 	}
7561 
7562 	/*
7563 	 * If there's an old map, re-hash entries from it into the new map.
7564 	 */
7565 	copycount = 0;
7566 	if (ocmap != NULL) {
7567 		for (i = 0; i < ocmap->scm_modulus; i++) {
7568 			/* skip empty buckets */
7569 			if (DRT_HASH_VACANT(ocmap, i) ||
7570 			    (DRT_HASH_GET_COUNT(ocmap, i) == 0)) {
7571 				continue;
7572 			}
7573 			/* get new index */
7574 			offset = DRT_HASH_GET_ADDRESS(ocmap, i);
7575 			kret = vfs_drt_get_index(&cmap, offset, &index, 1);
7576 			if (kret != KERN_SUCCESS) {
7577 				/* XXX need to bail out gracefully here */
7578 				panic("vfs_drt: new cluster map mysteriously too small");
7579 				index = 0;
7580 			}
7581 			/* copy */
7582 			DRT_HASH_COPY(ocmap, i, cmap, index);
7583 			copycount++;
7584 		}
7585 	}
7586 
7587 	/* log what we've done */
7588 	vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
7589 
7590 	/*
7591 	 * It's important to ensure that *cmapp always points to
7592 	 * a valid map, so we must overwrite it before freeing
7593 	 * the old map.
7594 	 */
7595 	*cmapp = cmap;
7596 	if (ocmap != NULL) {
7597 		/* emit stats into trace buffer */
7598 		vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
7599 		    ocmap->scm_modulus,
7600 		    ocmap->scm_buckets,
7601 		    ocmap->scm_lastclean,
7602 		    ocmap->scm_iskips);
7603 
7604 		vfs_drt_free_map(ocmap);
7605 	}
7606 	return KERN_SUCCESS;
7607 }
7608 
7609 
7610 /*
7611  * Free a sparse cluster map.
7612  */
7613 static kern_return_t
vfs_drt_free_map(struct vfs_drt_clustermap * cmap)7614 vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
7615 {
7616 	vm_size_t map_size = 0;
7617 
7618 	if (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7619 		map_size = DRT_SMALL_ALLOCATION;
7620 	} else if (cmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7621 		map_size = DRT_LARGE_ALLOCATION;
7622 	} else if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
7623 		map_size = DRT_XLARGE_ALLOCATION;
7624 	} else {
7625 		panic("vfs_drt_free_map: Invalid modulus %d", cmap->scm_modulus);
7626 	}
7627 
7628 	kmem_free(kernel_map, (vm_offset_t)cmap, map_size);
7629 	return KERN_SUCCESS;
7630 }
7631 
7632 
7633 /*
7634  * Find the hashtable slot currently occupied by an entry for the supplied offset.
7635  */
7636 static kern_return_t
vfs_drt_search_index(struct vfs_drt_clustermap * cmap,u_int64_t offset,int * indexp)7637 vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
7638 {
7639 	int             index;
7640 	u_int32_t       i;
7641 
7642 	offset = DRT_ALIGN_ADDRESS(offset);
7643 	index = DRT_HASH(cmap, offset);
7644 
7645 	/* traverse the hashtable */
7646 	for (i = 0; i < cmap->scm_modulus; i++) {
7647 		/*
7648 		 * If the slot is vacant, we can stop.
7649 		 */
7650 		if (DRT_HASH_VACANT(cmap, index)) {
7651 			break;
7652 		}
7653 
7654 		/*
7655 		 * If the address matches our offset, we have success.
7656 		 */
7657 		if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
7658 			*indexp = index;
7659 			return KERN_SUCCESS;
7660 		}
7661 
7662 		/*
7663 		 * Move to the next slot, try again.
7664 		 */
7665 		index = DRT_HASH_NEXT(cmap, index);
7666 	}
7667 	/*
7668 	 * It's not there.
7669 	 */
7670 	return KERN_FAILURE;
7671 }
7672 
7673 /*
7674  * Find the hashtable slot for the supplied offset.  If we haven't allocated
7675  * one yet, allocate one and populate the address field.  Note that it will
7676  * not have a nonzero page count and thus will still technically be free, so
7677  * in the case where we are called to clean pages, the slot will remain free.
7678  */
7679 static kern_return_t
vfs_drt_get_index(struct vfs_drt_clustermap ** cmapp,u_int64_t offset,int * indexp,int recursed)7680 vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
7681 {
7682 	struct vfs_drt_clustermap *cmap;
7683 	kern_return_t   kret;
7684 	u_int32_t       index;
7685 	u_int32_t       i;
7686 
7687 	cmap = *cmapp;
7688 
7689 	/* look for an existing entry */
7690 	kret = vfs_drt_search_index(cmap, offset, indexp);
7691 	if (kret == KERN_SUCCESS) {
7692 		return kret;
7693 	}
7694 
7695 	/* need to allocate an entry */
7696 	offset = DRT_ALIGN_ADDRESS(offset);
7697 	index = DRT_HASH(cmap, offset);
7698 
7699 	/* scan from the index forwards looking for a vacant slot */
7700 	for (i = 0; i < cmap->scm_modulus; i++) {
7701 		/* slot vacant? */
7702 		if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap, index) == 0) {
7703 			cmap->scm_buckets++;
7704 			if (index < cmap->scm_lastclean) {
7705 				cmap->scm_lastclean = index;
7706 			}
7707 			DRT_HASH_SET_ADDRESS(cmap, index, offset);
7708 			DRT_HASH_SET_COUNT(cmap, index, 0);
7709 			DRT_BITVECTOR_CLEAR(cmap, index);
7710 			*indexp = index;
7711 			vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
7712 			return KERN_SUCCESS;
7713 		}
7714 		cmap->scm_iskips += i;
7715 		index = DRT_HASH_NEXT(cmap, index);
7716 	}
7717 
7718 	/*
7719 	 * We haven't found a vacant slot, so the map is full.  If we're not
7720 	 * already recursed, try reallocating/compacting it.
7721 	 */
7722 	if (recursed) {
7723 		return KERN_FAILURE;
7724 	}
7725 	kret = vfs_drt_alloc_map(cmapp);
7726 	if (kret == KERN_SUCCESS) {
7727 		/* now try to insert again */
7728 		kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
7729 	}
7730 	return kret;
7731 }
7732 
7733 /*
7734  * Implementation of set dirty/clean.
7735  *
7736  * In the 'clean' case, not finding a map is OK.
7737  */
7738 static kern_return_t
vfs_drt_do_mark_pages(void ** private,u_int64_t offset,u_int length,u_int * setcountp,int dirty)7739 vfs_drt_do_mark_pages(
7740 	void            **private,
7741 	u_int64_t       offset,
7742 	u_int           length,
7743 	u_int           *setcountp,
7744 	int             dirty)
7745 {
7746 	struct vfs_drt_clustermap *cmap, **cmapp;
7747 	kern_return_t   kret;
7748 	int             i, index, pgoff, pgcount, setcount, ecount;
7749 
7750 	cmapp = (struct vfs_drt_clustermap **)private;
7751 	cmap = *cmapp;
7752 
7753 	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
7754 
7755 	if (setcountp != NULL) {
7756 		*setcountp = 0;
7757 	}
7758 
7759 	/* allocate a cluster map if we don't already have one */
7760 	if (cmap == NULL) {
7761 		/* no cluster map, nothing to clean */
7762 		if (!dirty) {
7763 			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
7764 			return KERN_SUCCESS;
7765 		}
7766 		kret = vfs_drt_alloc_map(cmapp);
7767 		if (kret != KERN_SUCCESS) {
7768 			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
7769 			return kret;
7770 		}
7771 	}
7772 	setcount = 0;
7773 
7774 	/*
7775 	 * Iterate over the length of the region.
7776 	 */
7777 	while (length > 0) {
7778 		/*
7779 		 * Get the hashtable index for this offset.
7780 		 *
7781 		 * XXX this will add blank entries if we are clearing a range
7782 		 * that hasn't been dirtied.
7783 		 */
7784 		kret = vfs_drt_get_index(cmapp, offset, &index, 0);
7785 		cmap = *cmapp;  /* may have changed! */
7786 		/* this may be a partial-success return */
7787 		if (kret != KERN_SUCCESS) {
7788 			if (setcountp != NULL) {
7789 				*setcountp = setcount;
7790 			}
7791 			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
7792 
7793 			return kret;
7794 		}
7795 
7796 		/*
7797 		 * Work out how many pages we're modifying in this
7798 		 * hashtable entry.
7799 		 */
7800 		pgoff = (int)((offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE);
7801 		pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
7802 
7803 		/*
7804 		 * Iterate over pages, dirty/clearing as we go.
7805 		 */
7806 		ecount = DRT_HASH_GET_COUNT(cmap, index);
7807 		for (i = 0; i < pgcount; i++) {
7808 			if (dirty) {
7809 				if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
7810 					if (ecount >= DRT_BITVECTOR_PAGES) {
7811 						panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
7812 					}
7813 					DRT_HASH_SET_BIT(cmap, index, pgoff + i);
7814 					ecount++;
7815 					setcount++;
7816 				}
7817 			} else {
7818 				if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
7819 					if (ecount <= 0) {
7820 						panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
7821 					}
7822 					assert(ecount > 0);
7823 					DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
7824 					ecount--;
7825 					setcount++;
7826 				}
7827 			}
7828 		}
7829 		DRT_HASH_SET_COUNT(cmap, index, ecount);
7830 
7831 		offset += pgcount * PAGE_SIZE;
7832 		length -= pgcount * PAGE_SIZE;
7833 	}
7834 	if (setcountp != NULL) {
7835 		*setcountp = setcount;
7836 	}
7837 
7838 	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
7839 
7840 	return KERN_SUCCESS;
7841 }
7842 
7843 /*
7844  * Mark a set of pages as dirty/clean.
7845  *
7846  * This is a public interface.
7847  *
7848  * cmapp
7849  *	Pointer to storage suitable for holding a pointer.  Note that
7850  *	this must either be NULL or a value set by this function.
7851  *
7852  * size
7853  *	Current file size in bytes.
7854  *
7855  * offset
7856  *	Offset of the first page to be marked as dirty, in bytes.  Must be
7857  *	page-aligned.
7858  *
7859  * length
7860  *	Length of dirty region, in bytes.  Must be a multiple of PAGE_SIZE.
7861  *
7862  * setcountp
7863  *	Number of pages newly marked dirty by this call (optional).
7864  *
7865  * Returns KERN_SUCCESS if all the pages were successfully marked.
7866  */
7867 static kern_return_t
vfs_drt_mark_pages(void ** cmapp,off_t offset,u_int length,u_int * setcountp)7868 vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
7869 {
7870 	/* XXX size unused, drop from interface */
7871 	return vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1);
7872 }
7873 
7874 #if 0
7875 static kern_return_t
7876 vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
7877 {
7878 	return vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
7879 }
7880 #endif
7881 
7882 /*
7883  * Get a cluster of dirty pages.
7884  *
7885  * This is a public interface.
7886  *
7887  * cmapp
7888  *	Pointer to storage managed by drt_mark_pages.  Note that this must
7889  *	be NULL or a value set by drt_mark_pages.
7890  *
7891  * offsetp
7892  *	Returns the byte offset into the file of the first page in the cluster.
7893  *
7894  * lengthp
7895  *	Returns the length in bytes of the cluster of dirty pages.
7896  *
7897  * Returns success if a cluster was found.  If KERN_FAILURE is returned, there
7898  * are no dirty pages meeting the minmum size criteria.  Private storage will
7899  * be released if there are no more dirty pages left in the map
7900  *
7901  */
7902 static kern_return_t
vfs_drt_get_cluster(void ** cmapp,off_t * offsetp,u_int * lengthp)7903 vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
7904 {
7905 	struct vfs_drt_clustermap *cmap;
7906 	u_int64_t       offset;
7907 	u_int           length;
7908 	u_int32_t       j;
7909 	int             index, i, fs, ls;
7910 
7911 	/* sanity */
7912 	if ((cmapp == NULL) || (*cmapp == NULL)) {
7913 		return KERN_FAILURE;
7914 	}
7915 	cmap = *cmapp;
7916 
7917 	/* walk the hashtable */
7918 	for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
7919 		index = DRT_HASH(cmap, offset);
7920 
7921 		if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) {
7922 			continue;
7923 		}
7924 
7925 		/* scan the bitfield for a string of bits */
7926 		fs = -1;
7927 
7928 		for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
7929 			if (DRT_HASH_TEST_BIT(cmap, index, i)) {
7930 				fs = i;
7931 				break;
7932 			}
7933 		}
7934 		if (fs == -1) {
7935 			/*  didn't find any bits set */
7936 			panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld",
7937 			    cmap, index, DRT_HASH_GET_COUNT(cmap, index));
7938 		}
7939 		for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
7940 			if (!DRT_HASH_TEST_BIT(cmap, index, i)) {
7941 				break;
7942 			}
7943 		}
7944 
7945 		/* compute offset and length, mark pages clean */
7946 		offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
7947 		length = ls * PAGE_SIZE;
7948 		vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
7949 		cmap->scm_lastclean = index;
7950 
7951 		/* return successful */
7952 		*offsetp = (off_t)offset;
7953 		*lengthp = length;
7954 
7955 		vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
7956 		return KERN_SUCCESS;
7957 	}
7958 	/*
7959 	 * We didn't find anything... hashtable is empty
7960 	 * emit stats into trace buffer and
7961 	 * then free it
7962 	 */
7963 	vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
7964 	    cmap->scm_modulus,
7965 	    cmap->scm_buckets,
7966 	    cmap->scm_lastclean,
7967 	    cmap->scm_iskips);
7968 
7969 	vfs_drt_free_map(cmap);
7970 	*cmapp = NULL;
7971 
7972 	return KERN_FAILURE;
7973 }
7974 
7975 
7976 static kern_return_t
vfs_drt_control(void ** cmapp,int op_type)7977 vfs_drt_control(void **cmapp, int op_type)
7978 {
7979 	struct vfs_drt_clustermap *cmap;
7980 
7981 	/* sanity */
7982 	if ((cmapp == NULL) || (*cmapp == NULL)) {
7983 		return KERN_FAILURE;
7984 	}
7985 	cmap = *cmapp;
7986 
7987 	switch (op_type) {
7988 	case 0:
7989 		/* emit stats into trace buffer */
7990 		vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
7991 		    cmap->scm_modulus,
7992 		    cmap->scm_buckets,
7993 		    cmap->scm_lastclean,
7994 		    cmap->scm_iskips);
7995 
7996 		vfs_drt_free_map(cmap);
7997 		*cmapp = NULL;
7998 		break;
7999 
8000 	case 1:
8001 		cmap->scm_lastclean = 0;
8002 		break;
8003 	}
8004 	return KERN_SUCCESS;
8005 }
8006 
8007 
8008 
8009 /*
8010  * Emit a summary of the state of the clustermap into the trace buffer
8011  * along with some caller-provided data.
8012  */
8013 #if KDEBUG
8014 static void
vfs_drt_trace(__unused struct vfs_drt_clustermap * cmap,int code,int arg1,int arg2,int arg3,int arg4)8015 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
8016 {
8017 	KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
8018 }
8019 #else
8020 static void
vfs_drt_trace(__unused struct vfs_drt_clustermap * cmap,__unused int code,__unused int arg1,__unused int arg2,__unused int arg3,__unused int arg4)8021 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
8022     __unused int arg1, __unused int arg2, __unused int arg3,
8023     __unused int arg4)
8024 {
8025 }
8026 #endif
8027 
8028 #if 0
8029 /*
8030  * Perform basic sanity check on the hash entry summary count
8031  * vs. the actual bits set in the entry.
8032  */
8033 static void
8034 vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
8035 {
8036 	int index, i;
8037 	int bits_on;
8038 
8039 	for (index = 0; index < cmap->scm_modulus; index++) {
8040 		if (DRT_HASH_VACANT(cmap, index)) {
8041 			continue;
8042 		}
8043 
8044 		for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
8045 			if (DRT_HASH_TEST_BIT(cmap, index, i)) {
8046 				bits_on++;
8047 			}
8048 		}
8049 		if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) {
8050 			panic("bits_on = %d,  index = %d", bits_on, index);
8051 		}
8052 	}
8053 }
8054 #endif
8055 
8056 /*
8057  * Internal interface only.
8058  */
8059 static kern_return_t
vfs_get_scmap_push_behavior_internal(void ** cmapp,int * push_flag)8060 vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag)
8061 {
8062 	struct vfs_drt_clustermap *cmap;
8063 
8064 	/* sanity */
8065 	if ((cmapp == NULL) || (*cmapp == NULL) || (push_flag == NULL)) {
8066 		return KERN_FAILURE;
8067 	}
8068 	cmap = *cmapp;
8069 
8070 	if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
8071 		/*
8072 		 * If we have a full xlarge sparse cluster,
8073 		 * we push it out all at once so the cluster
8074 		 * map can be available to absorb more I/Os.
8075 		 * This is done on large memory configs so
8076 		 * the small I/Os don't interfere with the
8077 		 * pro workloads.
8078 		 */
8079 		*push_flag = PUSH_ALL;
8080 	}
8081 	return KERN_SUCCESS;
8082 }
8083