1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)vfs_cluster.c 8.10 (Berkeley) 3/28/95
62 */
63
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/buf_internal.h>
67 #include <sys/mount_internal.h>
68 #include <sys/vnode_internal.h>
69 #include <sys/trace.h>
70 #include <kern/kalloc.h>
71 #include <sys/time.h>
72 #include <sys/kernel.h>
73 #include <sys/resourcevar.h>
74 #include <miscfs/specfs/specdev.h>
75 #include <sys/uio_internal.h>
76 #include <libkern/libkern.h>
77 #include <machine/machine_routines.h>
78
79 #include <sys/ubc_internal.h>
80 #include <vm/vnode_pager.h>
81 #include <vm/vm_upl.h>
82
83 #include <mach/mach_types.h>
84 #include <mach/memory_object_types.h>
85 #include <mach/vm_map.h>
86 #include <mach/upl.h>
87 #include <mach/thread_info.h>
88 #include <kern/task.h>
89 #include <kern/policy_internal.h>
90 #include <kern/thread.h>
91
92 #include <vm/vm_kern_xnu.h>
93 #include <vm/vm_map_xnu.h>
94 #include <vm/vm_pageout_xnu.h>
95 #include <vm/vm_fault.h>
96 #include <vm/vm_ubc.h>
97
98 #include <sys/kdebug.h>
99 #include <sys/kdebug_triage.h>
100 #include <libkern/OSAtomic.h>
101
102 #include <sys/sdt.h>
103
104 #include <stdbool.h>
105
106 #include <vfs/vfs_disk_conditioner.h>
107
108 #if 0
109 #undef KERNEL_DEBUG
110 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
111 #endif
112
113
114 #define CL_READ 0x01
115 #define CL_WRITE 0x02
116 #define CL_ASYNC 0x04
117 #define CL_COMMIT 0x08
118 #define CL_PAGEOUT 0x10
119 #define CL_AGE 0x20
120 #define CL_NOZERO 0x40
121 #define CL_PAGEIN 0x80
122 #define CL_DEV_MEMORY 0x100
123 #define CL_PRESERVE 0x200
124 #define CL_THROTTLE 0x400
125 #define CL_KEEPCACHED 0x800
126 #define CL_DIRECT_IO 0x1000
127 #define CL_PASSIVE 0x2000
128 #define CL_IOSTREAMING 0x4000
129 #define CL_CLOSE 0x8000
130 #define CL_ENCRYPTED 0x10000
131 #define CL_RAW_ENCRYPTED 0x20000
132 #define CL_NOCACHE 0x40000
133 #define CL_DIRECT_IO_FSBLKSZ 0x80000
134
135 #define MAX_VECTOR_UPL_SIZE (2 * MAX_UPL_SIZE_BYTES)
136
137 #define CLUSTER_IO_WAITING ((buf_t)1)
138
139 extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, upl_size_t);
140
141 struct clios {
142 lck_mtx_t io_mtxp;
143 u_int io_completed; /* amount of io that has currently completed */
144 u_int io_issued; /* amount of io that was successfully issued */
145 int io_error; /* error code of first error encountered */
146 int io_wanted; /* someone is sleeping waiting for a change in state */
147 };
148
149 struct cl_direct_read_lock {
150 LIST_ENTRY(cl_direct_read_lock) chain;
151 int32_t ref_count;
152 vnode_t vp;
153 lck_rw_t rw_lock;
154 };
155
156 #define CL_DIRECT_READ_LOCK_BUCKETS 61
157
158 static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock)
159 cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS];
160
161 static LCK_GRP_DECLARE(cl_mtx_grp, "cluster I/O");
162 static LCK_MTX_DECLARE(cl_transaction_mtxp, &cl_mtx_grp);
163 static LCK_SPIN_DECLARE(cl_direct_read_spin_lock, &cl_mtx_grp);
164
165 static ZONE_DEFINE(cl_rd_zone, "cluster_read",
166 sizeof(struct cl_readahead), ZC_ZFREE_CLEARMEM);
167
168 static ZONE_DEFINE(cl_wr_zone, "cluster_write",
169 sizeof(struct cl_writebehind), ZC_ZFREE_CLEARMEM);
170
171 #define IO_UNKNOWN 0
172 #define IO_DIRECT 1
173 #define IO_CONTIG 2
174 #define IO_COPY 3
175
176 #define PUSH_DELAY 0x01
177 #define PUSH_ALL 0x02
178 #define PUSH_SYNC 0x04
179
180
181 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size);
182 static void cluster_wait_IO(buf_t cbp_head, int async);
183 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
184
185 static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
186
187 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
188 int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
189 static void cluster_iodone_verify_continue(void);
190 static int cluster_iodone(buf_t bp, void *callback_arg);
191 static int cluster_iodone_finish(buf_t cbp_head, void *callback_arg);
192 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp);
193 static int cluster_is_throttled(vnode_t vp);
194
195 static void cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name);
196
197 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg, int flags);
198
199 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
200 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
201
202 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
203 int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
204 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
205 int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
206 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
207 int (*)(buf_t, void *), void *callback_arg, int flags) __attribute__((noinline));
208
209 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
210 off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
211 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
212 int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size) __attribute__((noinline));
213 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
214 int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag) __attribute__((noinline));
215
216 static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass,
217 off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
218
219 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
220
221 static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
222 static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra,
223 int (*callback)(buf_t, void *), void *callback_arg, int bflag);
224
225 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
226
227 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *),
228 void *callback_arg, int *err, boolean_t vm_initiated);
229
230 static int sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
231 static int sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag,
232 int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
233 static int sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF,
234 int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
235
236 static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
237 static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
238 static kern_return_t vfs_drt_control(void **cmapp, int op_type);
239 static kern_return_t vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag);
240
241
242 /*
243 * For throttled IO to check whether
244 * a block is cached by the boot cache
245 * and thus it can avoid delaying the IO.
246 *
247 * bootcache_contains_block is initially
248 * NULL. The BootCache will set it while
249 * the cache is active and clear it when
250 * the cache is jettisoned.
251 *
252 * Returns 0 if the block is not
253 * contained in the cache, 1 if it is
254 * contained.
255 *
256 * The function pointer remains valid
257 * after the cache has been evicted even
258 * if bootcache_contains_block has been
259 * cleared.
260 *
261 * See rdar://9974130 The new throttling mechanism breaks the boot cache for throttled IOs
262 */
263 int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL;
264
265
266 /*
267 * limit the internal I/O size so that we
268 * can represent it in a 32 bit int
269 */
270 #define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512)
271 #define MAX_IO_CONTIG_SIZE MAX_UPL_SIZE_BYTES
272 #define MAX_VECTS 16
273 /*
274 * The MIN_DIRECT_WRITE_SIZE governs how much I/O should be issued before we consider
275 * allowing the caller to bypass the buffer cache. For small I/Os (less than 16k),
276 * we have not historically allowed the write to bypass the UBC.
277 */
278 #define MIN_DIRECT_WRITE_SIZE (16384)
279
280 #define WRITE_THROTTLE 6
281 #define WRITE_THROTTLE_SSD 2
282 #define WRITE_BEHIND 1
283 #define WRITE_BEHIND_SSD 1
284
285 #if !defined(XNU_TARGET_OS_OSX)
286 #define PREFETCH 1
287 #define PREFETCH_SSD 1
288 uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a speculative read-ahead */
289 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead */
290 #else /* XNU_TARGET_OS_OSX */
291 #define PREFETCH 3
292 #define PREFETCH_SSD 2
293 uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a speculative read-ahead */
294 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead on SSDs */
295 #endif /* ! XNU_TARGET_OS_OSX */
296
297 /* maximum bytes for read-ahead */
298 uint32_t prefetch_max = (1024 * 1024 * 1024);
299 /* maximum bytes for outstanding reads */
300 uint32_t overlapping_read_max = (1024 * 1024 * 1024);
301 /* maximum bytes for outstanding writes */
302 uint32_t overlapping_write_max = (1024 * 1024 * 1024);
303
304 #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base))
305 #define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
306
307 int speculative_reads_disabled = 0;
308
309 /*
310 * throttle the number of async writes that
311 * can be outstanding on a single vnode
312 * before we issue a synchronous write
313 */
314 #define THROTTLE_MAXCNT 0
315
316 uint32_t throttle_max_iosize = (128 * 1024);
317
318 #define THROTTLE_MAX_IOSIZE (throttle_max_iosize)
319
320 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_max_iosize, 0, "");
321
322 struct verify_buf {
323 TAILQ_ENTRY(verify_buf) vb_entry;
324 buf_t vb_cbp;
325 void* vb_callback_arg;
326 int32_t vb_whichq;
327 };
328
329 TAILQ_HEAD(, verify_buf) verify_free_head;
330 TAILQ_HEAD(, verify_buf) verify_work_head;
331
332 #define MAX_VERIFY_THREADS 4
333 #define MAX_REQUESTS_PER_THREAD 2
334
335 static struct verify_buf verify_bufs[MAX_VERIFY_THREADS * MAX_REQUESTS_PER_THREAD];
336 /*
337 * Each thread needs to check if the item at the head of the queue has a UPL
338 * pointer that any of the other threads are currently operating on.
339 * Slot 0 is for the io completion thread to do the request inline if there are no free
340 * queue slots.
341 */
342 static int verify_in_flight = 0;
343
344 #if defined(XNU_TARGET_OS_IOS)
345 #define NUM_DEFAULT_THREADS 2
346 #else
347 #define NUM_DEFAULT_THREADS 0
348 #endif
349
350 static TUNABLE(uint32_t, num_verify_threads, "num_verify_threads", NUM_DEFAULT_THREADS);
351 static uint32_t cluster_verify_threads = 0; /* will be launched as needed up to num_verify_threads */
352
353 static void
354 cluster_verify_init(void)
355 {
356 TAILQ_INIT(&verify_free_head);
357 TAILQ_INIT(&verify_work_head);
358
359 if (num_verify_threads > MAX_VERIFY_THREADS) {
360 num_verify_threads = MAX_VERIFY_THREADS;
361 }
362
363 for (int i = 0; i < num_verify_threads * MAX_REQUESTS_PER_THREAD; i++) {
364 TAILQ_INSERT_TAIL(&verify_free_head, &verify_bufs[i], vb_entry);
365 }
366 }
367
368 void
369 cluster_init(void)
370 {
371 for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) {
372 LIST_INIT(&cl_direct_read_locks[i]);
373 }
374
375 cluster_verify_init();
376 }
377
378 uint32_t
379 cluster_max_io_size(mount_t mp, int type)
380 {
381 uint32_t max_io_size;
382 uint32_t segcnt;
383 uint32_t maxcnt;
384
385 switch (type) {
386 case CL_READ:
387 segcnt = mp->mnt_segreadcnt;
388 maxcnt = mp->mnt_maxreadcnt;
389 break;
390 case CL_WRITE:
391 segcnt = mp->mnt_segwritecnt;
392 maxcnt = mp->mnt_maxwritecnt;
393 break;
394 default:
395 segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
396 maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
397 break;
398 }
399 if (segcnt > (MAX_UPL_SIZE_BYTES >> PAGE_SHIFT)) {
400 /*
401 * don't allow a size beyond the max UPL size we can create
402 */
403 segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
404 }
405 max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
406
407 if (max_io_size < MAX_UPL_TRANSFER_BYTES) {
408 /*
409 * don't allow a size smaller than the old fixed limit
410 */
411 max_io_size = MAX_UPL_TRANSFER_BYTES;
412 } else {
413 /*
414 * make sure the size specified is a multiple of PAGE_SIZE
415 */
416 max_io_size &= ~PAGE_MASK;
417 }
418 return max_io_size;
419 }
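
/*
 * Illustrative example (hypothetical device limits): on a 4K-page system
 * with mnt_segwritecnt = 256 and mnt_maxwritecnt = 2MB, a CL_WRITE query
 * computes min(256 * 4096, 2MB) = 1MB; that result is then raised to
 * MAX_UPL_TRANSFER_BYTES if it falls below that floor, or truncated to a
 * page multiple otherwise.
 */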
420
421 /*
422 * Returns max prefetch value. If the value overflows or exceeds the specified
423 * 'prefetch_limit', it will be capped at 'prefetch_limit' value.
424 */
425 static inline uint32_t
426 cluster_max_prefetch(vnode_t vp, uint32_t max_io_size, uint32_t prefetch_limit)
427 {
428 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
429 uint32_t io_scale = IO_SCALE(vp, is_ssd ? PREFETCH_SSD : PREFETCH);
430 uint32_t prefetch = 0;
431
432 if (__improbable(os_mul_overflow(max_io_size, io_scale, &prefetch) ||
433 (prefetch > prefetch_limit))) {
434 prefetch = prefetch_limit;
435 }
436
437 return prefetch;
438 }
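
/*
 * Example (hypothetical values): with max_io_size = 1MB and an io_scale of 3
 * (PREFETCH on a non-SSD mount with mnt_ioscale == 1), the computed prefetch
 * is 3MB; if the multiplication overflowed, or the product exceeded the
 * caller-supplied prefetch_limit, the result would be clamped to
 * prefetch_limit instead.
 */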
439
440 static inline uint32_t
441 calculate_max_throttle_size(vnode_t vp)
442 {
443 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
444 uint32_t io_scale = IO_SCALE(vp, is_ssd ? 2 : 1);
445
446 return MIN(io_scale * THROTTLE_MAX_IOSIZE, MAX_UPL_TRANSFER_BYTES);
447 }
448
449 static inline uint32_t
450 calculate_max_throttle_cnt(vnode_t vp)
451 {
452 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
453 uint32_t io_scale = IO_SCALE(vp, 1);
454
455 return is_ssd ? MIN(io_scale, 4) : THROTTLE_MAXCNT;
456 }
457
458 #define CLW_ALLOCATE 0x01
459 #define CLW_RETURNLOCKED 0x02
460 #define CLW_IONOCACHE 0x04
461 #define CLW_IOPASSIVE 0x08
462
463 /*
464 * if the read ahead context doesn't yet exist,
465 * allocate and initialize it...
466 * the vnode lock serializes multiple callers
467 * during the actual assignment... first one
468 * to grab the lock wins... the other callers
469 * will release the now unnecessary storage
470 *
471 * once the context is present, try to grab (but don't block on)
472 * the lock associated with it... if someone
473 * else currently owns it, then the read
474 * will run without read-ahead. this allows
475 * multiple readers to run in parallel and
476 * since there's only 1 read ahead context,
477 * there's no real loss in only allowing 1
478 * reader to have read-ahead enabled.
479 */
480 static struct cl_readahead *
481 cluster_get_rap(vnode_t vp)
482 {
483 struct ubc_info *ubc;
484 struct cl_readahead *rap;
485
486 ubc = vp->v_ubcinfo;
487
488 if ((rap = ubc->cl_rahead) == NULL) {
489 rap = zalloc_flags(cl_rd_zone, Z_WAITOK | Z_ZERO);
490 rap->cl_lastr = -1;
491 lck_mtx_init(&rap->cl_lockr, &cl_mtx_grp, LCK_ATTR_NULL);
492
493 vnode_lock(vp);
494
495 if (ubc->cl_rahead == NULL) {
496 ubc->cl_rahead = rap;
497 } else {
498 lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
499 zfree(cl_rd_zone, rap);
500 rap = ubc->cl_rahead;
501 }
502 vnode_unlock(vp);
503 }
504 if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE) {
505 return rap;
506 }
507
508 return (struct cl_readahead *)NULL;
509 }
510
511
512 /*
513 * if the write behind context doesn't yet exist,
514 * and CLW_ALLOCATE is specified, allocate and initialize it...
515 * the vnode lock serializes multiple callers
516 * during the actual assignment... first one
517 * to grab the lock wins... the other callers
518 * will release the now unnecessary storage
519 *
520 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
521 * the lock associated with the write behind context before
522 * returning
523 */
524
525 static struct cl_writebehind *
526 cluster_get_wbp(vnode_t vp, int flags)
527 {
528 struct ubc_info *ubc;
529 struct cl_writebehind *wbp;
530
531 ubc = vp->v_ubcinfo;
532
533 if ((wbp = ubc->cl_wbehind) == NULL) {
534 if (!(flags & CLW_ALLOCATE)) {
535 return (struct cl_writebehind *)NULL;
536 }
537
538 wbp = zalloc_flags(cl_wr_zone, Z_WAITOK | Z_ZERO);
539
540 lck_mtx_init(&wbp->cl_lockw, &cl_mtx_grp, LCK_ATTR_NULL);
541
542 vnode_lock(vp);
543
544 if (ubc->cl_wbehind == NULL) {
545 ubc->cl_wbehind = wbp;
546 } else {
547 lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
548 zfree(cl_wr_zone, wbp);
549 wbp = ubc->cl_wbehind;
550 }
551 vnode_unlock(vp);
552 }
553 if (flags & CLW_RETURNLOCKED) {
554 lck_mtx_lock(&wbp->cl_lockw);
555 }
556
557 return wbp;
558 }
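
/*
 * Typical usage sketch (illustrative; not lifted from a specific caller):
 *
 *	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
 *	... manipulate the write-behind state ...
 *	lck_mtx_unlock(&wbp->cl_lockw);
 *
 * With CLW_RETURNLOCKED the context comes back with cl_lockw held, so the
 * caller must drop it when done.  Without CLW_ALLOCATE the call simply
 * returns NULL when no write-behind context exists yet (see cluster_syncup).
 */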
559
560
561 static void
562 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, int flags)
563 {
564 struct cl_writebehind *wbp;
565
566 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
567 if (wbp->cl_number) {
568 lck_mtx_lock(&wbp->cl_lockw);
569
570 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL, FALSE);
571
572 lck_mtx_unlock(&wbp->cl_lockw);
573 }
574 }
575 }
576
577
578 static int
579 cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
580 {
581 daddr64_t blkno;
582 size_t io_size;
583 int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
584
585 if (bootcache_check_fn && vp->v_mount && vp->v_mount->mnt_devvp) {
586 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL)) {
587 return 0;
588 }
589
590 if (io_size == 0) {
591 return 0;
592 }
593
594 if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno)) {
595 return 1;
596 }
597 }
598 return 0;
599 }
600
601
602 static int
603 cluster_is_throttled(vnode_t vp)
604 {
605 return throttle_io_will_be_throttled(-1, vp->v_mount);
606 }
607
608
609 static void
610 cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name)
611 {
612 lck_mtx_lock(&iostate->io_mtxp);
613
614 while ((iostate->io_issued - iostate->io_completed) > target) {
615 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
616 iostate->io_issued, iostate->io_completed, target, 0, 0);
617
618 iostate->io_wanted = 1;
619 msleep((caddr_t)&iostate->io_wanted, &iostate->io_mtxp, PRIBIO + 1, wait_name, NULL);
620
621 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
622 iostate->io_issued, iostate->io_completed, target, 0, 0);
623 }
624 lck_mtx_unlock(&iostate->io_mtxp);
625 }
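
/*
 * The clios protocol (see the field comments on struct clios above): the
 * issuing side accounts each I/O in io_issued before it is started, and
 * cluster_iodone_finish() adds the completed byte count to io_completed and
 * wakes any waiter that set io_wanted.  cluster_iostate_wait(iostate, target,
 * wait_name) therefore blocks until no more than 'target' bytes remain
 * outstanding; a target of 0 waits for the stream to drain completely.
 */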
626
627
628 static void
629 cluster_handle_associated_upl(struct clios *iostate, upl_t upl,
630 upl_offset_t upl_offset, upl_size_t size, off_t f_offset)
631 {
632 if (!size) {
633 return;
634 }
635
636 upl_t associated_upl = upl_associated_upl(upl);
637
638 if (!associated_upl) {
639 return;
640 }
641
642 /*
643 * The associated upl functions as a "range lock" for the file.
644 *
645 * The associated upl is created and attached to the upl in
646 * cluster_io when the direct io write is being started. Since the
647 * upl may be released in parts, the corresponding associated upl
648 * has to be released in parts as well.
649 *
650 * We have the f_offset, upl_offset and size, and from those we have to
651 * figure out the associated upl offset and length we are interested in.
652 */
653 upl_offset_t assoc_upl_offset, assoc_upl_end;
654
655 /* ALIGNED UPL's */
656 if ((upl_offset & PAGE_MASK) == (f_offset & PAGE_MASK)) {
657 assoc_upl_offset = trunc_page_32(upl_offset);
658 assoc_upl_end = round_page_32(upl_offset + size);
659 goto do_commit;
660 }
661
662 /*
663 * HANDLE UNALIGNED UPLS
664 *
665 * ( See also cluster_io where the associated upl is created )
666 * While we create the upl in one go, we will be dumping the pages in
667 * the upl in "transaction sized chunks" relative to the upl. Except
668 * for the first transaction, the upl_offset will always be page aligned,
669 * and when the upl's are not aligned the associated upl offset will not
670 * be page aligned and so we have to truncate the start and round up the
671 * end of the range in question and see if those pages are shared with
672 * other transactions or not. If two transactions "share" a page in the
673 * associated upl, the first one to complete "marks" it and skips that
674 * page and the second one will include it in the "commit range".
675 *
676 * As an example, consider the case where 4 transctions are needed (this
677 * is the worst case).
678 *
679 * Transaction for 0-1 (size -> PAGE_SIZE - upl_offset)
680 *
681 * This covers the associated upl from a -> c. a->b is not shared but
682 * b-c is shared with the next transaction so the first one to complete
683 * will only "mark" it.
684 *
685 * Transaction for 1-2 (size -> PAGE_SIZE)
686 *
687 * For transaction 1, assoc_upl_offset would be 0 (corresponding to the
688 * file offset a or b depending on what file offset the upl_offset
689 * corresponds to) and assoc_upl_end would correspond to the file
690 * offset c.
691 *
692 * (associated_upl - based on f_offset alignment)
693 * 0 a b c d e f
694 * <----|----|----|----|----|----|-----|---->
695 *
696 *
697 * (upl - based on user buffer address alignment)
698 * <__--|----|----|--__>
699 *
700 * 0 1 2 3
701 *
702 */
703 upl_size_t assoc_upl_size = upl_get_size(associated_upl);
704 #if 0
705 /* knock off the simple case first -> this transaction covers the entire UPL */
706 upl_offset_t upl_end = round_page_32(upl_offset + size);
707 upl_size_t upl_size = vector_upl_get_size(upl);
708
709 if ((trunc_page_32(upl_offset) == 0) && (upl_end == upl_size)) {
710 assoc_upl_offset = 0;
711 assoc_upl_end = assoc_upl_size;
712 goto do_commit;
713 }
714 #endif
715 off_t assoc_upl_start_f_offset = upl_adjusted_offset(associated_upl, PAGE_MASK);
716
717 assoc_upl_offset = (upl_offset_t)trunc_page_64(f_offset - assoc_upl_start_f_offset);
718 assoc_upl_end = round_page_64(f_offset + size) - assoc_upl_start_f_offset;
719
720 /*
721 * We can only sanity check the offset returned by upl_adjusted_offset
722 * for the first transaction for this UPL i.e. when (upl_offset < PAGE_SIZE)
723 */
724 assertf((upl_offset >= PAGE_SIZE) || ((assoc_upl_start_f_offset == trunc_page_64(f_offset)) && (assoc_upl_offset == 0)),
725 "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_offset = %d",
726 upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_offset);
727
728 assertf((upl_offset == assoc_upl_offset) || (upl_offset > assoc_upl_offset && ((upl_offset - assoc_upl_offset) <= PAGE_SIZE)) ||
729 (assoc_upl_offset > upl_offset && ((assoc_upl_offset - upl_offset) <= PAGE_SIZE)),
730 "abs(upl_offset - assoc_upl_offset) > PAGE_SIZE : "
731 "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_offset = %d",
732 upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_offset);
733
734 assertf(assoc_upl_end <= assoc_upl_size,
735 "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_size = %d, assoc_upl_offset = %d, assoc_upl_end = %d",
736 upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_size, assoc_upl_offset, assoc_upl_end);
737
738 assertf((assoc_upl_size > PAGE_SIZE) || (assoc_upl_offset == 0 && assoc_upl_end == PAGE_SIZE),
739 "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_size = %d, assoc_upl_offset = %d, assoc_upl_end = %d",
740 upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_size, assoc_upl_offset, assoc_upl_end);
741
742 if (assoc_upl_size == PAGE_SIZE) {
743 assoc_upl_offset = 0;
744 assoc_upl_end = PAGE_SIZE;
745 goto do_commit;
746 }
747
748 /*
749 * We have to check if the first and last pages of the associated UPL
750 * range could potentially be shared with other transactions and if the
751 * "sharing transactions" are both done. The first one sets the mark bit
752 * and the second one checks it and if set it includes that page in the
753 * pages to be "freed".
754 */
755 bool check_first_pg = (assoc_upl_offset != 0) || ((f_offset + size) < (assoc_upl_start_f_offset + PAGE_SIZE));
756 bool check_last_pg = (assoc_upl_end != assoc_upl_size) || (f_offset > ((assoc_upl_start_f_offset + assoc_upl_size) - PAGE_SIZE));
757
758 if (check_first_pg || check_last_pg) {
759 int first_pg = assoc_upl_offset >> PAGE_SHIFT;
760 int last_pg = trunc_page_32(assoc_upl_end - 1) >> PAGE_SHIFT;
761 upl_page_info_t *assoc_pl = UPL_GET_INTERNAL_PAGE_LIST(associated_upl);
762
763 lck_mtx_lock_spin(&iostate->io_mtxp);
764 if (check_first_pg && !upl_page_get_mark(assoc_pl, first_pg)) {
765 /*
766 * The first page isn't marked so mark it and let another
767 * transaction completion handle it.
768 */
769 upl_page_set_mark(assoc_pl, first_pg, true);
770 assoc_upl_offset += PAGE_SIZE;
771 }
772 if (check_last_pg && !upl_page_get_mark(assoc_pl, last_pg)) {
773 /*
774 * The last page isn't marked so mark the page and let another
775 * transaction completion handle it.
776 */
777 upl_page_set_mark(assoc_pl, last_pg, true);
778 assoc_upl_end -= PAGE_SIZE;
779 }
780 lck_mtx_unlock(&iostate->io_mtxp);
781 }
782
783 if (assoc_upl_end <= assoc_upl_offset) {
784 return;
785 }
786
787 do_commit:
788 size = assoc_upl_end - assoc_upl_offset;
789
790 boolean_t empty;
791
792 /*
793 * We can unlock these pages now and as this is for a
794 * direct/uncached write, we want to dump the pages too.
795 */
796 kern_return_t kr = upl_abort_range(associated_upl, assoc_upl_offset, size,
797 UPL_ABORT_DUMP_PAGES, &empty);
798
799 assert(!kr);
800
801 if (!kr && empty) {
802 upl_set_associated_upl(upl, NULL);
803 upl_deallocate(associated_upl);
804 }
805 }
806
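/*
 * Decide how to release the UPL pages covered by a failed (or partially
 * failed) I/O.  Direct I/O buffers (B_PHYS | B_CACHE) are simply committed
 * back, since the pages belong to the caller.  Otherwise the pages are
 * aborted: left intact for cached I/O and for transient pageout/write
 * errors, marked in error for failed pageins, and dumped in all other
 * cases.  Returns the upl_abort flags used (0 for the commit case).
 */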
807 static int
808 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp)
809 {
810 int upl_abort_code = 0;
811 int page_in = 0;
812 int page_out = 0;
813
814 if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE)) {
815 /*
816 * direct write of any flavor, or a direct read that wasn't aligned
817 */
818 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
819 } else {
820 if (io_flags & B_PAGEIO) {
821 if (io_flags & B_READ) {
822 page_in = 1;
823 } else {
824 page_out = 1;
825 }
826 }
827 if (io_flags & B_CACHE) {
828 /*
829 * leave pages in the cache unchanged on error
830 */
831 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
832 } else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) {
833 /*
834 * transient error on pageout/write path... leave pages unchanged
835 */
836 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
837 } else if (page_in) {
838 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
839 } else {
840 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
841 }
842
843 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
844 }
845 return upl_abort_code;
846 }
847
848 static int
849 cluster_iodone_finish(buf_t cbp_head, void *callback_arg)
850 {
851 int b_flags;
852 int error;
853 int total_size;
854 int total_resid;
855 int upl_offset;
856 int zero_offset;
857 int pg_offset = 0;
858 int commit_size = 0;
859 int upl_flags = 0;
860 int transaction_size = 0;
861 upl_t upl;
862 buf_t cbp;
863 buf_t cbp_next;
864 buf_t real_bp;
865 vnode_t vp;
866 struct clios *iostate;
867 void *verify_ctx;
868
869 error = 0;
870 total_size = 0;
871 total_resid = 0;
872
873 cbp = cbp_head;
874 vp = cbp->b_vp;
875 upl_offset = cbp->b_uploffset;
876 upl = cbp->b_upl;
877 b_flags = cbp->b_flags;
878 real_bp = cbp->b_real_bp;
879 zero_offset = cbp->b_validend;
880 iostate = (struct clios *)cbp->b_iostate;
881
882 if (real_bp) {
883 real_bp->b_dev = cbp->b_dev;
884 }
885
886 while (cbp) {
887 if ((cbp->b_flags & B_ERROR) && error == 0) {
888 error = cbp->b_error;
889 }
890
891 total_resid += cbp->b_resid;
892 total_size += cbp->b_bcount;
893
894 cbp_next = cbp->b_trans_next;
895
896 if (cbp_next == NULL) {
897 /*
898 * compute the overall size of the transaction
899 * in case we created one that has 'holes' in it
900 * 'total_size' represents the amount of I/O we
901 * did, not the span of the transaction w/r to the UPL
902 */
903 transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
904 }
905
906 cbp = cbp_next;
907 }
908
909 if (ISSET(b_flags, B_COMMIT_UPL)) {
910 cluster_handle_associated_upl(iostate,
911 cbp_head->b_upl,
912 upl_offset,
913 transaction_size,
914 cbp_head->b_clfoffset);
915 }
916
917 if (error == 0 && total_resid) {
918 error = EIO;
919 }
920
921 if (error == 0) {
922 int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);
923
924 if (cliodone_func != NULL) {
925 cbp_head->b_bcount = transaction_size;
926
927 error = (*cliodone_func)(cbp_head, callback_arg);
928 }
929 }
930 if (zero_offset) {
931 cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
932 }
933
934 verify_ctx = cbp_head->b_attr.ba_verify_ctx;
935 cbp_head->b_attr.ba_verify_ctx = NULL;
936 if (verify_ctx) {
937 vnode_verify_flags_t verify_flags = VNODE_VERIFY_CONTEXT_FREE;
938 caddr_t verify_buf = NULL;
939 off_t start_off = cbp_head->b_clfoffset;
940 size_t verify_length = transaction_size;
941 vm_offset_t vaddr;
942
943 if (!error) {
944 /*
945 * Map it in.
946 *
947 * ubc_upl_map_range unfortunately cannot handle concurrent map
948 * requests for the same UPL and returns failures when it can't
949 * map. The map exclusive mechanism enforces mutual exclusion
950 * for concurrent requests.
951 */
952 os_atomic_inc(&verify_in_flight, relaxed);
953 upl_set_map_exclusive(upl);
954 error = ubc_upl_map_range(upl, upl_offset, round_page(transaction_size), VM_PROT_DEFAULT, &vaddr);
955 if (error) {
956 upl_clear_map_exclusive(upl);
957 printf("ubc_upl_map_range returned error %d upl = %p, upl_offset = %d, size = %d",
958 error, upl, (int)upl_offset, (int)round_page(transaction_size));
959 error = EIO;
960 if (os_atomic_dec_orig(&verify_in_flight, relaxed) == 0) {
961 panic("verify_in_flight underflow");
962 }
963 } else {
964 verify_buf = (caddr_t)vaddr;
965 verify_flags |= VNODE_VERIFY_WITH_CONTEXT;
966 }
967 }
968
969 int verify_error = VNOP_VERIFY(vp, start_off, (uint8_t *)verify_buf, verify_length, 0, &verify_ctx, verify_flags, NULL);
970 if (!error) {
971 error = verify_error;
972 }
973
974 if (verify_buf) {
975 (void)ubc_upl_unmap_range(upl, upl_offset, round_page(transaction_size));
976 upl_clear_map_exclusive(upl);
977 verify_buf = NULL;
978 if (os_atomic_dec_orig(&verify_in_flight, relaxed) == 0) {
979 panic("verify_in_flight underflow");
980 }
981 }
982 } else if (cbp_head->b_attr.ba_flags & BA_WILL_VERIFY) {
983 error = EBADMSG;
984 }
985
986 if (iostate) {
987 int need_wakeup = 0;
988
989 /*
990 * someone has issued multiple I/Os asynchronously
991 * and is waiting for them to complete (streaming)
992 */
993 lck_mtx_lock_spin(&iostate->io_mtxp);
994
995 if (error && iostate->io_error == 0) {
996 iostate->io_error = error;
997 }
998
999 iostate->io_completed += total_size;
1000
1001 if (iostate->io_wanted) {
1002 /*
1003 * someone is waiting for the state of
1004 * this io stream to change
1005 */
1006 iostate->io_wanted = 0;
1007 need_wakeup = 1;
1008 }
1009 lck_mtx_unlock(&iostate->io_mtxp);
1010
1011 if (need_wakeup) {
1012 wakeup((caddr_t)&iostate->io_wanted);
1013 }
1014 }
1015
1016 if (b_flags & B_COMMIT_UPL) {
1017 pg_offset = upl_offset & PAGE_MASK;
1018 commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1019
1020 if (error) {
1021 upl_set_iodone_error(upl, error);
1022
1023 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp);
1024 } else {
1025 upl_flags = UPL_COMMIT_FREE_ON_EMPTY;
1026
1027 if ((b_flags & B_PHYS) && (b_flags & B_READ)) {
1028 upl_flags |= UPL_COMMIT_SET_DIRTY;
1029 }
1030
1031 if (b_flags & B_AGE) {
1032 upl_flags |= UPL_COMMIT_INACTIVATE;
1033 }
1034
1035 ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
1036 }
1037 }
1038
1039 cbp = cbp_head->b_trans_next;
1040 while (cbp) {
1041 cbp_next = cbp->b_trans_next;
1042
1043 if (cbp != cbp_head) {
1044 free_io_buf(cbp);
1045 }
1046
1047 cbp = cbp_next;
1048 }
1049 free_io_buf(cbp_head);
1050
1051 if (real_bp) {
1052 if (error) {
1053 real_bp->b_flags |= B_ERROR;
1054 real_bp->b_error = error;
1055 }
1056 real_bp->b_resid = total_resid;
1057
1058 buf_biodone(real_bp);
1059 }
1060 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
1061 upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
1062
1063 return error;
1064 }
1065
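/*
 * Continuation body for the cluster verify worker threads.  Each thread
 * drains verify_work_head under cl_transaction_mtxp, returning the
 * verify_buf slot to verify_free_head before calling cluster_iodone_finish()
 * with the lock dropped, and parks via assert_wait()/thread_block() once
 * the work queue is empty.
 */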
1066 static void
1067 cluster_iodone_verify_continue(void)
1068 {
1069 lck_mtx_lock_spin(&cl_transaction_mtxp);
1070 for (;;) {
1071 struct verify_buf *vb = TAILQ_FIRST(&verify_work_head);
1072
1073 if (!vb) {
1074 assert_wait(&verify_work_head, (THREAD_UNINT));
1075 break;
1076 }
1077 buf_t cbp = vb->vb_cbp;
1078 void* callback_arg = vb->vb_callback_arg;
1079
1080 TAILQ_REMOVE(&verify_work_head, vb, vb_entry);
1081 vb->vb_cbp = NULL;
1082 vb->vb_callback_arg = NULL;
1083 vb->vb_whichq = 0;
1084 TAILQ_INSERT_TAIL(&verify_free_head, vb, vb_entry);
1085 lck_mtx_unlock(&cl_transaction_mtxp);
1086
1087 (void)cluster_iodone_finish(cbp, callback_arg);
1088 cbp = NULL;
1089 lck_mtx_lock_spin(&cl_transaction_mtxp);
1090 }
1091 lck_mtx_unlock(&cl_transaction_mtxp);
1092 thread_block((thread_continue_t)cluster_iodone_verify_continue);
1093 /* NOT REACHED */
1094 }
1095
1096 static void
1097 cluster_verify_thread(void)
1098 {
1099 thread_set_thread_name(current_thread(), "cluster_verify_thread");
1100 #if !defined(__x86_64__)
1101 thread_group_join_io_storage();
1102 #endif /* !defined(__x86_64__) */
1103 cluster_iodone_verify_continue();
1104 /* NOT REACHED */
1105 }
1106
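/*
 * Hand a completed transaction off to a verify worker thread.  Called with
 * cl_transaction_mtxp held (both call sites in cluster_iodone hold it).
 * Returns false when no free verify_buf slot is available, in which case
 * the caller falls back to verifying inline via cluster_iodone_finish().
 */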
1107 static bool
1108 enqueue_buf_for_verify(buf_t cbp, void *callback_arg)
1109 {
1110 struct verify_buf *vb;
1111
1112 vb = TAILQ_FIRST(&verify_free_head);
1113 if (vb) {
1114 TAILQ_REMOVE(&verify_free_head, vb, vb_entry);
1115 vb->vb_cbp = cbp;
1116 vb->vb_callback_arg = callback_arg;
1117 vb->vb_whichq = 1;
1118 TAILQ_INSERT_TAIL(&verify_work_head, vb, vb_entry);
1119 return true;
1120 } else {
1121 return false;
1122 }
1123 }
1124
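/*
 * I/O completion handler for the buffers built by cluster_io.  The
 * completion that observes the whole transaction as done (every buf marked
 * B_TDONE, or a single-buf transaction marked B_EOT) either finishes it
 * inline via cluster_iodone_finish() or, when an async verification context
 * is attached, queues it for one of the cluster verify threads.
 */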
1125 static int
1126 cluster_iodone(buf_t bp, void *callback_arg)
1127 {
1128 buf_t cbp;
1129 buf_t cbp_head;
1130 int error = 0;
1131 boolean_t transaction_complete = FALSE;
1132 bool async;
1133
1134 __IGNORE_WCASTALIGN(cbp_head = (buf_t)(bp->b_trans_head));
1135
1136 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
1137 cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
1138
1139 async = cluster_verify_threads &&
1140 (os_atomic_load(&cbp_head->b_attr.ba_flags, acquire) & BA_ASYNC_VERIFY);
1141
1142 assert(!async || cbp_head->b_attr.ba_verify_ctx);
1143
1144 if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) {
1145 lck_mtx_lock_spin(&cl_transaction_mtxp);
1146
1147 bp->b_flags |= B_TDONE;
1148
1149 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1150 /*
1151 * all I/O requests that are part of this transaction
1152 * have to complete before we can process it
1153 */
1154 if (!(cbp->b_flags & B_TDONE)) {
1155 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
1156 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
1157
1158 lck_mtx_unlock(&cl_transaction_mtxp);
1159
1160 return 0;
1161 }
1162
1163 if (cbp->b_trans_next == CLUSTER_IO_WAITING) {
1164 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
1165 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
1166
1167 lck_mtx_unlock(&cl_transaction_mtxp);
1168 wakeup(cbp);
1169
1170 return 0;
1171 }
1172
1173 if (cbp->b_flags & B_EOT) {
1174 transaction_complete = TRUE;
1175
1176 if (async) {
1177 async = enqueue_buf_for_verify(cbp_head, callback_arg);
1178 }
1179 }
1180 }
1181 lck_mtx_unlock(&cl_transaction_mtxp);
1182
1183 if (transaction_complete == FALSE) {
1184 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
1185 cbp_head, 0, 0, 0, 0);
1186 return 0;
1187 }
1188 } else if (async) {
1189 lck_mtx_lock_spin(&cl_transaction_mtxp);
1190 async = enqueue_buf_for_verify(cbp_head, callback_arg);
1191 lck_mtx_unlock(&cl_transaction_mtxp);
1192 }
1193
1194 if (async) {
1195 wakeup(&verify_work_head);
1196 } else {
1197 error = cluster_iodone_finish(cbp_head, callback_arg);
1198 }
1199
1200 return error;
1201 }
1202
1203
1204 uint32_t
1205 cluster_throttle_io_limit(vnode_t vp, uint32_t *limit)
1206 {
1207 if (cluster_is_throttled(vp)) {
1208 *limit = calculate_max_throttle_size(vp);
1209 return 1;
1210 }
1211 return 0;
1212 }
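
/*
 * Usage sketch for callers (illustrative only; 'io_size' is a hypothetical
 * caller-side variable): before issuing a large I/O, a filesystem can trim
 * its request to the throttled limit, e.g.
 *
 *	uint32_t limit;
 *
 *	if (cluster_throttle_io_limit(vp, &limit)) {
 *		io_size = MIN(io_size, limit);
 *	}
 *
 * The limit comes from calculate_max_throttle_size() and is only meaningful
 * while I/O against this mount is actually being throttled.
 */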
1213
1214
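/*
 * Zero 'size' bytes of the upl starting at 'upl_offset'.  If a real buf with
 * a mapped data pointer is supplied, the zeroing goes through that mapping;
 * otherwise the UPL's page list is walked and each physical page is zeroed
 * directly (a single bzero_phys_nc covers device-memory UPLs).
 */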
1215 void
1216 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
1217 {
1218 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
1219 upl_offset, size, bp, 0, 0);
1220
1221 if (bp == NULL || bp->b_datap == 0) {
1222 upl_page_info_t *pl;
1223 addr64_t zero_addr;
1224
1225 pl = ubc_upl_pageinfo(upl);
1226
1227 if (upl_device_page(pl) == TRUE) {
1228 zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset;
1229
1230 bzero_phys_nc(zero_addr, size);
1231 } else {
1232 while (size) {
1233 int page_offset;
1234 int page_index;
1235 int zero_cnt;
1236
1237 page_index = upl_offset / PAGE_SIZE;
1238 page_offset = upl_offset & PAGE_MASK;
1239
1240 zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << PAGE_SHIFT) + page_offset;
1241 zero_cnt = min(PAGE_SIZE - page_offset, size);
1242
1243 bzero_phys(zero_addr, zero_cnt);
1244
1245 size -= zero_cnt;
1246 upl_offset += zero_cnt;
1247 }
1248 }
1249 } else {
1250 bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
1251 }
1252
1253 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
1254 upl_offset, size, 0, 0, 0);
1255 }
1256
1257
1258 static void
1259 cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size)
1260 {
1261 /*
1262 * We will assign a verification context to cbp_head.
1263 * This will be passed back to the filesystem when
1264 * verifying (in cluster_iodone).
1265 */
1266 if (verify_block_size) {
1267 off_t start_off = cbp_head->b_clfoffset;
1268 size_t length;
1269 void *verify_ctx = NULL;
1270 int error = 0;
1271 vnode_t vp = buf_vnode(cbp_head);
1272
1273 if (cbp_head == cbp_tail) {
1274 length = cbp_head->b_bcount;
1275 } else {
1276 length = (cbp_tail->b_clfoffset + cbp_tail->b_bcount) - start_off;
1277 }
1278
1279 /*
1280 * zero_offset is non zero for the transaction containing the EOF
1281 * (if the filesize is not page aligned). In that case we might
1282 * have the transaction size not be page/verify block size aligned
1283 */
1284 if ((zero_offset == 0) &&
1285 ((length < verify_block_size) || (length % verify_block_size)) != 0) {
1286 panic("%s length = %zu, verify_block_size = %zu",
1287 __FUNCTION__, length, verify_block_size);
1288 }
1289
1290 error = VNOP_VERIFY(vp, start_off, NULL, length,
1291 &verify_block_size, &verify_ctx, VNODE_VERIFY_CONTEXT_ALLOC, NULL);
1292
1293 assert(!(error && verify_ctx));
1294
1295 if (verify_ctx) {
1296 if (num_verify_threads && (os_atomic_load(&cluster_verify_threads, relaxed) == 0)) {
1297 if (os_atomic_inc_orig(&cluster_verify_threads, relaxed) == 0) {
1298 thread_t thread;
1299 int i;
1300
1301 for (i = 0; i < num_verify_threads && i < MAX_VERIFY_THREADS; i++) {
1302 kernel_thread_start((thread_continue_t)cluster_verify_thread, NULL, &thread);
1303 thread_deallocate(thread);
1304 }
1305 os_atomic_store(&cluster_verify_threads, i, relaxed);
1306 } else {
1307 os_atomic_dec(&cluster_verify_threads, relaxed);
1308 }
1309 }
1310 cbp_head->b_attr.ba_verify_ctx = verify_ctx;
1311 /*
1312 * At least one thread is busy (at the time we
1313 * checked), so we can let it get queued for
1314 * async processing. It's fine if we occasionally get
1315 * this wrong.
1316 */
1317 if (os_atomic_load(&verify_in_flight, relaxed)) {
1318 /* This flag and the setting of ba_verify_ctx needs to be ordered */
1319 os_atomic_or(&cbp_head->b_attr.ba_flags, BA_ASYNC_VERIFY, release);
1320 }
1321 }
1322 } else {
1323 cbp_head->b_attr.ba_verify_ctx = NULL;
1324 }
1325
1326 cbp_head->b_validend = zero_offset;
1327 cbp_tail->b_flags |= B_EOT;
1328 }
1329
1330 static void
1331 cluster_wait_IO(buf_t cbp_head, int async)
1332 {
1333 buf_t cbp;
1334
1335 if (async) {
1336 /*
1337 * Async callback completion will not normally generate a
1338 * wakeup upon I/O completion. To get woken up, we set
1339 * b_trans_next (which is safe for us to modify) on the last
1340 * buffer to CLUSTER_IO_WAITING so that cluster_iodone knows
1341 * to wake us up when all buffers as part of this transaction
1342 * are completed. This is done under the umbrella of
1343 * cl_transaction_mtxp which is also taken in cluster_iodone.
1344 */
1345 bool done = true;
1346 buf_t last = NULL;
1347
1348 lck_mtx_lock_spin(&cl_transaction_mtxp);
1349
1350 for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) {
1351 if (!ISSET(cbp->b_flags, B_TDONE)) {
1352 done = false;
1353 }
1354 }
1355
1356 if (!done) {
1357 last->b_trans_next = CLUSTER_IO_WAITING;
1358
1359 DTRACE_IO1(wait__start, buf_t, last);
1360 do {
1361 msleep(last, &cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL);
1362
1363 /*
1364 * We should only have been woken up if all the
1365 * buffers are completed, but just in case...
1366 */
1367 done = true;
1368 for (cbp = cbp_head; cbp != CLUSTER_IO_WAITING; cbp = cbp->b_trans_next) {
1369 if (!ISSET(cbp->b_flags, B_TDONE)) {
1370 done = false;
1371 break;
1372 }
1373 }
1374 } while (!done);
1375 DTRACE_IO1(wait__done, buf_t, last);
1376
1377 last->b_trans_next = NULL;
1378 }
1379
1380 lck_mtx_unlock(&cl_transaction_mtxp);
1381 } else { // !async
1382 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1383 buf_biowait(cbp);
1384 }
1385 }
1386 }
1387
1388 static void
1389 cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
1390 {
1391 buf_t cbp;
1392 int error;
1393 boolean_t isswapout = FALSE;
1394
1395 /*
1396 * cluster_complete_transaction will
1397 * only be called if we've issued a complete chain in synchronous mode
1398 * or, we've already done a cluster_wait_IO on an incomplete chain
1399 */
1400 if (needwait) {
1401 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
1402 buf_biowait(cbp);
1403 }
1404 }
1405 /*
1406 * we've already waited on all of the I/Os in this transaction,
1407 * so mark all of the buf_t's in this transaction as B_TDONE
1408 * so that cluster_iodone sees the transaction as completed
1409 */
1410 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
1411 cbp->b_flags |= B_TDONE;
1412 cbp->b_attr.ba_flags &= ~BA_ASYNC_VERIFY;
1413 }
1414 cbp = *cbp_head;
1415
1416 if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp)) {
1417 isswapout = TRUE;
1418 }
1419
1420 error = cluster_iodone(cbp, callback_arg);
1421
1422 if (!(flags & CL_ASYNC) && error && *retval == 0) {
1423 if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO)) {
1424 *retval = error;
1425 } else if (isswapout == TRUE) {
1426 *retval = error;
1427 }
1428 }
1429 *cbp_head = (buf_t)NULL;
1430 }
1431
1432
1433 static int
1434 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1435 int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1436 {
1437 buf_t cbp;
1438 u_int size;
1439 u_int io_size;
1440 int io_flags;
1441 int bmap_flags;
1442 int error = 0;
1443 int retval = 0;
1444 buf_t cbp_head = NULL;
1445 buf_t cbp_tail = NULL;
1446 int trans_count = 0;
1447 int max_trans_count;
1448 u_int pg_count;
1449 int pg_offset;
1450 u_int max_iosize;
1451 u_int max_vectors;
1452 int priv;
1453 int zero_offset = 0;
1454 int async_throttle = 0;
1455 mount_t mp;
1456 size_t verify_block_size = 0;
1457 vm_offset_t upl_end_offset;
1458 boolean_t need_EOT = FALSE;
1459
1460 if (real_bp) {
1461 /*
1462 * we currently don't support buffers larger than a page
1463 */
1464 if (non_rounded_size > PAGE_SIZE) {
1465 panic("%s(): Called with real buffer of size %d bytes which "
1466 "is greater than the maximum allowed size of "
1467 "%d bytes (the system PAGE_SIZE).\n",
1468 __FUNCTION__, non_rounded_size, PAGE_SIZE);
1469 }
1470 }
1471
1472 mp = vp->v_mount;
1473
1474 if ((flags & CL_READ) && mp && !(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) {
1475 if ((flags & CL_PAGEIN) || cluster_verify_threads) {
1476 error = VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL);
1477 if (error) {
1478 if (error != ENOTSUP) {
1479 return error;
1480 }
1481 error = 0;
1482 }
1483 if (verify_block_size != PAGE_SIZE) {
1484 verify_block_size = 0;
1485 }
1486 }
1487
1488 if (verify_block_size && real_bp) {
1489 panic("%s(): Called with real buffer and needs verification ",
1490 __FUNCTION__);
1491 }
1492
1493 /*
1494 * For direct io, only allow cluster verification if f_offset
1495 * and upl_offset are both page aligned. They will always be
1496 * page aligned for pageins and cached reads. If they are not
1497 * page aligned, leave it to the filesystem to do verification
1498 * Furthermore, the size also has to be aligned to page size.
1499 * Strictly speaking the alignments need to be for verify_block_size
1500 * but since the only verify_block_size that is currently supported
1501 * is page size, we check against page alignment.
1502 */
1503 if (verify_block_size && (flags & (CL_DEV_MEMORY | CL_DIRECT_IO)) &&
1504 ((f_offset & PAGE_MASK) || (upl_offset & PAGE_MASK) || (non_rounded_size & PAGE_MASK))) {
1505 verify_block_size = 0;
1506 }
1507 }
1508
1509 /*
1510 * we don't want to do any funny rounding of the size for IO requests
1511 * coming through the DIRECT or CONTIGUOUS paths... those pages don't
1512 * belong to us... we can't extend (nor do we need to) the I/O to fill
1513 * out a page
1514 */
1515 if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
1516 /*
1517 * round the requested size up so that this I/O ends on a
1518 * page boundary in case this is a 'write'... if the filesystem
1519 * has blocks allocated to back the page beyond the EOF, we want to
1520 * make sure to write out the zero's that are sitting beyond the EOF
1521 * so that in case the filesystem doesn't explicitly zero this area
1522 * if a hole is created via a lseek/write beyond the current EOF,
1523 * it will return zeros when it's read back from the disk. If the
1524 * physical allocation doesn't extend for the whole page, we'll
1525 * only write/read from the disk up to the end of this allocation
1526 * via the extent info returned from the VNOP_BLOCKMAP call.
1527 */
1528 pg_offset = upl_offset & PAGE_MASK;
1529
1530 size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
1531 } else {
1532 /*
1533 * anyone advertising a blocksize of 1 byte probably
1534 * can't deal with us rounding up the request size
1535 * AFP is one such filesystem/device
1536 */
1537 size = non_rounded_size;
1538 }
1539 upl_end_offset = upl_offset + size;
1540
1541 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
1542
1543 /*
1544 * Set the maximum transaction size to the maximum desired number of
1545 * buffers.
1546 */
1547 max_trans_count = 8;
1548 if (flags & CL_DEV_MEMORY) {
1549 max_trans_count = 16;
1550 }
1551
1552 if (flags & CL_READ) {
1553 io_flags = B_READ;
1554 bmap_flags = VNODE_READ;
1555
1556 max_iosize = mp->mnt_maxreadcnt;
1557 max_vectors = mp->mnt_segreadcnt;
1558 } else {
1559 io_flags = B_WRITE;
1560 bmap_flags = VNODE_WRITE;
1561
1562 max_iosize = mp->mnt_maxwritecnt;
1563 max_vectors = mp->mnt_segwritecnt;
1564 }
1565 if (verify_block_size) {
1566 bmap_flags |= VNODE_CLUSTER_VERIFY;
1567 }
1568 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
1569
1570 /*
1571 * make sure the maximum iosize is a
1572 * multiple of the page size
1573 */
1574 max_iosize &= ~PAGE_MASK;
1575
1576 /*
1577 * Ensure the maximum iosize is sensible.
1578 */
1579 if (!max_iosize) {
1580 max_iosize = PAGE_SIZE;
1581 }
1582
1583 if (flags & CL_THROTTLE) {
1584 if (!(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) {
1585 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
1586
1587 if (max_iosize > max_throttle_size) {
1588 max_iosize = max_throttle_size;
1589 }
1590 async_throttle = calculate_max_throttle_cnt(vp);
1591 } else {
1592 if ((flags & CL_DEV_MEMORY)) {
1593 async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
1594 } else {
1595 u_int max_cluster;
1596 u_int max_cluster_size;
1597 u_int scale;
1598
1599 if (vp->v_mount->mnt_minsaturationbytecount) {
1600 max_cluster_size = vp->v_mount->mnt_minsaturationbytecount;
1601
1602 scale = 1;
1603 } else {
1604 max_cluster_size = MAX_CLUSTER_SIZE(vp);
1605
1606 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
1607 scale = WRITE_THROTTLE_SSD;
1608 } else {
1609 scale = WRITE_THROTTLE;
1610 }
1611 }
1612 if (max_iosize > max_cluster_size) {
1613 max_cluster = max_cluster_size;
1614 } else {
1615 max_cluster = max_iosize;
1616 }
1617
1618 if (size < max_cluster) {
1619 max_cluster = size;
1620 }
1621
1622 if (flags & CL_CLOSE) {
1623 scale += MAX_CLUSTERS;
1624 }
1625
1626 async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1);
1627 }
1628 }
1629 }
1630 if (flags & CL_AGE) {
1631 io_flags |= B_AGE;
1632 }
1633 if (flags & (CL_PAGEIN | CL_PAGEOUT)) {
1634 io_flags |= B_PAGEIO;
1635 }
1636 if (flags & (CL_IOSTREAMING)) {
1637 io_flags |= B_IOSTREAMING;
1638 }
1639 if (flags & CL_COMMIT) {
1640 io_flags |= B_COMMIT_UPL;
1641 }
1642 if (flags & CL_DIRECT_IO) {
1643 io_flags |= B_PHYS;
1644 }
1645 if (flags & (CL_PRESERVE | CL_KEEPCACHED)) {
1646 io_flags |= B_CACHE;
1647 }
1648 if (flags & CL_PASSIVE) {
1649 io_flags |= B_PASSIVE;
1650 }
1651 if (flags & CL_ENCRYPTED) {
1652 io_flags |= B_ENCRYPTED_IO;
1653 }
1654
1655 if (vp->v_flag & VSYSTEM) {
1656 io_flags |= B_META;
1657 }
1658
1659 if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
1660 /*
1661 * then we are going to end up
1662 * with a page that we can't complete (the file size wasn't a multiple
1663 * of PAGE_SIZE and we're trying to read to the end of the file),
1664 * so we'll go ahead and zero out the portion of the page we can't
1665 * read in from the file
1666 */
1667 zero_offset = (int)(upl_offset + non_rounded_size);
1668 } else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO)) {
1669 assert(ISSET(flags, CL_COMMIT));
1670
1671 // For a direct/uncached write, we need to lock pages...
1672 upl_t cached_upl = NULL;
1673 upl_page_info_t *cached_pl;
1674
1675 assert(upl_offset < PAGE_SIZE);
1676
1677 /*
1678 *
1679 * f_offset = b
1680 * upl_offset = 8K
1681 *
1682 * (cached_upl - based on f_offset alignment)
1683 * 0 a b c
1684 * <----|----|----|----|----|----|-----|---->
1685 *
1686 *
1687 * (upl - based on user buffer address alignment)
1688 * <__--|----|----|--__>
1689 *
1690 * 0 1x 2x 3x
1691 *
1692 */
1693 const off_t cached_upl_f_offset = trunc_page_64(f_offset);
1694 const int cached_upl_size = round_page_32((f_offset - cached_upl_f_offset) + non_rounded_size);
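/*
 * Illustrative example of the span being locked (assuming 4K pages):
 * with f_offset = 10KB and non_rounded_size = 12KB the write touches
 * bytes [10KB, 22KB), so cached_upl_f_offset = trunc_page(10KB) = 8KB
 * and cached_upl_size = round_page(2KB + 12KB) = 16KB, i.e. the four
 * cached pages overlapped by the write.
 */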
1695 int num_retries = 0;
1696
1697 /*
1698 * Create a UPL to lock the pages in the cache whilst the
1699 * write is in progress.
1700 */
1701 create_cached_upl:
1702 ubc_create_upl_kernel(vp, cached_upl_f_offset, cached_upl_size, &cached_upl,
1703 &cached_pl, UPL_SET_LITE, VM_KERN_MEMORY_FILE);
1704
1705 /*
1706 * If we are not overwriting the first and last pages completely,
1707 * we need to write them out first if they are dirty. These pages
1708 * will be discarded after the write completes, so we might lose
1709 * the writes for the parts that are not overwritten.
1710 */
1711 bool first_page_needs_sync = false;
1712 bool last_page_needs_sync = false;
1713
1714 if (cached_upl && (cached_upl_f_offset < f_offset) && upl_dirty_page(cached_pl, 0)) {
1715 first_page_needs_sync = true;
1716 }
1717
1718 if (cached_upl && (cached_upl_f_offset + cached_upl_size) > (f_offset + non_rounded_size)) {
1719 int last_page = (cached_upl_size / PAGE_SIZE) - 1;
1720
1721 if ((last_page != 0 || !first_page_needs_sync) && upl_dirty_page(cached_pl, last_page)) {
1722 last_page_needs_sync = true;
1723 }
1724 }
1725
1726 if (first_page_needs_sync || last_page_needs_sync) {
1727 ubc_upl_abort_range(cached_upl, 0, cached_upl_size, UPL_ABORT_FREE_ON_EMPTY);
1728 cached_upl = NULL;
1729 cached_pl = NULL;
1730 if (first_page_needs_sync) {
1731 ubc_msync(vp, cached_upl_f_offset, cached_upl_f_offset + PAGE_SIZE, NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1732 }
1733 if (last_page_needs_sync) {
1734 off_t cached_upl_end_offset = cached_upl_f_offset + cached_upl_size;
1735
1736 ubc_msync(vp, cached_upl_end_offset - PAGE_SIZE, cached_upl_end_offset, NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1737 }
1738 if (++num_retries < 16) {
1739 goto create_cached_upl;
1740 }
1741 printf("%s : Number of retries for syncing first or last page reached %d\n", __FUNCTION__, num_retries);
1742 assertf(num_retries < 16, "%s : Number of retries for syncing first or last page reached %d\n", __FUNCTION__, num_retries);
1743 }
1744
1745 /*
1746 * Attach this UPL to the other UPL so that we can find it
1747 * later.
1748 */
1749 upl_set_associated_upl(upl, cached_upl);
1750 assertf(!cached_upl ||
1751 (upl_adjusted_offset(cached_upl, PAGE_MASK) == cached_upl_f_offset),
1752 "upl_adjusted_offset(cached_upl, PAGE_MASK) = %lld, cached_upl_f_offset = %lld",
1753 upl_adjusted_offset(cached_upl, PAGE_MASK), cached_upl_f_offset);
1754 }
1755
1756 while (size) {
1757 daddr64_t blkno;
1758 daddr64_t lblkno;
1759 size_t io_size_tmp;
1760 u_int io_size_wanted;
1761
1762 if (size > max_iosize) {
1763 io_size = max_iosize;
1764 } else {
1765 io_size = size;
1766 }
1767
1768 io_size_wanted = io_size;
1769 io_size_tmp = (size_t)io_size;
1770
1771 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL))) {
1772 break;
1773 }
1774
1775 if (io_size_tmp > io_size_wanted) {
1776 io_size = io_size_wanted;
1777 } else {
1778 io_size = (u_int)io_size_tmp;
1779 }
1780
1781 if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
1782 real_bp->b_blkno = blkno;
1783 }
1784
1785 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
1786 (int)f_offset, (int)(blkno >> 32), (int)blkno, io_size, 0);
1787
1788 if (io_size == 0) {
1789 /*
1790 * vnop_blockmap didn't return an error... however, it did
1791 * return an extent size of 0 which means we can't
1792 * make forward progress on this I/O... a hole in the
1793 * file would be returned as a blkno of -1 with a non-zero io_size...
1794 * a real extent is returned with a blkno != -1 and a non-zero io_size
1795 */
1796 error = EINVAL;
1797 break;
1798 }
1799 if (!(flags & CL_READ) && blkno == -1) {
1800 off_t e_offset;
1801 int pageout_flags;
1802
1803 if (upl_get_internal_vectorupl(upl)) {
1804 panic("Vector UPLs should not take this code-path");
1805 }
1806 /*
1807 * we're writing into a 'hole'
1808 */
1809 if (flags & CL_PAGEOUT) {
1810 /*
1811 * if we got here via cluster_pageout
1812 * then just error the request and return
1813 * the 'hole' should already have been covered
1814 */
1815 error = EINVAL;
1816 break;
1817 }
1818 /*
1819 * we can get here if the cluster code happens to
1820 * pick up a page that was dirtied via mmap vs
1821 * a 'write' and the page targets a 'hole'...
1822 * i.e. the writes to the cluster were sparse
1823 * and the file was being written for the first time
1824 *
1825 * we can also get here if the filesystem supports
1826 * 'holes' that are less than PAGE_SIZE.... because
1827 * we can't know if the range in the page that covers
1828 * the 'hole' has been dirtied via an mmap or not,
1829 * we have to assume the worst and try to push the
1830 * entire page to storage.
1831 *
1832 * Try paging out the page individually before
1833 * giving up entirely and dumping it (the pageout
1834 * path will ensure that the zero extent accounting
1835 * has been taken care of before we get back into cluster_io)
1836 *
1837 * go direct to vnode_pageout so that we don't have to
1838 * unbusy the page from the UPL... we used to do this
1839 * so that we could call ubc_msync, but that results
1840 * in a potential deadlock if someone else races us to acquire
1841 * that page and wins and in addition needs one of the pages
1842 * we're continuing to hold in the UPL
1843 */
1844 pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
1845
1846 if (!(flags & CL_ASYNC)) {
1847 pageout_flags |= UPL_IOSYNC;
1848 }
1849 if (!(flags & CL_COMMIT)) {
1850 pageout_flags |= UPL_NOCOMMIT;
1851 }
1852
1853 if (cbp_head) {
1854 buf_t prev_cbp;
1855 uint32_t bytes_in_last_page;
1856
1857 /*
1858 * first we have to wait for the current outstanding I/Os
1859 * to complete... EOT hasn't been set yet on this transaction
1860 * so the pages won't be released
1861 */
1862 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1863
1864 bytes_in_last_page = cbp_head->b_uploffset & PAGE_MASK;
1865 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1866 bytes_in_last_page += cbp->b_bcount;
1867 }
1868 bytes_in_last_page &= PAGE_MASK;
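/*
 * Illustrative example (4K pages): a transaction whose head starts
 * page aligned and carries two bufs of 0x1000 and 0x600 bytes sums to
 * 0x1600, and 0x1600 & PAGE_MASK = 0x600... that's how many bytes
 * spill into the final, partially filled page.  A result of 0 means
 * the transaction ends exactly on a page boundary and nothing below
 * needs to be trimmed.
 */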
1869
1870 while (bytes_in_last_page) {
1871 /*
1872 * we've got a transaction that
1873 * includes the page we're about to push out through vnode_pageout...
1874 * find the bp's in the list which intersect this page and either
1875 * remove them entirely from the transaction (there could be multiple bp's), or
1876 * round its iosize down to the page boundary (there can only be one)...
1877 *
1878 * find the last bp in the list and act on it
1879 */
1880 for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next) {
1881 prev_cbp = cbp;
1882 }
1883
1884 if (bytes_in_last_page >= cbp->b_bcount) {
1885 /*
1886 * this buf no longer has any I/O associated with it
1887 */
1888 bytes_in_last_page -= cbp->b_bcount;
1889 cbp->b_bcount = 0;
1890
1891 free_io_buf(cbp);
1892
1893 if (cbp == cbp_head) {
1894 assert(bytes_in_last_page == 0);
1895 /*
1896 * the buf we just freed was the only buf in
1897 * this transaction... so there's no I/O to do
1898 */
1899 cbp_head = NULL;
1900 cbp_tail = NULL;
1901 } else {
1902 /*
1903 * remove the buf we just freed from
1904 * the transaction list
1905 */
1906 prev_cbp->b_trans_next = NULL;
1907 cbp_tail = prev_cbp;
1908 }
1909 } else {
1910 /*
1911 * this is the last bp that has I/O
1912 * intersecting the page of interest
1913 * only some of the I/O is in the intersection
1914 * so clip the size but keep it in the transaction list
1915 */
1916 cbp->b_bcount -= bytes_in_last_page;
1917 cbp_tail = cbp;
1918 bytes_in_last_page = 0;
1919 }
1920 }
1921 if (cbp_head) {
1922 /*
1923 * there was more to the current transaction
1924 * than just the page we are pushing out via vnode_pageout...
1925 * mark it as finished and complete it... we've already
1926 * waited for the I/Os to complete above in the call to cluster_wait_IO
1927 */
1928 cluster_EOT(cbp_head, cbp_tail, 0, 0);
1929
1930 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1931
1932 trans_count = 0;
1933 }
1934 }
1935 if (vnode_pageout(vp, upl, (upl_offset_t)trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1936 error = EINVAL;
1937 }
1938 e_offset = round_page_64(f_offset + 1);
1939 io_size = (u_int)(e_offset - f_offset);
1940
1941 f_offset += io_size;
1942 upl_offset += io_size;
1943
1944 if (size >= io_size) {
1945 size -= io_size;
1946 } else {
1947 size = 0;
1948 }
1949 /*
1950 * keep track of how much of the original request
1951 * that we've actually completed... non_rounded_size
1952 * may go negative due to us rounding the request
1953 * to a page size multiple (i.e. size > non_rounded_size)
1954 */
1955 non_rounded_size -= io_size;
1956
1957 if (non_rounded_size <= 0) {
1958 /*
1959 * we've transferred all of the data in the original
1960 * request, but we were unable to complete the tail
1961 * of the last page because the file didn't have
1962 * an allocation to back that portion... this is ok.
1963 */
1964 size = 0;
1965 }
1966 if (error) {
1967 if (size == 0) {
1968 flags &= ~CL_COMMIT;
1969 }
1970 break;
1971 }
1972 continue;
1973 }
1974
1975 lblkno = (daddr64_t)(f_offset / CLUSTER_IO_BLOCK_SIZE);
1976
1977 /*
1978 * we have now figured out how much I/O we can do - this is in 'io_size'
1979 * pg_offset is the starting point in the first page for the I/O
1980 * pg_count is the number of full and partial pages that 'io_size' encompasses
1981 */
1982 pg_offset = upl_offset & PAGE_MASK;
1983
1984 if (flags & CL_DEV_MEMORY) {
1985 /*
1986 * treat physical requests as one 'giant' page
1987 */
1988 pg_count = 1;
1989 } else {
1990 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
1991 }
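/*
 * Illustrative example (4K pages): an io_size of 10KB starting at
 * pg_offset 512 yields (10240 + 512 + 4095) / 4096 = 3, i.e. every
 * page the transfer touches, counting the partial first and last
 * pages.
 */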
1992
1993 if ((flags & CL_READ) && blkno == -1) {
1994 vm_offset_t commit_offset;
1995 int bytes_to_zero;
1996 int complete_transaction_now = 0;
1997
1998 /*
1999 * if we're reading and blkno == -1, then we've got a
2000 * 'hole' in the file that we need to deal with by zeroing
2001 * out the affected area in the upl
2002 */
2003 if (io_size >= (u_int)non_rounded_size) {
2004 /*
2005 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
2006 * then 'zero_offset' will be non-zero
2007 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
2008 * (indicated by the io_size finishing off the I/O request for this UPL)
2009 * then we're not going to issue an I/O for the
2010 * last page in this upl... we need to zero both the hole and the tail
2011 * of the page beyond the EOF, since the delayed zero-fill won't kick in
2012 */
2013 bytes_to_zero = non_rounded_size;
2014 if (!(flags & CL_NOZERO)) {
2015 bytes_to_zero = (int)((((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset);
2016 }
2017
2018 zero_offset = 0;
2019 } else {
2020 bytes_to_zero = io_size;
2021 }
2022
2023 pg_count = 0;
2024
2025 cluster_zero(upl, (upl_offset_t)upl_offset, bytes_to_zero, real_bp);
2026
2027 if (cbp_head) {
2028 int pg_resid;
2029
2030 /*
2031 * if there is a current I/O chain pending
2032 * then the first page of the group we just zero'd
2033 * will be handled by the I/O completion if the zero
2034 * fill started in the middle of the page
2035 */
2036 commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2037
2038 pg_resid = (int)(commit_offset - upl_offset);
2039
2040 if (bytes_to_zero >= pg_resid) {
2041 /*
2042 * the last page of the current I/O
2043 * has been completed...
2044 * compute the number of fully zero'd
2045 * pages that are beyond it
2046 * plus the last page if it's partial
2047 * and we have no more I/O to issue...
2048 * otherwise a partial page is left
2049 * to begin the next I/O
2050 */
2051 if ((int)io_size >= non_rounded_size) {
2052 pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
2053 } else {
2054 pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
2055 }
2056
2057 complete_transaction_now = 1;
2058 }
2059 } else {
2060 /*
2061 * no pending I/O to deal with
2062 * so, commit all of the fully zero'd pages
2063 * plus the last page if it's partial
2064 * and we have no more I/O to issue...
2065 * otherwise a partial page is left
2066 * to begin the next I/O
2067 */
2068 if ((int)io_size >= non_rounded_size) {
2069 pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
2070 } else {
2071 pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
2072 }
2073
2074 commit_offset = upl_offset & ~PAGE_MASK;
2075 }
2076
2077 // Associated UPL is currently only used in the direct write path
2078 assert(!upl_associated_upl(upl));
2079
2080 if ((flags & CL_COMMIT) && pg_count) {
2081 ubc_upl_commit_range(upl, (upl_offset_t)commit_offset,
2082 pg_count * PAGE_SIZE,
2083 UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
2084 }
2085 upl_offset += io_size;
2086 f_offset += io_size;
2087 size -= io_size;
2088
2089 /*
2090 * keep track of how much of the original request
2091 * that we've actually completed... non_rounded_size
2092 * may go negative due to us rounding the request
2093 * to a page size multiple (i.e. size > non_rounded_size)
2094 */
2095 non_rounded_size -= io_size;
2096
2097 if (non_rounded_size <= 0) {
2098 /*
2099 * we've transferred all of the data in the original
2100 * request, but we were unable to complete the tail
2101 * of the last page because the file didn't have
2102 * an allocation to back that portion... this is ok.
2103 */
2104 size = 0;
2105 }
2106 if (cbp_head && (complete_transaction_now || size == 0)) {
2107 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2108
2109 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
2110
2111 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
2112
2113 trans_count = 0;
2114 }
2115 continue;
2116 }
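/*
 * If the request touches more pages than the device can accept in a
 * single scatter/gather list, trim whole pages off the tail so that
 * pg_count fits within max_vectors (illustratively, 10 pages against
 * a limit of 8 trims (10 - 8) * PAGE_SIZE bytes from io_size).  If
 * that trim would consume the entire request, fall back to issuing
 * just the remainder of the first page.
 */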
2117 if (pg_count > max_vectors) {
2118 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
2119 io_size = PAGE_SIZE - pg_offset;
2120 pg_count = 1;
2121 } else {
2122 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
2123 pg_count = max_vectors;
2124 }
2125 }
2126 /*
2127 * If the transaction is going to reach the maximum number of
2128 * desired elements, truncate the i/o to the nearest page so
2129 * that the actual i/o is initiated after this buffer is
2130 * created and added to the i/o chain.
2131 *
2132 * I/O directed to physically contiguous memory
2133 * doesn't have a requirement to make sure we 'fill' a page
2134 */
2135 if (!(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
2136 ((upl_offset + io_size) & PAGE_MASK)) {
2137 vm_offset_t aligned_ofs;
2138
2139 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
2140 /*
2141 * If the io_size does not actually finish off even a
2142 * single page we have to keep adding buffers to the
2143 * transaction despite having reached the desired limit.
2144 *
2145 * Eventually we get here with the page being finished
2146 * off (and exceeded) and then we truncate the size of
2147 * this i/o request so that it is page aligned so that
2148 * we can finally issue the i/o on the transaction.
2149 */
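/*
 * Illustrative example (4K pages): upl_offset = 0x1200 and io_size =
 * 0x2600 end at 0x3800, so aligned_ofs = 0x3000; trimming io_size to
 * 0x3000 - 0x1200 = 0x1E00 makes this buf end exactly on a page
 * boundary so the transaction can finally be issued.
 */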
2150 if (aligned_ofs > upl_offset) {
2151 io_size = (u_int)(aligned_ofs - upl_offset);
2152 pg_count--;
2153 }
2154 }
2155
2156 if (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) {
2157 /*
2158 * if we're not targeting a virtual device i.e. a disk image
2159 * it's safe to dip into the reserve pool since real devices
2160 * can complete this I/O request without requiring additional
2161 * bufs from the alloc_io_buf pool
2162 */
2163 priv = 1;
2164 } else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT) && !cbp_head) {
2165 /*
2166 * Throttle the speculative IO
2167 *
2168 * We can only throttle this if it is the first iobuf
2169 * for the transaction. alloc_io_buf implements
2170 * additional restrictions for diskimages anyway.
2171 */
2172 priv = 0;
2173 } else {
2174 priv = 1;
2175 }
2176
2177 cbp = alloc_io_buf(vp, priv);
2178
2179 if (flags & CL_PAGEOUT) {
2180 u_int i;
2181
2182 /*
2183 * since blocks are in offsets of CLUSTER_IO_BLOCK_SIZE, scale
2184 * iteration to (PAGE_SIZE * pg_count) of blks.
2185 */
2186 for (i = 0; i < (PAGE_SIZE * pg_count) / CLUSTER_IO_BLOCK_SIZE; i++) {
2187 if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY) {
2188 panic("BUSY bp found in cluster_io");
2189 }
2190 }
2191 }
2192 if (flags & CL_ASYNC) {
2193 if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg)) {
2194 panic("buf_setcallback failed");
2195 }
2196 }
2197 cbp->b_cliodone = (void *)callback;
2198 cbp->b_flags |= io_flags;
2199 if (flags & CL_NOCACHE) {
2200 cbp->b_attr.ba_flags |= BA_NOCACHE;
2201 }
2202 if (verify_block_size) {
2203 cbp->b_attr.ba_flags |= BA_WILL_VERIFY;
2204 }
2205
2206 cbp->b_lblkno = lblkno;
2207 cbp->b_clfoffset = f_offset;
2208 cbp->b_blkno = blkno;
2209 cbp->b_bcount = io_size;
2210
2211 if (buf_setupl(cbp, upl, (uint32_t)upl_offset)) {
2212 panic("buf_setupl failed");
2213 }
2214 #if CONFIG_IOSCHED
2215 upl_set_blkno(upl, upl_offset, io_size, blkno);
2216 #endif
2217 cbp->b_trans_next = (buf_t)NULL;
2218
2219 if ((cbp->b_iostate = (void *)iostate)) {
2220 /*
2221 * caller wants to track the state of this
2222 * io... bump the amount issued against this stream
2223 */
2224 iostate->io_issued += io_size;
2225 }
2226
2227 if (flags & CL_READ) {
2228 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
2229 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
2230 } else {
2231 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
2232 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
2233 }
2234
2235 if (cbp_head) {
2236 cbp_tail->b_trans_next = cbp;
2237 cbp_tail = cbp;
2238 } else {
2239 cbp_head = cbp;
2240 cbp_tail = cbp;
2241
2242 if ((cbp_head->b_real_bp = real_bp)) {
2243 real_bp = (buf_t)NULL;
2244 }
2245 }
2246 *(buf_t *)(&cbp->b_trans_head) = cbp_head;
2247
2248 trans_count++;
2249
2250 upl_offset += io_size;
2251 f_offset += io_size;
2252 size -= io_size;
2253 /*
2254 * keep track of how much of the original request
2255 * that we've actually completed... non_rounded_size
2256 * may go negative due to us rounding the request
2257 * to a page size multiple (i.e. size > non_rounded_size)
2258 */
2259 non_rounded_size -= io_size;
2260
2261 if (non_rounded_size <= 0) {
2262 /*
2263 * we've transferred all of the data in the original
2264 * request, but we were unable to complete the tail
2265 * of the last page because the file didn't have
2266 * an allocation to back that portion... this is ok.
2267 */
2268 size = 0;
2269 }
2270 if (size == 0) {
2271 /*
2272 * we have no more I/O to issue, so go
2273 * finish the final transaction
2274 */
2275 need_EOT = TRUE;
2276 } else if (((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
2277 ((flags & CL_ASYNC) || trans_count > max_trans_count)) {
2278 /*
2279 * I/O directed to physically contiguous memory...
2280 * which doesn't have a requirement to make sure we 'fill' a page
2281 * or...
2282 * the current I/O we've prepared fully
2283 * completes the last page in this request
2284 * and ...
2285 * it's either an ASYNC request or
2286 * we've already accumulated more than 'max_trans_count' I/O's into
2287 * this transaction so mark it as complete so that
2288 * it can finish asynchronously or via the cluster_complete_transaction
2289 * below if the request is synchronous
2290 */
2291 need_EOT = TRUE;
2292 }
2293 if (need_EOT == TRUE) {
2294 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
2295 }
2296
2297 if (flags & CL_THROTTLE) {
2298 (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
2299 }
2300
2301 if (!(io_flags & B_READ)) {
2302 vnode_startwrite(vp);
2303 }
2304
2305 if (flags & CL_RAW_ENCRYPTED) {
2306 /*
2307 * User requested raw encrypted bytes.
2308 * Twiddle the bit in the ba_flags for the buffer
2309 */
2310 cbp->b_attr.ba_flags |= BA_RAW_ENCRYPTED_IO;
2311 }
2312
2313 (void) VNOP_STRATEGY(cbp);
2314
2315 if (need_EOT == TRUE) {
2316 if (!(flags & CL_ASYNC)) {
2317 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
2318 }
2319
2320 need_EOT = FALSE;
2321 trans_count = 0;
2322 cbp_head = NULL;
2323 }
2324 }
2325 if (error) {
2326 int abort_size;
2327
2328 io_size = 0;
2329
2330 if (cbp_head) {
2331 /*
2332 * Wait until all of the outstanding I/O
2333 * for this partial transaction has completed
2334 */
2335 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2336
2337 /*
2338 * Rewind the upl offset to the beginning of the
2339 * transaction.
2340 */
2341 upl_offset = cbp_head->b_uploffset;
2342 }
2343
2344 if (ISSET(flags, CL_COMMIT)) {
2345 cluster_handle_associated_upl(iostate, upl,
2346 (upl_offset_t)upl_offset,
2347 (upl_size_t)(upl_end_offset - upl_offset),
2348 cbp_head ? cbp_head->b_clfoffset : f_offset);
2349 }
2350
2351 // Free all the IO buffers in this transaction
2352 for (cbp = cbp_head; cbp;) {
2353 buf_t cbp_next;
2354
2355 size += cbp->b_bcount;
2356 io_size += cbp->b_bcount;
2357
2358 cbp_next = cbp->b_trans_next;
2359 free_io_buf(cbp);
2360 cbp = cbp_next;
2361 }
2362
2363 if (iostate) {
2364 int need_wakeup = 0;
2365
2366 /*
2367 * update the error condition for this stream
2368 * since we never really issued the io
2369 * just go ahead and adjust it back
2370 */
2371 lck_mtx_lock_spin(&iostate->io_mtxp);
2372
2373 if (iostate->io_error == 0) {
2374 iostate->io_error = error;
2375 }
2376 iostate->io_issued -= io_size;
2377
2378 if (iostate->io_wanted) {
2379 /*
2380 * someone is waiting for the state of
2381 * this io stream to change
2382 */
2383 iostate->io_wanted = 0;
2384 need_wakeup = 1;
2385 }
2386 lck_mtx_unlock(&iostate->io_mtxp);
2387
2388 if (need_wakeup) {
2389 wakeup((caddr_t)&iostate->io_wanted);
2390 }
2391 }
2392
2393 if (flags & CL_COMMIT) {
2394 int upl_flags;
2395
2396 pg_offset = upl_offset & PAGE_MASK;
2397 abort_size = (int)((upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK);
2398
2399 upl_flags = cluster_ioerror(upl, (int)(upl_offset - pg_offset),
2400 abort_size, error, io_flags, vp);
2401
2402 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
2403 upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
2404 }
2405 if (retval == 0) {
2406 retval = error;
2407 }
2408 } else if (cbp_head) {
2409 panic("%s(): cbp_head is not NULL.", __FUNCTION__);
2410 }
2411
2412 if (real_bp) {
2413 /*
2414 * can get here if we either encountered an error
2415 * or we completely zero-filled the request and
2416 * no I/O was issued
2417 */
2418 if (error) {
2419 real_bp->b_flags |= B_ERROR;
2420 real_bp->b_error = error;
2421 }
2422 buf_biodone(real_bp);
2423 }
2424 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
2425
2426 return retval;
2427 }
2428
2429 #define reset_vector_run_state() \
2430 issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
2431
2432 static int
2433 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
2434 int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
2435 {
2436 vector_upl_set_pagelist(vector_upl);
2437
2438 if (io_flag & CL_READ) {
2439 if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0)) {
2440 io_flag &= ~CL_PRESERVE; /*don't zero fill*/
2441 } else {
2442 io_flag |= CL_PRESERVE; /*zero fill*/
2443 }
2444 }
2445 return cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg);
2446 }
2447
2448 static int
2449 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
2450 {
2451 int pages_in_prefetch;
2452
2453 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
2454 (int)f_offset, size, (int)filesize, 0, 0);
2455
2456 if (f_offset >= filesize) {
2457 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2458 (int)f_offset, 0, 0, 0, 0);
2459 return 0;
2460 }
2461 if ((off_t)size > (filesize - f_offset)) {
2462 size = (u_int)(filesize - f_offset);
2463 }
2464 pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
2465
2466 advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
2467
2468 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2469 (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
2470
2471 return pages_in_prefetch;
2472 }
2473
2474
2475
2476 static void
2477 cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
2478 int bflag)
2479 {
2480 daddr64_t r_addr;
2481 off_t f_offset;
2482 int size_of_prefetch;
2483 u_int max_prefetch;
2484
2485
2486 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
2487 (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
2488
2489 if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
2490 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2491 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
2492 return;
2493 }
2494 if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
2495 rap->cl_ralen = 0;
2496 rap->cl_maxra = 0;
2497
2498 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2499 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
2500
2501 return;
2502 }
2503
2504 max_prefetch = cluster_max_prefetch(vp,
2505 cluster_max_io_size(vp->v_mount, CL_READ), speculative_prefetch_max);
2506
2507 if (max_prefetch <= PAGE_SIZE) {
2508 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2509 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0);
2510 return;
2511 }
2512 if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) {
2513 if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) {
2514 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2515 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
2516 return;
2517 }
2518 }
2519 r_addr = MAX(extent->e_addr, rap->cl_maxra) + 1;
2520 f_offset = (off_t)(r_addr * PAGE_SIZE_64);
2521
2522 size_of_prefetch = 0;
2523
2524 ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
2525
2526 if (size_of_prefetch) {
2527 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2528 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
2529 return;
2530 }
2531 if (f_offset < filesize) {
2532 daddr64_t read_size;
2533
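/*
 * The read-ahead window ramps up exponentially with sequential
 * access: it starts at 1 page and doubles on each subsequent
 * sequential read (1, 2, 4, 8, ... pages) until it is capped at
 * max_prefetch / PAGE_SIZE.
 */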
2534 rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
2535
2536 read_size = (extent->e_addr + 1) - extent->b_addr;
2537
2538 if (read_size > rap->cl_ralen) {
2539 if (read_size > max_prefetch / PAGE_SIZE) {
2540 rap->cl_ralen = max_prefetch / PAGE_SIZE;
2541 } else {
2542 rap->cl_ralen = (int)read_size;
2543 }
2544 }
2545 size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
2546
2547 if (size_of_prefetch) {
2548 rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
2549 }
2550 }
2551 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2552 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
2553 }
2554
2555
2556 int
2557 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2558 int size, off_t filesize, int flags)
2559 {
2560 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2561 }
2562
2563
2564 int
2565 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2566 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2567 {
2568 int io_size;
2569 int rounded_size;
2570 off_t max_size;
2571 int local_flags;
2572
2573 local_flags = CL_PAGEOUT | CL_THROTTLE;
2574
2575 if ((flags & UPL_IOSYNC) == 0) {
2576 local_flags |= CL_ASYNC;
2577 }
2578 if ((flags & UPL_NOCOMMIT) == 0) {
2579 local_flags |= CL_COMMIT;
2580 }
2581 if ((flags & UPL_KEEPCACHED)) {
2582 local_flags |= CL_KEEPCACHED;
2583 }
2584 if (flags & UPL_PAGING_ENCRYPTED) {
2585 local_flags |= CL_ENCRYPTED;
2586 }
2587
2588
2589 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
2590 (int)f_offset, size, (int)filesize, local_flags, 0);
2591
2592 /*
2593 * If they didn't specify any I/O, then we are done...
2594 * we can't issue an abort because we don't know how
2595 * big the upl really is
2596 */
2597 if (size <= 0) {
2598 return EINVAL;
2599 }
2600
2601 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
2602 if (local_flags & CL_COMMIT) {
2603 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2604 }
2605 return EROFS;
2606 }
2607 /*
2608 * can't page-out to a negative offset
2609 * or if we're starting beyond the EOF
2610 * or if the file offset isn't page aligned
2611 * or the size requested isn't a multiple of PAGE_SIZE
2612 */
2613 if (f_offset < 0 || f_offset >= filesize ||
2614 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
2615 if (local_flags & CL_COMMIT) {
2616 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2617 }
2618 return EINVAL;
2619 }
2620 max_size = filesize - f_offset;
2621
2622 if (size < max_size) {
2623 io_size = size;
2624 } else {
2625 io_size = (int)max_size;
2626 }
2627
2628 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
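/*
 * Illustrative example (4K pages): if the caller hands us a 16KB UPL
 * but only 9000 bytes remain before EOF, io_size = 9000 and
 * rounded_size = 12KB; the final 4KB page of the UPL lies entirely
 * beyond EOF, so it is released below instead of being written.
 */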
2629
2630 if (size > rounded_size) {
2631 if (local_flags & CL_COMMIT) {
2632 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
2633 UPL_ABORT_FREE_ON_EMPTY);
2634 }
2635 }
2636 return cluster_io(vp, upl, upl_offset, f_offset, io_size,
2637 local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2638 }
2639
2640
2641 int
2642 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2643 int size, off_t filesize, int flags)
2644 {
2645 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2646 }
2647
2648
2649 int
2650 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2651 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2652 {
2653 u_int io_size;
2654 int rounded_size;
2655 off_t max_size;
2656 int retval;
2657 int local_flags = 0;
2658
2659 if (upl == NULL || size < 0) {
2660 panic("cluster_pagein: NULL upl passed in");
2661 }
2662
2663 if ((flags & UPL_IOSYNC) == 0) {
2664 local_flags |= CL_ASYNC;
2665 }
2666 if ((flags & UPL_NOCOMMIT) == 0) {
2667 local_flags |= CL_COMMIT;
2668 }
2669 if (flags & UPL_IOSTREAMING) {
2670 local_flags |= CL_IOSTREAMING;
2671 }
2672 if (flags & UPL_PAGING_ENCRYPTED) {
2673 local_flags |= CL_ENCRYPTED;
2674 }
2675
2676
2677 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
2678 (int)f_offset, size, (int)filesize, local_flags, 0);
2679
2680 /*
2681 * can't page-in from a negative offset
2682 * or if we're starting beyond the EOF
2683 * or if the file offset isn't page aligned
2684 * or the size requested isn't a multiple of PAGE_SIZE
2685 */
2686 if (f_offset < 0 || f_offset >= filesize ||
2687 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
2688 if (local_flags & CL_COMMIT) {
2689 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2690 }
2691
2692 if (f_offset >= filesize) {
2693 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CLUSTER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CL_PGIN_PAST_EOF), 0 /* arg */);
2694 }
2695
2696 return EINVAL;
2697 }
2698 max_size = filesize - f_offset;
2699
2700 if (size < max_size) {
2701 io_size = size;
2702 } else {
2703 io_size = (int)max_size;
2704 }
2705
2706 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2707
2708 if (size > rounded_size && (local_flags & CL_COMMIT)) {
2709 ubc_upl_abort_range(upl, upl_offset + rounded_size,
2710 size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2711 }
2712
2713 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2714 local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2715
2716 return retval;
2717 }
2718
2719
2720 int
2721 cluster_bp(buf_t bp)
2722 {
2723 return cluster_bp_ext(bp, NULL, NULL);
2724 }
2725
2726
2727 int
2728 cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
2729 {
2730 off_t f_offset;
2731 int flags;
2732
2733 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
2734 bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
2735
2736 if (bp->b_flags & B_READ) {
2737 flags = CL_ASYNC | CL_READ;
2738 } else {
2739 flags = CL_ASYNC;
2740 }
2741 if (bp->b_flags & B_PASSIVE) {
2742 flags |= CL_PASSIVE;
2743 }
2744
2745 f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
2746
2747 return cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg);
2748 }
2749
2750
2751
2752 int
2753 cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
2754 {
2755 return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
2756 }
2757
2758
2759 int
2760 cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
2761 int xflags, int (*callback)(buf_t, void *), void *callback_arg)
2762 {
2763 user_ssize_t cur_resid;
2764 int retval = 0;
2765 int flags;
2766 int zflags;
2767 int bflag;
2768 int write_type = IO_COPY;
2769 u_int32_t write_length;
2770 uint32_t min_direct_size = MIN_DIRECT_WRITE_SIZE;
2771
2772 flags = xflags;
2773
2774 if (flags & IO_PASSIVE) {
2775 bflag = CL_PASSIVE;
2776 } else {
2777 bflag = 0;
2778 }
2779
2780 if (vp->v_flag & VNOCACHE_DATA) {
2781 flags |= IO_NOCACHE;
2782 bflag |= CL_NOCACHE;
2783 }
2784 if (uio == NULL) {
2785 /*
2786 * no user data...
2787 * this call is being made to zero-fill some range in the file
2788 */
2789 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
2790
2791 return retval;
2792 }
2793 /*
2794 * do a write through the cache if one of the following is true....
2795 * NOCACHE is not true or NODIRECT is true
2796 * the uio request doesn't target USERSPACE
2797 * otherwise, find out if we want the direct or contig variant for
2798 * the first vector in the uio request
2799 */
2800 if (((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
2801 if (flags & IO_NOCACHE_SWRITE) {
2802 uint32_t fs_bsize = vp->v_mount->mnt_vfsstat.f_bsize;
2803
2804 if (fs_bsize && (fs_bsize < MIN_DIRECT_WRITE_SIZE) &&
2805 ((fs_bsize & (fs_bsize - 1)) == 0)) {
2806 min_direct_size = fs_bsize;
2807 }
2808 }
2809 retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
2810 }
2811
2812 if ((flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT) {
2813 /*
2814 * must go through the cached variant in this case
2815 */
2816 write_type = IO_COPY;
2817 }
2818
2819 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
2820 switch (write_type) {
2821 case IO_COPY:
2822 /*
2823 * make sure the uio_resid isn't too big...
2824 * internally, we want to handle all of the I/O in
2825 * chunk sizes that fit in a 32 bit int
2826 */
2827 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
2828 /*
2829 * we're going to have to call cluster_write_copy
2830 * more than once...
2831 *
2832 * only want the last call to cluster_write_copy to
2833 * have the IO_TAILZEROFILL flag set and only the
2834 * first call should have IO_HEADZEROFILL
2835 */
2836 zflags = flags & ~IO_TAILZEROFILL;
2837 flags &= ~IO_HEADZEROFILL;
2838
2839 write_length = MAX_IO_REQUEST_SIZE;
2840 } else {
2841 /*
2842 * last call to cluster_write_copy
2843 */
2844 zflags = flags;
2845
2846 write_length = (u_int32_t)cur_resid;
2847 }
2848 retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
2849 break;
2850
2851 case IO_CONTIG:
2852 zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
2853
2854 if (flags & IO_HEADZEROFILL) {
2855 /*
2856 * only do this once per request
2857 */
2858 flags &= ~IO_HEADZEROFILL;
2859
2860 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
2861 headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2862 if (retval) {
2863 break;
2864 }
2865 }
2866 retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
2867
2868 if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
2869 /*
2870 * we're done with the data from the user specified buffer(s)
2871 * and we've been requested to zero fill at the tail
2872 * treat this as an IO_HEADZEROFILL which doesn't require a uio
2873 * by rearranging the args and passing in IO_HEADZEROFILL
2874 */
2875
2876 /*
2877 * Update the oldEOF to reflect the current EOF. If the UPL page
2878 * to zero-fill is not valid (when F_NOCACHE is set), the
2879 * cluster_write_copy() will perform RMW on the UPL page when
2880 * the oldEOF is not aligned on page boundary due to unaligned
2881 * write.
2882 */
2883 if (uio->uio_offset > oldEOF) {
2884 oldEOF = uio->uio_offset;
2885 }
2886 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)oldEOF, tailOff, uio->uio_offset,
2887 (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2888 }
2889 break;
2890
2891 case IO_DIRECT:
2892 /*
2893 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
2894 */
2895 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg, min_direct_size);
2896 break;
2897
2898 case IO_UNKNOWN:
2899 retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
2900 break;
2901 }
2902 /*
2903 * in case we end up calling cluster_write_copy (from cluster_write_direct)
2904 * multiple times to service a multi-vector request that is not aligned properly
2905 * we need to update the oldEOF so that we
2906 * don't zero-fill the head of a page if we've successfully written
2907 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2908 * page that is beyond the oldEOF if the write is unaligned... we only
2909 * want that to happen for the very first page of the cluster_write,
2910 * NOT the first page of each vector making up a multi-vector write.
2911 */
2912 if (uio->uio_offset > oldEOF) {
2913 oldEOF = uio->uio_offset;
2914 }
2915 }
2916 return retval;
2917 }
2918
2919
2920 static int
2921 cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
2922 int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size)
2923 {
2924 upl_t upl = NULL;
2925 upl_page_info_t *pl;
2926 vm_offset_t upl_offset;
2927 vm_offset_t vector_upl_offset = 0;
2928 u_int32_t io_req_size;
2929 u_int32_t offset_in_file;
2930 u_int32_t offset_in_iovbase;
2931 u_int32_t io_size;
2932 int io_flag = 0;
2933 upl_size_t upl_size = 0, vector_upl_size = 0;
2934 vm_size_t upl_needed_size;
2935 mach_msg_type_number_t pages_in_pl = 0;
2936 upl_control_flags_t upl_flags;
2937 kern_return_t kret = KERN_SUCCESS;
2938 mach_msg_type_number_t i = 0;
2939 int force_data_sync;
2940 int retval = 0;
2941 int first_IO = 1;
2942 struct clios iostate;
2943 user_addr_t iov_base;
2944 u_int32_t mem_alignment_mask;
2945 u_int32_t devblocksize;
2946 u_int32_t max_io_size;
2947 u_int32_t max_upl_size;
2948 u_int32_t max_vector_size;
2949 u_int32_t bytes_outstanding_limit;
2950 boolean_t io_throttled = FALSE;
2951
2952 u_int32_t vector_upl_iosize = 0;
2953 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
2954 off_t v_upl_uio_offset = 0;
2955 int vector_upl_index = 0;
2956 upl_t vector_upl = NULL;
2957
2958 uint32_t io_align_mask;
2959
2960 /*
2961 * When we enter this routine, we know
2962 * -- the resid will not exceed iov_len
2963 */
2964 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
2965 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2966
2967 assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
2968
2969 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2970
2971 io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
2972
2973 if (flags & IO_PASSIVE) {
2974 io_flag |= CL_PASSIVE;
2975 }
2976
2977 if (flags & IO_NOCACHE) {
2978 io_flag |= CL_NOCACHE;
2979 }
2980
2981 if (flags & IO_SKIP_ENCRYPTION) {
2982 io_flag |= CL_ENCRYPTED;
2983 }
2984
2985 iostate.io_completed = 0;
2986 iostate.io_issued = 0;
2987 iostate.io_error = 0;
2988 iostate.io_wanted = 0;
2989
2990 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
2991
2992 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2993 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2994
2995 if (devblocksize == 1) {
2996 /*
2997 * the AFP client advertises a devblocksize of 1
2998 * however, its BLOCKMAP routine maps to physical
2999 * blocks that are PAGE_SIZE in size...
3000 * therefore we can't ask for I/Os that aren't page aligned
3001 * or aren't multiples of PAGE_SIZE in size...
3002 * by setting devblocksize to PAGE_SIZE, we re-instate
3003 * the old behavior we had before the mem_alignment_mask
3004 * changes went in...
3005 */
3006 devblocksize = PAGE_SIZE;
3007 }
3008
3009 io_align_mask = PAGE_MASK;
3010 if (min_io_size < MIN_DIRECT_WRITE_SIZE) {
3011 /* The process has opted into fs blocksize direct io writes */
3012 assert((min_io_size & (min_io_size - 1)) == 0);
3013 io_align_mask = min_io_size - 1;
3014 io_flag |= CL_DIRECT_IO_FSBLKSZ;
3015 }
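/*
 * Illustrative example: with a 4KB filesystem block size,
 * io_align_mask becomes 0xFFF, so the file offset and transfer
 * length only need 4KB alignment to stay on the direct path rather
 * than the PAGE_MASK alignment required by default.
 */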
3016
3017 next_dwrite:
3018 io_req_size = *write_length;
3019 iov_base = uio_curriovbase(uio);
3020
3021 offset_in_file = (u_int32_t)(uio->uio_offset & io_align_mask);
3022 offset_in_iovbase = (u_int32_t)(iov_base & mem_alignment_mask);
3023
3024 if (offset_in_file || offset_in_iovbase) {
3025 /*
3026 * one of the 2 important offsets is misaligned
3027 * so fire an I/O through the cache for this entire vector
3028 */
3029 goto wait_for_dwrites;
3030 }
3031 if (iov_base & (devblocksize - 1)) {
3032 /*
3033 * the offset in memory must be on a device block boundary
3034 * so that we can guarantee that we can generate an
3035 * I/O that ends on a page boundary in cluster_io
3036 */
3037 goto wait_for_dwrites;
3038 }
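/*
 * Illustrative example of the two gates above: a uio offset of
 * 0x3200 with io_align_mask = 0xFFF leaves offset_in_file = 0x200,
 * so the vector can't be issued directly... we drop to
 * wait_for_dwrites and the remaining bytes are serviced through the
 * cached cluster_write_copy path instead.
 */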
3039
3040 task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp);
3041 while ((io_req_size >= PAGE_SIZE || io_req_size >= min_io_size) && uio->uio_offset < newEOF && retval == 0) {
3042 int throttle_type;
3043
3044 if ((throttle_type = cluster_is_throttled(vp))) {
3045 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
3046
3047 /*
3048 * we're in the throttle window, at the very least
3049 * we want to limit the size of the I/O we're about
3050 * to issue
3051 */
3052 if ((flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
3053 /*
3054 * we're in the throttle window and at least 1 I/O
3055 * has already been issued by a throttleable thread
3056 * in this window, so return with EAGAIN to indicate
3057 * to the FS issuing the cluster_write call that it
3058 * should now throttle after dropping any locks
3059 */
3060 throttle_info_update_by_mount(vp->v_mount);
3061
3062 io_throttled = TRUE;
3063 goto wait_for_dwrites;
3064 }
3065 max_vector_size = max_throttle_size;
3066 max_io_size = max_throttle_size;
3067 } else {
3068 max_vector_size = MAX_VECTOR_UPL_SIZE;
3069 max_io_size = max_upl_size;
3070 }
3071
3072 if (first_IO) {
3073 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
3074 first_IO = 0;
3075 }
3076 io_size = io_req_size & ~io_align_mask;
3077 iov_base = uio_curriovbase(uio);
3078
3079 if (io_size > max_io_size) {
3080 io_size = max_io_size;
3081 }
3082
3083 if (useVectorUPL && (iov_base & PAGE_MASK)) {
3084 /*
3085 * We have an iov_base that's not page-aligned.
3086 * Issue all I/O's that have been collected within
3087 * this Vectored UPL.
3088 */
3089 if (vector_upl_index) {
3090 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3091 reset_vector_run_state();
3092 }
3093
3094 /*
3095 * After this point, if we are using the Vector UPL path and the base is
3096 * not page-aligned then the UPL with that base will be the first in the vector UPL.
3097 */
3098 }
3099
3100 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
3101 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3102
3103 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
3104 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
3105
3106 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
3107 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
3108 pages_in_pl = 0;
3109 upl_size = (upl_size_t)upl_needed_size;
3110 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
3111 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
3112
3113 kret = vm_map_get_upl(map,
3114 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
3115 &upl_size,
3116 &upl,
3117 NULL,
3118 &pages_in_pl,
3119 &upl_flags,
3120 VM_KERN_MEMORY_FILE,
3121 force_data_sync);
3122
3123 if (kret != KERN_SUCCESS) {
3124 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3125 0, 0, 0, kret, 0);
3126 /*
3127 * failed to get pagelist
3128 *
3129 * we may have already spun some portion of this request
3130 * off as async requests... we need to wait for the I/O
3131 * to complete before returning
3132 */
3133 goto wait_for_dwrites;
3134 }
3135 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
3136 pages_in_pl = upl_size / PAGE_SIZE;
3137
3138 for (i = 0; i < pages_in_pl; i++) {
3139 if (!upl_valid_page(pl, i)) {
3140 break;
3141 }
3142 }
3143 if (i == pages_in_pl) {
3144 break;
3145 }
3146
3147 /*
3148 * didn't get all the pages back that we
3149 * needed... release this upl and try again
3150 */
3151 ubc_upl_abort(upl, 0);
3152 }
3153 if (force_data_sync >= 3) {
3154 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3155 i, pages_in_pl, upl_size, kret, 0);
3156 /*
3157 * for some reason, we couldn't acquire a hold on all
3158 * the pages needed in the user's address space
3159 *
3160 * we may have already spun some portion of this request
3161 * off as async requests... we need to wait for the I/O
3162 * to complete before returning
3163 */
3164 goto wait_for_dwrites;
3165 }
3166
3167 /*
3168 * Consider the possibility that upl_size wasn't satisfied.
3169 */
3170 if (upl_size < upl_needed_size) {
3171 if (upl_size && upl_offset == 0) {
3172 io_size = upl_size;
3173 } else {
3174 io_size = 0;
3175 }
3176 }
3177 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3178 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
3179
3180 if (io_size == 0) {
3181 ubc_upl_abort(upl, 0);
3182 /*
3183 * we may have already spun some portion of this request
3184 * off as async requests... we need to wait for the I/O
3185 * to complete before returning
3186 */
3187 goto wait_for_dwrites;
3188 }
3189
3190 if (useVectorUPL) {
3191 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
3192 if (end_off) {
3193 issueVectorUPL = 1;
3194 }
3195 /*
3196 * After this point, if we are using a vector UPL, then
3197 * either all the UPL elements end on a page boundary OR
3198 * this UPL is the last element because it does not end
3199 * on a page boundary.
3200 */
3201 }
3202
3203 /*
3204 * we want to push out these writes asynchronously so that we can overlap
3205 * the preparation of the next I/O
3206 * if there are already too many outstanding writes
3207 * wait until some complete before issuing the next
3208 */
3209 if (vp->v_mount->mnt_minsaturationbytecount) {
3210 bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
3211 } else {
3212 if (__improbable(os_mul_overflow(max_upl_size, IO_SCALE(vp, 2),
3213 &bytes_outstanding_limit) ||
3214 (bytes_outstanding_limit > overlapping_write_max))) {
3215 bytes_outstanding_limit = overlapping_write_max;
3216 }
3217 }
3218
3219 cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");
3220
3221 if (iostate.io_error) {
3222 /*
3223 * one of the earlier writes we issued ran into a hard error
3224 * don't issue any more writes, cleanup the UPL
3225 * that was just created but not used, then
3226 * go wait for all writes that are part of this stream
3227 * to complete before returning the error to the caller
3228 */
3229 ubc_upl_abort(upl, 0);
3230
3231 goto wait_for_dwrites;
3232 }
3233
3234 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
3235 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
3236
3237 if (!useVectorUPL) {
3238 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
3239 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3240 } else {
3241 if (!vector_upl_index) {
3242 vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
3243 v_upl_uio_offset = uio->uio_offset;
3244 vector_upl_offset = upl_offset;
3245 }
3246
3247 vector_upl_set_subupl(vector_upl, upl, upl_size);
3248 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
3249 vector_upl_index++;
3250 vector_upl_iosize += io_size;
3251 vector_upl_size += upl_size;
3252
3253 if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
3254 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3255 reset_vector_run_state();
3256 }
3257 }
3258
3259 /*
3260 * update the uio structure to
3261 * reflect the I/O that we just issued
3262 */
3263 uio_update(uio, (user_size_t)io_size);
3264
3265 /*
3266 * in case we end up calling through to cluster_write_copy to finish
3267 * the tail of this request, we need to update the oldEOF so that we
3268 * don't zero-fill the head of a page if we've successfully written
3269 * data to that area... 'cluster_write_copy' will zero-fill the head of a
3270 * page that is beyond the oldEOF if the write is unaligned... we only
3271 * want that to happen for the very first page of the cluster_write,
3272 * NOT the first page of each vector making up a multi-vector write.
3273 */
3274 if (uio->uio_offset > oldEOF) {
3275 oldEOF = uio->uio_offset;
3276 }
3277
3278 io_req_size -= io_size;
3279
3280 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
3281 (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
3282 } /* end while */
3283
3284 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
3285 retval = cluster_io_type(uio, write_type, write_length, min_io_size);
3286
3287 if (retval == 0 && *write_type == IO_DIRECT) {
3288 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
3289 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
3290
3291 goto next_dwrite;
3292 }
3293 }
3294
3295 wait_for_dwrites:
3296
3297 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
3298 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3299 reset_vector_run_state();
3300 }
3301 /*
3302 * make sure all async writes issued as part of this stream
3303 * have completed before we return
3304 */
3305 cluster_iostate_wait(&iostate, 0, "cluster_write_direct");
3306
3307 if (iostate.io_error) {
3308 retval = iostate.io_error;
3309 }
3310
3311 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3312
3313 if (io_throttled == TRUE && retval == 0) {
3314 retval = EAGAIN;
3315 }
3316
3317 if (io_req_size && retval == 0) {
3318 /*
3319 * we couldn't handle the tail of this request in DIRECT mode
3320 * so fire it through the copy path
3321 *
3322 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
3323 * so we can just pass 0 in for the headOff and tailOff
3324 */
3325 if (uio->uio_offset > oldEOF) {
3326 oldEOF = uio->uio_offset;
3327 }
3328
3329 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
3330
3331 *write_type = IO_UNKNOWN;
3332 }
3333 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
3334 (int)uio->uio_offset, io_req_size, retval, 4, 0);
3335
3336 return retval;
3337 }
3338
3339
3340 static int
3341 cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
3342 int (*callback)(buf_t, void *), void *callback_arg, int bflag)
3343 {
3344 upl_page_info_t *pl;
3345 addr64_t src_paddr = 0;
3346 upl_t upl[MAX_VECTS];
3347 vm_offset_t upl_offset;
3348 u_int32_t tail_size = 0;
3349 u_int32_t io_size;
3350 u_int32_t xsize;
3351 upl_size_t upl_size;
3352 vm_size_t upl_needed_size;
3353 mach_msg_type_number_t pages_in_pl;
3354 upl_control_flags_t upl_flags;
3355 kern_return_t kret;
3356 struct clios iostate;
3357 int error = 0;
3358 int cur_upl = 0;
3359 int num_upl = 0;
3360 int n;
3361 user_addr_t iov_base;
3362 u_int32_t devblocksize;
3363 u_int32_t mem_alignment_mask;
3364
3365 /*
3366 * When we enter this routine, we know
3367 * -- the io_req_size will not exceed iov_len
3368 * -- the target address is physically contiguous
3369 */
3370 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
3371
3372 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
3373 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
3374
3375 iostate.io_completed = 0;
3376 iostate.io_issued = 0;
3377 iostate.io_error = 0;
3378 iostate.io_wanted = 0;
3379
3380 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
3381
3382 next_cwrite:
3383 io_size = *write_length;
3384
3385 iov_base = uio_curriovbase(uio);
3386
3387 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
3388 upl_needed_size = upl_offset + io_size;
3389
3390 pages_in_pl = 0;
3391 upl_size = (upl_size_t)upl_needed_size;
3392 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
3393 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
3394
3395 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
3396 kret = vm_map_get_upl(map,
3397 vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
3398 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
3399
3400 if (kret != KERN_SUCCESS) {
3401 /*
3402 * failed to get pagelist
3403 */
3404 error = EINVAL;
3405 goto wait_for_cwrites;
3406 }
3407 num_upl++;
3408
3409 /*
3410 * Consider the possibility that upl_size wasn't satisfied.
3411 */
3412 if (upl_size < upl_needed_size) {
3413 /*
3414 * This is a failure in the physical memory case.
3415 */
3416 error = EINVAL;
3417 goto wait_for_cwrites;
3418 }
3419 pl = ubc_upl_pageinfo(upl[cur_upl]);
3420
3421 src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
3422
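/*
* align the leading edge of the transfer to a device block boundary
* before issuing the block-aligned bulk of the I/O...
* illustrative example (values assumed, not taken from this code path):
* with devblocksize == 512 and uio_offset == 0x1234, (uio_offset & 511)
* is 0x34, so head_size == 512 - 0x34 == 460 bytes get pushed through
* cluster_align_phys_io before the aligned portion is issued
*/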
3423 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
3424 u_int32_t head_size;
3425
3426 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
3427
3428 if (head_size > io_size) {
3429 head_size = io_size;
3430 }
3431
3432 error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
3433
3434 if (error) {
3435 goto wait_for_cwrites;
3436 }
3437
3438 upl_offset += head_size;
3439 src_paddr += head_size;
3440 io_size -= head_size;
3441
3442 iov_base += head_size;
3443 }
3444 if ((u_int32_t)iov_base & mem_alignment_mask) {
3445 /*
3446 * the request isn't aligned on a memory boundary that
3447 * the underlying DMA engine can handle...
3448 * return an error instead of going through
3449 * the slow copy path since the intent of this
3450 * path is direct I/O from device memory
3451 */
3452 error = EINVAL;
3453 goto wait_for_cwrites;
3454 }
3455
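/*
* split off any sub-device-block tail so the loop below only issues
* device-block-multiple transfers... the tail is handled via
* cluster_align_phys_io after the async writes drain
* (illustrative example: io_size == 10000 with devblocksize == 512
* leaves tail_size == 272 and io_size == 9728)
*/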
3456 tail_size = io_size & (devblocksize - 1);
3457 io_size -= tail_size;
3458
3459 while (io_size && error == 0) {
3460 if (io_size > MAX_IO_CONTIG_SIZE) {
3461 xsize = MAX_IO_CONTIG_SIZE;
3462 } else {
3463 xsize = io_size;
3464 }
3465 /*
3466 * request asynchronously so that we can overlap
3467 * the preparation of the next I/O... we'll do
3468 * the commit after all the I/O has completed
3469 * since it's all issued against the same UPL...
3470 * if there are already too many outstanding writes
3471 * wait until some have completed before issuing the next
3472 */
3473 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");
3474
3475 if (iostate.io_error) {
3476 /*
3477 * one of the earlier writes we issued ran into a hard error
3478 * don't issue any more writes...
3479 * go wait for all writes that are part of this stream
3480 * to complete before returning the error to the caller
3481 */
3482 goto wait_for_cwrites;
3483 }
3484 /*
3485 * issue an asynchronous write to cluster_io
3486 */
3487 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
3488 xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
3489
3490 if (error == 0) {
3491 /*
3492 * The cluster_io write completed successfully,
3493 * update the uio structure
3494 */
3495 uio_update(uio, (user_size_t)xsize);
3496
3497 upl_offset += xsize;
3498 src_paddr += xsize;
3499 io_size -= xsize;
3500 }
3501 }
3502 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
3503 error = cluster_io_type(uio, write_type, write_length, 0);
3504
3505 if (error == 0 && *write_type == IO_CONTIG) {
3506 cur_upl++;
3507 goto next_cwrite;
3508 }
3509 } else {
3510 *write_type = IO_UNKNOWN;
3511 }
3512
3513 wait_for_cwrites:
3514 /*
3515 * make sure all async writes that are part of this stream
3516 * have completed before we proceed
3517 */
3518 cluster_iostate_wait(&iostate, 0, "cluster_write_contig");
3519
3520 if (iostate.io_error) {
3521 error = iostate.io_error;
3522 }
3523
3524 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3525
3526 if (error == 0 && tail_size) {
3527 error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
3528 }
3529
3530 for (n = 0; n < num_upl; n++) {
3531 /*
3532 * just release our hold on each physically contiguous
3533 * region without changing any state
3534 */
3535 ubc_upl_abort(upl[n], 0);
3536 }
3537
3538 return error;
3539 }
3540
3541
3542 /*
3543 * need to avoid a race between an msync of a range of pages dirtied via mmap
3544 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
3545 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
3546 *
3547 * we should never force-zero-fill pages that are already valid in the cache...
3548 * the entire page contains valid data (either from disk, zero-filled or dirtied
3549 * via an mmap) so we can only do damage by trying to zero-fill
3550 *
3551 */
3552 static int
3553 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
3554 {
3555 int zero_pg_index;
3556 boolean_t need_cluster_zero = TRUE;
3557
3558 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
3559 bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
3560 zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
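/*
* illustrative example (assuming 4K pages): with upl_f_offset == 0x4000
* and zero_off == 0x5280, bytes_to_zero is clipped to 0xd80 (the bytes
* remaining in that page) and zero_pg_index == 1
*/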
3561
3562 if (upl_valid_page(pl, zero_pg_index)) {
3563 /*
3564 * never force zero valid pages - dirty or clean
3565 * we'll leave these in the UPL for cluster_write_copy to deal with
3566 */
3567 need_cluster_zero = FALSE;
3568 }
3569 }
3570 if (need_cluster_zero == TRUE) {
3571 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
3572 }
3573
3574 return bytes_to_zero;
3575 }
3576
3577
3578 void
3579 cluster_update_state(vnode_t vp, vm_object_offset_t s_offset, vm_object_offset_t e_offset, boolean_t vm_initiated)
3580 {
3581 struct cl_extent cl;
3582 boolean_t first_pass = TRUE;
3583
3584 assert(s_offset < e_offset);
3585 assert((s_offset & PAGE_MASK_64) == 0);
3586 assert((e_offset & PAGE_MASK_64) == 0);
3587
3588 cl.b_addr = (daddr64_t)(s_offset / PAGE_SIZE_64);
3589 cl.e_addr = (daddr64_t)(e_offset / PAGE_SIZE_64);
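/*
* cl now describes the affected range in units of whole pages...
* e.g. (assuming 4K pages) s_offset == 0 and e_offset == 0x8000
* yield b_addr == 0 and e_addr == 8, i.e. pages [0, 8)
*/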
3590
3591 cluster_update_state_internal(vp, &cl, 0, TRUE, &first_pass, s_offset, (int)(e_offset - s_offset),
3592 vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated);
3593 }
3594
3595
3596 static void
3597 cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes,
3598 boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF,
3599 int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
3600 {
3601 struct cl_writebehind *wbp;
3602 int cl_index;
3603 int ret_cluster_try_push;
3604 u_int max_cluster_pgcount;
3605
3606
3607 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
3608
3609 /*
3610 * take the lock to protect our accesses
3611 * of the writebehind and sparse cluster state
3612 */
3613 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
3614
3615 if (wbp->cl_scmap) {
3616 if (!(flags & IO_NOCACHE)) {
3617 /*
3618 * we've fallen into the sparse
3619 * cluster method of delaying dirty pages
3620 */
3621 sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);
3622
3623 lck_mtx_unlock(&wbp->cl_lockw);
3624 return;
3625 }
3626 /*
3627 * must have done cached writes that fell into
3628 * the sparse cluster mechanism... we've switched
3629 * to uncached writes on the file, so go ahead
3630 * and push whatever's in the sparse map
3631 * and switch back to normal clustering
3632 */
3633 wbp->cl_number = 0;
3634
3635 sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg, vm_initiated);
3636 /*
3637 * no clusters of either type present at this point
3638 * so just go directly to start_new_cluster since
3639 * we know we need to delay this I/O since we've
3640 * already released the pages back into the cache
3641 * to avoid the deadlock with sparse_cluster_push
3642 */
3643 goto start_new_cluster;
3644 }
3645 if (*first_pass == TRUE) {
3646 if (write_off == wbp->cl_last_write) {
3647 wbp->cl_seq_written += write_cnt;
3648 } else {
3649 wbp->cl_seq_written = write_cnt;
3650 }
3651
3652 wbp->cl_last_write = write_off + write_cnt;
3653
3654 *first_pass = FALSE;
3655 }
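/*
* cl_seq_written accumulates only while the writes remain strictly
* sequential... e.g. three back-to-back 64K writes leave
* cl_seq_written == 192K, while a write to any other offset resets
* it to just that write's length
*/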
3656 if (wbp->cl_number == 0) {
3657 /*
3658 * no clusters currently present
3659 */
3660 goto start_new_cluster;
3661 }
3662
3663 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
3664 /*
3665 * check each cluster that we currently hold
3666 * try to merge some or all of this write into
3667 * one or more of the existing clusters... if
3668 * any portion of the write remains, start a
3669 * new cluster
3670 */
3671 if (cl->b_addr >= wbp->cl_clusters[cl_index].b_addr) {
3672 /*
3673 * the current write starts at or after the current cluster
3674 */
3675 if (cl->e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3676 /*
3677 * we have a write that fits entirely
3678 * within the existing cluster limits
3679 */
3680 if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
3681 /*
3682 * update our idea of where the cluster ends
3683 */
3684 wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
3685 }
3686 break;
3687 }
3688 if (cl->b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3689 /*
3690 * we have a write that starts in the middle of the current cluster
3691 * but extends beyond the cluster's limit... we know this because
3692 * of the previous checks
3693 * we'll extend the current cluster to the max
3694 * and update the b_addr for the current write to reflect that
3695 * the head of it was absorbed into this cluster...
3696 * note that we'll always have a leftover tail in this case since
3697 * full absorption would have occurred in the clause above
3698 */
3699 wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
3700
3701 cl->b_addr = wbp->cl_clusters[cl_index].e_addr;
3702 }
3703 /*
3704 * we come here for the case where the current write starts
3705 * beyond the limit of the existing cluster or we have a leftover
3706 * tail after a partial absorption
3707 *
3708 * in either case, we'll check the remaining clusters before
3709 * starting a new one
3710 */
3711 } else {
3712 /*
3713 * the current write starts in front of the cluster we're currently considering
3714 */
3715 if ((wbp->cl_clusters[cl_index].e_addr - cl->b_addr) <= max_cluster_pgcount) {
3716 /*
3717 * we can just merge the new request into
3718 * this cluster and leave it in the cache
3719 * since the resulting cluster is still
3720 * less than the maximum allowable size
3721 */
3722 wbp->cl_clusters[cl_index].b_addr = cl->b_addr;
3723
3724 if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
3725 /*
3726 * the current write completely
3727 * envelops the existing cluster and since
3728 * each write is limited to at most max_cluster_pgcount pages
3729 * we can just use the start and last blocknos of the write
3730 * to generate the cluster limits
3731 */
3732 wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
3733 }
3734 break;
3735 }
3736 /*
3737 * if we were to combine this write with the current cluster
3738 * we would exceed the cluster size limit.... so,
3739 * let's see if there's any overlap of the new I/O with
3740 * the cluster we're currently considering... in fact, we'll
3741 * stretch the cluster out to its full limit and see if we
3742 * get an intersection with the current write
3743 *
3744 */
3745 if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
3746 /*
3747 * the current write extends into the proposed cluster
3748 * clip the length of the current write after first combining its
3749 * tail with the newly shaped cluster
3750 */
3751 wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
3752
3753 cl->e_addr = wbp->cl_clusters[cl_index].b_addr;
3754 }
3755 /*
3756 * if we get here, there was no way to merge
3757 * any portion of this write with this cluster
3758 * or we could only merge part of it which
3759 * will leave a tail...
3760 * we'll check the remaining clusters before starting a new one
3761 */
3762 }
3763 }
3764 if (cl_index < wbp->cl_number) {
3765 /*
3766 * we found an existing cluster(s) that we
3767 * could entirely merge this I/O into
3768 */
3769 goto delay_io;
3770 }
3771
3772 if (defer_writes == FALSE &&
3773 wbp->cl_number == MAX_CLUSTERS &&
3774 wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) {
3775 uint32_t n;
3776
3777 if (vp->v_mount->mnt_minsaturationbytecount) {
3778 n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp);
3779
3780 if (n > MAX_CLUSTERS) {
3781 n = MAX_CLUSTERS;
3782 }
3783 } else {
3784 n = 0;
3785 }
3786
3787 if (n == 0) {
3788 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
3789 n = WRITE_BEHIND_SSD;
3790 } else {
3791 n = WRITE_BEHIND;
3792 }
3793 }
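/*
* push 'n' of the delayed clusters to keep the write-behind pipeline
* primed... n is derived from the mount's minimum saturation byte
* count when one is configured (capped at MAX_CLUSTERS), otherwise
* it falls back to the SSD or rotational write-behind default
*/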
3794 while (n--) {
3795 cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated);
3796 }
3797 }
3798 if (wbp->cl_number < MAX_CLUSTERS) {
3799 /*
3800 * we didn't find an existing cluster to
3801 * merge into, but there's room to start
3802 * a new one
3803 */
3804 goto start_new_cluster;
3805 }
3806 /*
3807 * no existing cluster to merge with and no
3808 * room to start a new one... we'll try
3809 * pushing one of the existing ones... if none of
3810 * them are able to be pushed, we'll switch
3811 * to the sparse cluster mechanism
3812 * cluster_try_push updates cl_number to the
3813 * number of remaining clusters... and
3814 * returns the number of currently unused clusters
3815 */
3816 ret_cluster_try_push = 0;
3817
3818 /*
3819 * if writes are not deferred, call cluster push immediately
3820 */
3821 if (defer_writes == FALSE) {
3822 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated);
3823 }
3824 /*
3825 * execute following regardless of writes being deferred or not
3826 */
3827 if (ret_cluster_try_push == 0) {
3828 /*
3829 * no more room in the normal cluster mechanism
3830 * so let's switch to the more expansive but expensive
3831 * sparse mechanism....
3832 */
3833 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated);
3834 sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);
3835
3836 lck_mtx_unlock(&wbp->cl_lockw);
3837 return;
3838 }
3839 start_new_cluster:
3840 wbp->cl_clusters[wbp->cl_number].b_addr = cl->b_addr;
3841 wbp->cl_clusters[wbp->cl_number].e_addr = cl->e_addr;
3842
3843 wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3844
3845 if (flags & IO_NOCACHE) {
3846 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3847 }
3848
3849 if (flags & IO_PASSIVE) {
3850 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3851 }
3852
3853 wbp->cl_number++;
3854 delay_io:
3855 lck_mtx_unlock(&wbp->cl_lockw);
3856 return;
3857 }
3858
3859
3860 static int
3861 cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
3862 off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3863 {
3864 upl_page_info_t *pl;
3865 upl_t upl;
3866 vm_offset_t upl_offset = 0;
3867 vm_size_t upl_size;
3868 off_t upl_f_offset;
3869 int pages_in_upl;
3870 int start_offset;
3871 int xfer_resid;
3872 int io_size;
3873 int io_offset;
3874 int bytes_to_zero;
3875 int bytes_to_move;
3876 kern_return_t kret;
3877 int retval = 0;
3878 int io_resid;
3879 long long total_size;
3880 long long zero_cnt;
3881 off_t zero_off;
3882 long long zero_cnt1;
3883 off_t zero_off1;
3884 off_t write_off = 0;
3885 int write_cnt = 0;
3886 boolean_t first_pass = FALSE;
3887 struct cl_extent cl;
3888 int bflag;
3889 u_int max_io_size;
3890
3891 if (uio) {
3892 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
3893 (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
3894
3895 io_resid = io_req_size;
3896 } else {
3897 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
3898 0, 0, (int)oldEOF, (int)newEOF, 0);
3899
3900 io_resid = 0;
3901 }
3902 if (flags & IO_PASSIVE) {
3903 bflag = CL_PASSIVE;
3904 } else {
3905 bflag = 0;
3906 }
3907 if (flags & IO_NOCACHE) {
3908 bflag |= CL_NOCACHE;
3909 }
3910
3911 if (flags & IO_SKIP_ENCRYPTION) {
3912 bflag |= CL_ENCRYPTED;
3913 }
3914
3915 zero_cnt = 0;
3916 zero_cnt1 = 0;
3917 zero_off = 0;
3918 zero_off1 = 0;
3919
3920 max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
3921
3922 if (flags & IO_HEADZEROFILL) {
3923 /*
3924 * some filesystems (HFS is one) don't support unallocated holes within a file...
3925 * so we zero fill the intervening space between the old EOF and the offset
3926 * where the next chunk of real data begins.... ftruncate will also use this
3927 * routine to zero fill to the new EOF when growing a file... in this case, the
3928 * uio structure will not be provided
3929 */
3930 if (uio) {
3931 if (headOff < uio->uio_offset) {
3932 zero_cnt = uio->uio_offset - headOff;
3933 zero_off = headOff;
3934 }
3935 } else if (headOff < newEOF) {
3936 zero_cnt = newEOF - headOff;
3937 zero_off = headOff;
3938 }
3939 } else {
3940 if (uio && uio->uio_offset > oldEOF) {
3941 zero_off = uio->uio_offset & ~PAGE_MASK_64;
3942
3943 if (zero_off >= oldEOF) {
3944 zero_cnt = uio->uio_offset - zero_off;
3945
3946 flags |= IO_HEADZEROFILL;
3947 }
3948 }
3949 }
3950 if (flags & IO_TAILZEROFILL) {
3951 if (uio) {
3952 zero_off1 = uio->uio_offset + io_req_size;
3953
3954 if (zero_off1 < tailOff) {
3955 zero_cnt1 = tailOff - zero_off1;
3956 }
3957 }
3958 } else {
3959 if (uio && newEOF > oldEOF) {
3960 zero_off1 = uio->uio_offset + io_req_size;
3961
3962 if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
3963 zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
3964
3965 flags |= IO_TAILZEROFILL;
3966 }
3967 }
3968 }
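/*
* illustrative example (assuming 4K pages): growing a file so that
* the write ends exactly at a non-page-aligned newEOF of 0x1180 leaves
* zero_off1 == 0x1180 and zero_cnt1 == 0x1000 - 0x180 == 0xe80, i.e.
* the remainder of that page is zero filled so a later extension
* can't expose stale data
*/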
3969 if (zero_cnt == 0 && uio == (struct uio *) 0) {
3970 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
3971 retval, 0, 0, 0, 0);
3972 return 0;
3973 }
3974 if (uio) {
3975 write_off = uio->uio_offset;
3976 write_cnt = (int)uio_resid(uio);
3977 /*
3978 * delay updating the sequential write info
3979 * in the control block until we've obtained
3980 * the lock for it
3981 */
3982 first_pass = TRUE;
3983 }
3984 while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
3985 /*
3986 * for this iteration of the loop, figure out where our starting point is
3987 */
3988 if (zero_cnt) {
3989 start_offset = (int)(zero_off & PAGE_MASK_64);
3990 upl_f_offset = zero_off - start_offset;
3991 } else if (io_resid) {
3992 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3993 upl_f_offset = uio->uio_offset - start_offset;
3994 } else {
3995 start_offset = (int)(zero_off1 & PAGE_MASK_64);
3996 upl_f_offset = zero_off1 - start_offset;
3997 }
3998 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
3999 (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
4000
4001 if (total_size > max_io_size) {
4002 total_size = max_io_size;
4003 }
4004
4005 cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
4006
4007 if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
4008 /*
4009 * assumption... total_size <= io_resid
4010 * because IO_HEADZEROFILL and IO_TAILZEROFILL are not set
4011 */
4012 if ((start_offset + total_size) > max_io_size) {
4013 total_size = max_io_size - start_offset;
4014 }
4015 xfer_resid = (int)total_size;
4016
4017 retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
4018
4019 if (retval) {
4020 break;
4021 }
4022
4023 io_resid -= (total_size - xfer_resid);
4024 total_size = xfer_resid;
4025 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4026 upl_f_offset = uio->uio_offset - start_offset;
4027
4028 if (total_size == 0) {
4029 if (start_offset) {
4030 /*
4031 * the write did not finish on a page boundary
4032 * which will leave upl_f_offset pointing to the
4033 * beginning of the last page written instead of
4034 * the page beyond it... bump it in this case
4035 * so that the cluster code records the last page
4036 * written as dirty
4037 */
4038 upl_f_offset += PAGE_SIZE_64;
4039 }
4040 upl_size = 0;
4041
4042 goto check_cluster;
4043 }
4044 }
4045 /*
4046 * compute the size of the upl needed to encompass
4047 * the requested write... limit each call to cluster_io
4048 * to the maximum UPL size... cluster_io will clip if
4049 * this exceeds the maximum io_size for the device,
4050 * make sure to account for
4051 * a starting offset that's not page aligned
4052 */
4053 upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4054
4055 if (upl_size > max_io_size) {
4056 upl_size = max_io_size;
4057 }
4058
4059 pages_in_upl = (int)(upl_size / PAGE_SIZE);
4060 io_size = (int)(upl_size - start_offset);
4061
4062 if ((long long)io_size > total_size) {
4063 io_size = (int)total_size;
4064 }
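/*
* illustrative sizing (assuming 4K pages): start_offset == 0x300 and
* total_size == 0x2100 round up to upl_size == 0x3000 (3 pages), and
* io_size == 0x3000 - 0x300 == 0x2d00 is then clipped back down to
* total_size
*/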
4065
4066 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
4067
4068
4069 /*
4070 * Gather the pages from the buffer cache.
4071 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
4072 * that we intend to modify these pages.
4073 */
4074 kret = ubc_create_upl_kernel(vp,
4075 upl_f_offset,
4076 (int)upl_size,
4077 &upl,
4078 &pl,
4079 UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY),
4080 VM_KERN_MEMORY_FILE);
4081 if (kret != KERN_SUCCESS) {
4082 panic("cluster_write_copy: failed to get pagelist");
4083 }
4084
4085 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
4086 upl, (int)upl_f_offset, start_offset, 0, 0);
4087
4088 if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
4089 int read_size;
4090
4091 /*
4092 * we're starting in the middle of the first page of the upl
4093 * and the page isn't currently valid, so we're going to have
4094 * to read it in first... this is a synchronous operation
4095 */
4096 read_size = PAGE_SIZE;
4097
4098 if ((upl_f_offset + read_size) > oldEOF) {
4099 read_size = (int)(oldEOF - upl_f_offset);
4100 }
4101
4102 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
4103 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4104 if (retval) {
4105 /*
4106 * we had an error during the read which causes us to abort
4107 * the current cluster_write request... before we do, we need
4108 * to release the rest of the pages in the upl without modifying
4109 * their state and mark the failed page in error
4110 */
4111 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4112
4113 if (upl_size > PAGE_SIZE) {
4114 ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size,
4115 UPL_ABORT_FREE_ON_EMPTY);
4116 }
4117
4118 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4119 upl, 0, 0, retval, 0);
4120 break;
4121 }
4122 }
4123 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
4124 /*
4125 * the last offset we're writing to in this upl does not end on a page
4126 * boundary... if it's not beyond the old EOF, then we'll also need to
4127 * pre-read this page in if it isn't already valid
4128 */
4129 upl_offset = upl_size - PAGE_SIZE;
4130
4131 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
4132 !upl_valid_page(pl, (int)(upl_offset / PAGE_SIZE))) {
4133 int read_size;
4134
4135 read_size = PAGE_SIZE;
4136
4137 if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) {
4138 read_size = (int)(oldEOF - (upl_f_offset + upl_offset));
4139 }
4140
4141 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
4142 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4143 if (retval) {
4144 /*
4145 * we had an error during the read which causes us to abort
4146 * the current cluster_write request... before we do, we
4147 * need to release the rest of the pages in the upl without
4148 * modifying their state and mark the failed page in error
4149 */
4150 ubc_upl_abort_range(upl, (upl_offset_t)upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4151
4152 if (upl_size > PAGE_SIZE) {
4153 ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
4154 }
4155
4156 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4157 upl, 0, 0, retval, 0);
4158 break;
4159 }
4160 }
4161 }
4162 xfer_resid = io_size;
4163 io_offset = start_offset;
4164
4165 while (zero_cnt && xfer_resid) {
4166 if (zero_cnt < (long long)xfer_resid) {
4167 bytes_to_zero = (int)zero_cnt;
4168 } else {
4169 bytes_to_zero = xfer_resid;
4170 }
4171
4172 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
4173
4174 xfer_resid -= bytes_to_zero;
4175 zero_cnt -= bytes_to_zero;
4176 zero_off += bytes_to_zero;
4177 io_offset += bytes_to_zero;
4178 }
4179 if (xfer_resid && io_resid) {
4180 u_int32_t io_requested;
4181
4182 bytes_to_move = min(io_resid, xfer_resid);
4183 io_requested = bytes_to_move;
4184
4185 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
4186
4187 if (retval) {
4188 ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
4189
4190 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4191 upl, 0, 0, retval, 0);
4192 } else {
4193 io_resid -= bytes_to_move;
4194 xfer_resid -= bytes_to_move;
4195 io_offset += bytes_to_move;
4196 }
4197 }
4198 while (xfer_resid && zero_cnt1 && retval == 0) {
4199 if (zero_cnt1 < (long long)xfer_resid) {
4200 bytes_to_zero = (int)zero_cnt1;
4201 } else {
4202 bytes_to_zero = xfer_resid;
4203 }
4204
4205 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
4206
4207 xfer_resid -= bytes_to_zero;
4208 zero_cnt1 -= bytes_to_zero;
4209 zero_off1 += bytes_to_zero;
4210 io_offset += bytes_to_zero;
4211 }
4212 if (retval == 0) {
4213 int do_zeroing = 1;
4214
4215 io_size += start_offset;
4216
4217 /* Force more restrictive zeroing behavior only on APFS */
4218 if ((vnode_tag(vp) == VT_APFS) && (newEOF < oldEOF)) {
4219 do_zeroing = 0;
4220 }
4221
4222 if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
4223 /*
4224 * if we're extending the file with this write
4225 * we'll zero fill the rest of the page so that
4226 * if the file gets extended again in such a way as to leave a
4227 * hole starting at this EOF, we'll have zeros in the correct spot
4228 */
4229 cluster_zero(upl, io_size, (int)(upl_size - io_size), NULL);
4230 }
4231 /*
4232 * release the upl now if we hold one since...
4233 * 1) pages in it may be present in the sparse cluster map
4234 * and may span 2 separate buckets there... if they do and
4235 * we happen to have to flush a bucket to make room and it intersects
4236 * this upl, a deadlock may result on page BUSY
4237 * 2) we're delaying the I/O... from this point forward we're just updating
4238 * the cluster state... no need to hold the pages, so commit them
4239 * 3) IO_SYNC is set...
4240 * because we had to ask for a UPL that provides currently non-present pages, the
4241 * UPL has been automatically set to clear the dirty flags (both software and hardware)
4242 * upon committing it... this is not the behavior we want since it's possible for
4243 * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
4244 * we'll pick these pages back up later with the correct behavior specified.
4245 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
4246 * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
4247 * we hold since the flushing context is holding the cluster lock.
4248 */
4249 ubc_upl_commit_range(upl, 0, (upl_size_t)upl_size,
4250 UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
4251 check_cluster:
4252 /*
4253 * calculate the last logical block number
4254 * that this delayed I/O encompassed
4255 */
4256 cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
4257
4258 if (flags & IO_SYNC) {
4259 /*
4260 * if the IO_SYNC flag is set then we need to bypass
4261 * any clustering and immediately issue the I/O
4262 *
4263 * we don't hold the lock at this point
4264 *
4265 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
4266 * so that we correctly deal with a change in state of the hardware modify bit...
4267 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
4268 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
4269 * responsible for generating the correct sized I/O(s)
4270 */
4271 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE);
4272 } else {
4273 boolean_t defer_writes = FALSE;
4274
4275 if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) {
4276 defer_writes = TRUE;
4277 }
4278
4279 cluster_update_state_internal(vp, &cl, flags, defer_writes, &first_pass,
4280 write_off, write_cnt, newEOF, callback, callback_arg, FALSE);
4281 }
4282 }
4283 }
4284 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
4285
4286 return retval;
4287 }
4288
4289
4290
4291 int
4292 cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
4293 {
4294 return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
4295 }
4296
4297
4298 int
4299 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
4300 {
4301 int retval = 0;
4302 int flags;
4303 user_ssize_t cur_resid;
4304 u_int32_t io_size;
4305 u_int32_t read_length = 0;
4306 int read_type = IO_COPY;
4307 bool check_io_type;
4308
4309 flags = xflags;
4310
4311 if (vp->v_flag & VNOCACHE_DATA) {
4312 flags |= IO_NOCACHE;
4313 }
4314 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled) {
4315 flags |= IO_RAOFF;
4316 }
4317
4318 if (flags & IO_SKIP_ENCRYPTION) {
4319 flags |= IO_ENCRYPTED;
4320 }
4321
4322 /*
4323 * do a read through the cache if one of the following is true....
4324 * NOCACHE is not true
4325 * the uio request doesn't target USERSPACE (unless IO_NOCACHE_SYSSPACE is also set)
4326 * Alternatively, if IO_ENCRYPTED is set, then we want to bypass the cache as well.
4327 * Reading encrypted data from a CP filesystem should never result in the data touching
4328 * the UBC.
4329 *
4330 * otherwise, find out if we want the direct or contig variant for
4331 * the first vector in the uio request
4332 */
4333 check_io_type = false;
4334 if (flags & IO_NOCACHE) {
4335 if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
4336 /*
4337 * no-cache to user-space: ok to consider IO_DIRECT.
4338 */
4339 check_io_type = true;
4340 } else if (uio->uio_segflg == UIO_SYSSPACE &&
4341 (flags & IO_NOCACHE_SYSSPACE)) {
4342 /*
4343 * no-cache to kernel-space but w/ IO_NOCACHE_SYSSPACE:
4344 * ok to consider IO_DIRECT.
4345 * The caller should make sure to target kernel buffer
4346 * that is backed by regular anonymous memory (i.e.
4347 * not backed by the kernel object or an external
4348 * memory manager like device memory or a file).
4349 */
4350 check_io_type = true;
4351 }
4352 } else if (flags & IO_ENCRYPTED) {
4353 check_io_type = true;
4354 }
4355 if (check_io_type) {
4356 retval = cluster_io_type(uio, &read_type, &read_length, 0);
4357 }
4358
4359 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
4360 switch (read_type) {
4361 case IO_COPY:
4362 /*
4363 * make sure the uio_resid isn't too big...
4364 * internally, we want to handle all of the I/O in
4365 * chunk sizes that fit in a 32 bit int
4366 */
4367 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
4368 io_size = MAX_IO_REQUEST_SIZE;
4369 } else {
4370 io_size = (u_int32_t)cur_resid;
4371 }
4372
4373 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
4374 break;
4375
4376 case IO_DIRECT:
4377 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
4378 break;
4379
4380 case IO_CONTIG:
4381 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
4382 break;
4383
4384 case IO_UNKNOWN:
4385 retval = cluster_io_type(uio, &read_type, &read_length, 0);
4386 break;
4387 }
4388 }
4389 return retval;
4390 }
4391
4392
4393
4394 static void
4395 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
4396 {
4397 int range;
4398 int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
4399
4400 if ((range = last_pg - start_pg)) {
4401 if (take_reference) {
4402 abort_flags |= UPL_ABORT_REFERENCE;
4403 }
4404
4405 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
4406 }
4407 }
4408
4409
4410 static int
4411 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
4412 {
4413 upl_page_info_t *pl;
4414 upl_t upl = NULL;
4415 vm_offset_t upl_offset;
4416 u_int32_t upl_size;
4417 off_t upl_f_offset;
4418 int start_offset;
4419 int start_pg;
4420 int last_pg;
4421 int uio_last = 0;
4422 int pages_in_upl;
4423 off_t max_size;
4424 off_t last_ioread_offset;
4425 off_t last_request_offset;
4426 kern_return_t kret;
4427 int error = 0;
4428 int retval = 0;
4429 u_int32_t size_of_prefetch;
4430 u_int32_t xsize;
4431 u_int32_t io_size;
4432 u_int32_t max_rd_size;
4433 u_int32_t max_io_size;
4434 u_int32_t max_prefetch;
4435 u_int rd_ahead_enabled = 1;
4436 u_int prefetch_enabled = 1;
4437 struct cl_readahead * rap;
4438 struct clios iostate;
4439 struct cl_extent extent;
4440 int bflag;
4441 int take_reference = 1;
4442 int policy = IOPOL_DEFAULT;
4443 boolean_t iolock_inited = FALSE;
4444
4445 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
4446 (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
4447
4448 if (flags & IO_ENCRYPTED) {
4449 panic("encrypted blocks will hit UBC!");
4450 }
4451
4452 policy = throttle_get_io_policy(NULL);
4453
4454 if (policy == THROTTLE_LEVEL_TIER3 || policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE)) {
4455 take_reference = 0;
4456 }
4457
4458 if (flags & IO_PASSIVE) {
4459 bflag = CL_PASSIVE;
4460 } else {
4461 bflag = 0;
4462 }
4463
4464 if (flags & IO_NOCACHE) {
4465 bflag |= CL_NOCACHE;
4466 }
4467
4468 if (flags & IO_SKIP_ENCRYPTION) {
4469 bflag |= CL_ENCRYPTED;
4470 }
4471
4472 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
4473 max_prefetch = cluster_max_prefetch(vp, max_io_size, prefetch_max);
4474 max_rd_size = max_prefetch;
4475
4476 last_request_offset = uio->uio_offset + io_req_size;
4477
4478 if (last_request_offset > filesize) {
4479 last_request_offset = filesize;
4480 }
4481
4482 if ((flags & (IO_RAOFF | IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
4483 rd_ahead_enabled = 0;
4484 rap = NULL;
4485 } else {
4486 if (cluster_is_throttled(vp)) {
4487 /*
4488 * we're in the throttle window, at the very least
4489 * we want to limit the size of the I/O we're about
4490 * to issue
4491 */
4492 rd_ahead_enabled = 0;
4493 prefetch_enabled = 0;
4494
4495 max_rd_size = calculate_max_throttle_size(vp);
4496 }
4497 if ((rap = cluster_get_rap(vp)) == NULL) {
4498 rd_ahead_enabled = 0;
4499 } else {
4500 extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
4501 extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
4502 }
4503 }
4504 if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
4505 /*
4506 * determine if we already have a read-ahead in the pipe courtesy of the
4507 * last read system call that was issued...
4508 * if so, pick up its extent to determine where we should start
4509 * with respect to any read-ahead that might be necessary to
4510 * garner all the data needed to complete this read system call
4511 */
4512 last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
4513
4514 if (last_ioread_offset < uio->uio_offset) {
4515 last_ioread_offset = (off_t)0;
4516 } else if (last_ioread_offset > last_request_offset) {
4517 last_ioread_offset = last_request_offset;
4518 }
4519 } else {
4520 last_ioread_offset = (off_t)0;
4521 }
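/*
* e.g. if the read-ahead engine last issued I/O through page
* cl_maxra == 99, the prefetch stream already covers everything up to
* byte offset 100 * PAGE_SIZE, so that's where any new prefetch for
* this request would need to pick up (clamped to the request bounds)
*/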
4522
4523 while (io_req_size && uio->uio_offset < filesize && retval == 0) {
4524 max_size = filesize - uio->uio_offset;
4525 bool leftover_upl_aborted = false;
4526
4527 if ((off_t)(io_req_size) < max_size) {
4528 io_size = io_req_size;
4529 } else {
4530 io_size = (u_int32_t)max_size;
4531 }
4532
4533 if (!(flags & IO_NOCACHE)) {
4534 while (io_size) {
4535 u_int32_t io_resid;
4536 u_int32_t io_requested;
4537
4538 /*
4539 * if we keep finding the pages we need already in the cache, then
4540 * don't bother to call cluster_read_prefetch since it costs CPU cycles
4541 * to determine that we have all the pages we need... once we miss in
4542 * the cache and have issued an I/O, then we'll assume that we're likely
4543 * to continue to miss in the cache and it's to our advantage to try and prefetch
4544 */
4545 if (last_request_offset && last_ioread_offset && (size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset))) {
4546 if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
4547 /*
4548 * we've already issued I/O for this request and
4549 * there's still work to do and
4550 * our prefetch stream is running dry, so issue a
4551 * pre-fetch I/O... the I/O latency will overlap
4552 * with the copying of the data
4553 */
4554 if (size_of_prefetch > max_rd_size) {
4555 size_of_prefetch = max_rd_size;
4556 }
4557
4558 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4559
4560 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4561
4562 if (last_ioread_offset > last_request_offset) {
4563 last_ioread_offset = last_request_offset;
4564 }
4565 }
4566 }
4567 /*
4568 * limit the size of the copy we're about to do so that
4569 * we can notice that our I/O pipe is running dry and
4570 * get the next I/O issued before it does go dry
4571 */
4572 if (last_ioread_offset && io_size > (max_io_size / 4)) {
4573 io_resid = (max_io_size / 4);
4574 } else {
4575 io_resid = io_size;
4576 }
4577
4578 io_requested = io_resid;
4579
4580 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);
4581
4582 xsize = io_requested - io_resid;
4583
4584 io_size -= xsize;
4585 io_req_size -= xsize;
4586
4587 if (retval || io_resid) {
4588 /*
4589 * if we run into a real error or
4590 * a page that is not in the cache
4591 * we need to leave streaming mode
4592 */
4593 break;
4594 }
4595
4596 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
4597 /*
4598 * we've already finished the I/O for this read request
4599 * let's see if we should do a read-ahead
4600 */
4601 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4602 }
4603 }
4604 if (retval) {
4605 break;
4606 }
4607 if (io_size == 0) {
4608 if (rap != NULL) {
4609 if (extent.e_addr < rap->cl_lastr) {
4610 rap->cl_maxra = 0;
4611 }
4612 rap->cl_lastr = extent.e_addr;
4613 }
4614 break;
4615 }
4616 /*
4617 * recompute max_size since cluster_copy_ubc_data_internal
4618 * may have advanced uio->uio_offset
4619 */
4620 max_size = filesize - uio->uio_offset;
4621 }
4622
4623 iostate.io_completed = 0;
4624 iostate.io_issued = 0;
4625 iostate.io_error = 0;
4626 iostate.io_wanted = 0;
4627
4628 if ((flags & IO_RETURN_ON_THROTTLE)) {
4629 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
4630 if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
4631 /*
4632 * we're in the throttle window and at least 1 I/O
4633 * has already been issued by a throttleable thread
4634 * in this window, so return with EAGAIN to indicate
4635 * to the FS issuing the cluster_read call that it
4636 * should now throttle after dropping any locks
4637 */
4638 throttle_info_update_by_mount(vp->v_mount);
4639
4640 retval = EAGAIN;
4641 break;
4642 }
4643 }
4644 }
4645
4646 /*
4647 * compute the size of the upl needed to encompass
4648 * the requested read... limit each call to cluster_io
4649 * to the maximum UPL size... cluster_io will clip if
4650 * this exceeds the maximum io_size for the device,
4651 * make sure to account for
4652 * a starting offset that's not page aligned
4653 */
4654 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4655 upl_f_offset = uio->uio_offset - (off_t)start_offset;
4656
4657 if (io_size > max_rd_size) {
4658 io_size = max_rd_size;
4659 }
4660
4661 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4662
4663 if (flags & IO_NOCACHE) {
4664 if (upl_size > max_io_size) {
4665 upl_size = max_io_size;
4666 }
4667 } else {
4668 if (upl_size > max_io_size / 4) {
4669 upl_size = max_io_size / 4;
4670 upl_size &= ~PAGE_MASK;
4671
4672 if (upl_size == 0) {
4673 upl_size = PAGE_SIZE;
4674 }
4675 }
4676 }
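/*
* for cached reads the UPL is capped at a quarter of max_io_size so
* the copy-out to the caller can overlap with the prefetch of the
* next chunk... e.g. a 1MB max_io_size (illustrative value) caps the
* UPL at 256K per pass
*/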
4677 pages_in_upl = upl_size / PAGE_SIZE;
4678
4679 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
4680 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4681
4682 kret = ubc_create_upl_kernel(vp,
4683 upl_f_offset,
4684 upl_size,
4685 &upl,
4686 &pl,
4687 UPL_FILE_IO | UPL_SET_LITE,
4688 VM_KERN_MEMORY_FILE);
4689 if (kret != KERN_SUCCESS) {
4690 panic("cluster_read_copy: failed to get pagelist");
4691 }
4692
4693 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
4694 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4695
4696 /*
4697 * scan from the beginning of the upl looking for the first
4698 * non-valid page.... this will become the first page in
4699 * the request we're going to make to 'cluster_io'... if all
4700 * of the pages are valid, we won't call through to 'cluster_io'
4701 */
4702 for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
4703 if (!upl_valid_page(pl, start_pg)) {
4704 break;
4705 }
4706 }
4707
4708 /*
4709 * scan from the starting invalid page looking for a valid
4710 * page before the end of the upl is reached, if we
4711 * find one, then it will be the last page of the request to
4712 * 'cluster_io'
4713 */
4714 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
4715 if (upl_valid_page(pl, last_pg)) {
4716 break;
4717 }
4718 }
4719
4720 if (start_pg < last_pg) {
4721 /*
4722 * we found a range of 'invalid' pages that must be filled
4723 * if the last page in this range is the last page of the file
4724 * we may have to clip the size of it to keep from reading past
4725 * the end of the last physical block associated with the file
4726 */
4727 if (iolock_inited == FALSE) {
4728 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
4729
4730 iolock_inited = TRUE;
4731 }
4732 upl_offset = start_pg * PAGE_SIZE;
4733 io_size = (last_pg - start_pg) * PAGE_SIZE;
4734
4735 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
4736 io_size = (u_int32_t)(filesize - (upl_f_offset + upl_offset));
4737 }
4738
4739 /*
4740 * Find out if this needs verification; we'll have to manage the UPL
4741 * differently if so. Note that this call only lets us know if
4742 * verification is enabled on this mount point, the actual verification
4743 * is performed in the File system.
4744 */
4745 size_t verify_block_size = 0;
4746 if ((VNOP_VERIFY(vp, start_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL) == 0) /* && verify_block_size */) {
4747 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4748 if (!upl_valid_page(pl, uio_last)) {
4749 break;
4750 }
4751 }
4752 if (uio_last < pages_in_upl) {
4753 /*
4754 * there were some invalid pages beyond the valid pages
4755 * that we didn't issue an I/O for, just release them
4756 * unchanged now, so that any prefetch/readahead can
4757 * include them
4758 */
4759 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4760 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4761 leftover_upl_aborted = true;
4762 }
4763 }
4764
4765 /*
4766 * issue an asynchronous read to cluster_io
4767 */
4768
4769 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
4770 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
4771
4772 if (rap) {
4773 if (extent.e_addr < rap->cl_maxra) {
4774 /*
4775 * we've just issued a read for a block that should have been
4776 * in the cache courtesy of the read-ahead engine... something
4777 * has gone wrong with the pipeline, so reset the read-ahead
4778 * logic which will cause us to restart from scratch
4779 */
4780 rap->cl_maxra = 0;
4781 }
4782 }
4783 }
4784 if (error == 0) {
4785 /*
4786 * if the read completed successfully, or there was no I/O request
4787 * issued, then copy the data into user land via 'cluster_copy_upl_data'
4788 * we'll first add on any 'valid'
4789 * pages that were present in the upl when we acquired it.
4790 */
4791 u_int val_size;
4792
4793 if (!leftover_upl_aborted) {
4794 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4795 if (!upl_valid_page(pl, uio_last)) {
4796 break;
4797 }
4798 }
4799 if (uio_last < pages_in_upl) {
4800 /*
4801 * there were some invalid pages beyond the valid pages
4802 * that we didn't issue an I/O for, just release them
4803 * unchanged now, so that any prefetch/readahead can
4804 * include them
4805 */
4806 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4807 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4808 }
4809 }
4810
4811 /*
4812 * compute size to transfer this round, if io_req_size is
4813 * still non-zero after this attempt, we'll loop around and
4814 * set up for another I/O.
4815 */
4816 val_size = (uio_last * PAGE_SIZE) - start_offset;
4817
4818 if (val_size > max_size) {
4819 val_size = (u_int)max_size;
4820 }
4821
4822 if (val_size > io_req_size) {
4823 val_size = io_req_size;
4824 }
4825
4826 if ((uio->uio_offset + val_size) > last_ioread_offset) {
4827 last_ioread_offset = uio->uio_offset + val_size;
4828 }
4829
4830 if ((size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset)) && prefetch_enabled) {
4831 if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
4832 /*
4833 * if there's still I/O left to do for this request, and...
4834 * we're not in hard throttle mode, and...
4835 * we're close to using up the previous prefetch, then issue a
4836 * new pre-fetch I/O... the I/O latency will overlap
4837 * with the copying of the data
4838 */
4839 if (size_of_prefetch > max_rd_size) {
4840 size_of_prefetch = max_rd_size;
4841 }
4842
4843 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4844
4845 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4846
4847 if (last_ioread_offset > last_request_offset) {
4848 last_ioread_offset = last_request_offset;
4849 }
4850 }
4851 } else if ((uio->uio_offset + val_size) == last_request_offset) {
4852 /*
4853 * this transfer will finish this request, so...
4854 * let's try to read ahead if we're in
4855 * a sequential access pattern and we haven't
4856 * explicitly disabled it
4857 */
4858 if (rd_ahead_enabled) {
4859 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4860 }
4861
4862 if (rap != NULL) {
4863 if (extent.e_addr < rap->cl_lastr) {
4864 rap->cl_maxra = 0;
4865 }
4866 rap->cl_lastr = extent.e_addr;
4867 }
4868 }
4869 if (iolock_inited == TRUE) {
4870 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4871 }
4872
4873 if (iostate.io_error) {
4874 error = iostate.io_error;
4875 } else {
4876 u_int32_t io_requested;
4877
4878 io_requested = val_size;
4879
4880 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
4881
4882 io_req_size -= (val_size - io_requested);
4883 }
4884 } else {
4885 if (iolock_inited == TRUE) {
4886 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4887 }
4888 }
4889 if (start_pg < last_pg) {
4890 /*
4891 * compute the range of pages that we actually issued an I/O for
4892 * and either commit them as valid if the I/O succeeded
4893 * or abort them if the I/O failed or we're not supposed to
4894 * keep them in the cache
4895 */
4896 io_size = (last_pg - start_pg) * PAGE_SIZE;
4897
4898 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4899
4900 if (error || (flags & IO_NOCACHE)) {
4901 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
4902 UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4903 } else {
4904 int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;
4905
4906 if (take_reference) {
4907 commit_flags |= UPL_COMMIT_INACTIVATE;
4908 } else {
4909 commit_flags |= UPL_COMMIT_SPECULATE;
4910 }
4911
4912 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
4913 }
4914 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4915 }
4916 if ((last_pg - start_pg) < pages_in_upl) {
4917 /*
4918 * the set of pages that we issued an I/O for did not encompass
4919 * the entire upl... so just release these without modifying
4920 * their state
4921 */
4922 if (error) {
4923 if (leftover_upl_aborted) {
4924 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, (uio_last - start_pg) * PAGE_SIZE,
4925 UPL_ABORT_FREE_ON_EMPTY);
4926 } else {
4927 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
4928 }
4929 } else {
4930 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
4931 upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
4932
4933 /*
4934 * handle any valid pages at the beginning of
4935 * the upl... release these appropriately
4936 */
4937 cluster_read_upl_release(upl, 0, start_pg, take_reference);
4938
4939 /*
4940 * handle any valid pages immediately after the
4941 * pages we issued I/O for... ... release these appropriately
4942 */
4943 cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
4944
4945 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
4946 }
4947 }
4948 if (retval == 0) {
4949 retval = error;
4950 }
4951
4952 if (io_req_size) {
4953 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
4954
4955 if (cluster_is_throttled(vp)) {
4956 /*
4957 * we're in the throttle window, at the very least
4958 * we want to limit the size of the I/O we're about
4959 * to issue
4960 */
4961 rd_ahead_enabled = 0;
4962 prefetch_enabled = 0;
4963 max_rd_size = max_throttle_size;
4964 } else {
4965 if (max_rd_size == max_throttle_size) {
4966 /*
4967 * coming out of throttled state
4968 */
4969 if (policy != THROTTLE_LEVEL_TIER3 && policy != THROTTLE_LEVEL_TIER2) {
4970 if (rap != NULL) {
4971 rd_ahead_enabled = 1;
4972 }
4973 prefetch_enabled = 1;
4974 }
4975 max_rd_size = max_prefetch;
4976 last_ioread_offset = 0;
4977 }
4978 }
4979 }
4980 }
4981 if (iolock_inited == TRUE) {
4982 /*
4983 * cluster_io returned an error after it
4984 * had already issued some I/O. we need
4985 * to wait for that I/O to complete before
4986 * we can destroy the iostate mutex...
4987 * 'retval' already contains the early error
4988 * so no need to pick it up from iostate.io_error
4989 */
4990 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4991
4992 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
4993 }
4994 if (rap != NULL) {
4995 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
4996 (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
4997
4998 lck_mtx_unlock(&rap->cl_lockr);
4999 } else {
5000 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
5001 (int)uio->uio_offset, io_req_size, 0, retval, 0);
5002 }
5003
5004 return retval;
5005 }
5006
5007 /*
5008 * We don't want another read/write lock for every vnode in the system
5009 * so we keep a hash of them here. There should never be very many of
5010 * these around at any point in time.
5011 */
5012 cl_direct_read_lock_t *
5013 cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type)
5014 {
5015 struct cl_direct_read_locks *head
5016 = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp)
5017 % CL_DIRECT_READ_LOCK_BUCKETS];
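/*
* hash the vnode pointer into one of the lock buckets... dividing by
* sizeof(*vp) discards the low bits that are identical for all
* vnode-sized allocations so the buckets get a reasonable spread
*/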
5018
5019 struct cl_direct_read_lock *lck, *new_lck = NULL;
5020
5021 for (;;) {
5022 lck_spin_lock(&cl_direct_read_spin_lock);
5023
5024 LIST_FOREACH(lck, head, chain) {
5025 if (lck->vp == vp) {
5026 ++lck->ref_count;
5027 lck_spin_unlock(&cl_direct_read_spin_lock);
5028 if (new_lck) {
5029 // Someone beat us to it, ditch the allocation
5030 lck_rw_destroy(&new_lck->rw_lock, &cl_mtx_grp);
5031 kfree_type(cl_direct_read_lock_t, new_lck);
5032 }
5033 lck_rw_lock(&lck->rw_lock, type);
5034 return lck;
5035 }
5036 }
5037
5038 if (new_lck) {
5039 // Use the lock we allocated
5040 LIST_INSERT_HEAD(head, new_lck, chain);
5041 lck_spin_unlock(&cl_direct_read_spin_lock);
5042 lck_rw_lock(&new_lck->rw_lock, type);
5043 return new_lck;
5044 }
5045
5046 lck_spin_unlock(&cl_direct_read_spin_lock);
5047
5048 // Allocate a new lock
5049 new_lck = kalloc_type(cl_direct_read_lock_t, Z_WAITOK);
5050 lck_rw_init(&new_lck->rw_lock, &cl_mtx_grp, LCK_ATTR_NULL);
5051 new_lck->vp = vp;
5052 new_lck->ref_count = 1;
5053
5054 // Got to go round again
5055 }
5056 }
5057
5058 void
5059 cluster_unlock_direct_read(cl_direct_read_lock_t *lck)
5060 {
5061 lck_rw_done(&lck->rw_lock);
5062
5063 lck_spin_lock(&cl_direct_read_spin_lock);
5064 if (lck->ref_count == 1) {
5065 LIST_REMOVE(lck, chain);
5066 lck_spin_unlock(&cl_direct_read_spin_lock);
5067 lck_rw_destroy(&lck->rw_lock, &cl_mtx_grp);
5068 kfree_type(cl_direct_read_lock_t, lck);
5069 } else {
5070 --lck->ref_count;
5071 lck_spin_unlock(&cl_direct_read_spin_lock);
5072 }
5073 }
5074
5075 static int
5076 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5077 int flags, int (*callback)(buf_t, void *), void *callback_arg)
5078 {
5079 upl_t upl = NULL;
5080 upl_page_info_t *pl;
5081 off_t max_io_size;
5082 vm_offset_t upl_offset, vector_upl_offset = 0;
5083 upl_size_t upl_size = 0, vector_upl_size = 0;
5084 vm_size_t upl_needed_size;
5085 unsigned int pages_in_pl;
5086 upl_control_flags_t upl_flags;
5087 kern_return_t kret = KERN_SUCCESS;
5088 unsigned int i;
5089 int force_data_sync;
5090 int retval = 0;
5091 int no_zero_fill = 0;
5092 int io_flag = 0;
5093 int misaligned = 0;
5094 struct clios iostate;
5095 user_addr_t iov_base;
5096 u_int32_t io_req_size;
5097 u_int32_t offset_in_file;
5098 u_int32_t offset_in_iovbase;
5099 u_int32_t io_size;
5100 u_int32_t io_min;
5101 u_int32_t xsize;
5102 u_int32_t devblocksize;
5103 u_int32_t mem_alignment_mask;
5104 u_int32_t max_upl_size;
5105 u_int32_t max_rd_size;
5106 u_int32_t max_rd_ahead;
5107 u_int32_t max_vector_size;
5108 boolean_t io_throttled = FALSE;
5109
5110 u_int32_t vector_upl_iosize = 0;
5111 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
5112 off_t v_upl_uio_offset = 0;
5113 int vector_upl_index = 0;
5114 upl_t vector_upl = NULL;
5115 cl_direct_read_lock_t *lock = NULL;
5116
5117 assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
5118
5119 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
5120 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5121
5122 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
5123
5124 max_rd_size = max_upl_size;
5125
5126 if (__improbable(os_mul_overflow(max_rd_size, IO_SCALE(vp, 2),
5127 &max_rd_ahead) || (max_rd_ahead > overlapping_read_max))) {
5128 max_rd_ahead = overlapping_read_max;
5129 }
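/*
* cap the amount of async read-ahead we'll leave outstanding:
* max_rd_size scaled by IO_SCALE, but never more than
* overlapping_read_max, and fall back to that cap outright if the
* multiplication would overflow
*/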
5130
5131 io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
5132
5133 if (flags & IO_PASSIVE) {
5134 io_flag |= CL_PASSIVE;
5135 }
5136
5137 if (flags & IO_ENCRYPTED) {
5138 io_flag |= CL_RAW_ENCRYPTED;
5139 }
5140
5141 if (flags & IO_NOCACHE) {
5142 io_flag |= CL_NOCACHE;
5143 }
5144
5145 if (flags & IO_SKIP_ENCRYPTION) {
5146 io_flag |= CL_ENCRYPTED;
5147 }
5148
5149 iostate.io_completed = 0;
5150 iostate.io_issued = 0;
5151 iostate.io_error = 0;
5152 iostate.io_wanted = 0;
5153
5154 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5155
5156 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5157 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5158
5159 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5160 (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
5161
5162 if (devblocksize == 1) {
5163 /*
5164 * the AFP client advertises a devblocksize of 1
5165 * however, its BLOCKMAP routine maps to physical
5166 * blocks that are PAGE_SIZE in size...
5167 * therefore we can't ask for I/Os that aren't page aligned
5168 * or aren't multiples of PAGE_SIZE in size
5169 * by setting devblocksize to PAGE_SIZE, we re-instate
5170 * the old behavior we had before the mem_alignment_mask
5171 * changes went in...
5172 */
5173 devblocksize = PAGE_SIZE;
5174 }
5175
5176 /*
5177 * We are going to need this uio for the prefaulting later
5178 * especially for the cases where multiple non-contiguous
5179 * iovs are passed into this routine.
5180 *
5181 * Note that we only want to prefault for direct IOs to userspace buffers,
5182 * not kernel buffers.
5183 */
5184 uio_t uio_acct = NULL;
5185 if (uio->uio_segflg != UIO_SYSSPACE) {
5186 uio_acct = uio_duplicate(uio);
5187 }
5188
5189 next_dread:
5190 io_req_size = *read_length;
5191 iov_base = uio_curriovbase(uio);
5192
5193 offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
5194 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
5195
5196 if (vm_map_page_mask(current_map()) < PAGE_MASK) {
5197 /*
5198 * XXX TODO4K
5199 * Direct I/O might not work as expected from a 16k kernel space
5200 * to a 4k user space because each 4k chunk might point to
5201 * a different 16k physical page...
5202 * Let's go the "misaligned" way.
5203 */
5204 if (!misaligned) {
5205 DEBUG4K_VFS("forcing misaligned\n");
5206 }
5207 misaligned = 1;
5208 }
5209
5210 if (offset_in_file || offset_in_iovbase) {
5211 /*
5212 * one of the 2 important offsets is misaligned
5213 * so fire an I/O through the cache for this entire vector
5214 */
5215 misaligned = 1;
5216 }
5217 if (iov_base & (devblocksize - 1)) {
5218 /*
5219 * the offset in memory must be on a device block boundary
5220 * so that we can guarantee that we can generate an
5221 * I/O that ends on a page boundary in cluster_io
5222 */
5223 misaligned = 1;
5224 }
5225
5226 max_io_size = filesize - uio->uio_offset;
5227
5228 /*
5229 * The user must request IO in aligned chunks. If the
5230 * offset into the file is bad, or the userland pointer
5231 * is non-aligned, then we cannot service the encrypted IO request.
5232 */
5233 if (flags & IO_ENCRYPTED) {
5234 if (misaligned || (io_req_size & (devblocksize - 1))) {
5235 retval = EINVAL;
5236 }
5237
5238 max_io_size = roundup(max_io_size, devblocksize);
5239 }
5240
5241 if ((off_t)io_req_size > max_io_size) {
5242 io_req_size = (u_int32_t)max_io_size;
5243 }
5244
5245 /*
5246 * When we get to this point, we know...
5247 * -- the offset into the file is on a devblocksize boundary
5248 */
5249
5250 while (io_req_size && retval == 0) {
5251 u_int32_t io_start;
5252
5253 if (cluster_is_throttled(vp)) {
5254 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
5255
5256 /*
5257 * we're in the throttle window, at the very least
5258 * we want to limit the size of the I/O we're about
5259 * to issue
5260 */
5261 max_rd_size = max_throttle_size;
5262 max_rd_ahead = max_throttle_size - 1;
5263 max_vector_size = max_throttle_size;
5264 } else {
5265 max_rd_size = max_upl_size;
5266 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
5267 max_vector_size = MAX_VECTOR_UPL_SIZE;
5268 }
5269 io_start = io_size = io_req_size;
5270
5271 /*
5272 * First look for pages already in the cache
5273 * and move them to user space. But only do this
5274 * check if we are not retrieving encrypted data directly
5275 * from the filesystem; those blocks should never
5276 * be in the UBC.
5277 *
5278 * cluster_copy_ubc_data returns the resid
5279 * in io_size
5280 */
5281 if ((flags & IO_ENCRYPTED) == 0) {
5282 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
5283 }
5284 /*
5285 * calculate the number of bytes actually copied
5286 * starting size - residual
5287 */
5288 xsize = io_start - io_size;
5289
5290 io_req_size -= xsize;
5291
5292 if (useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
5293 /*
5294 * We found something in the cache or we have an iov_base that's not
5295 * page-aligned.
5296 *
5297 * Issue all I/O's that have been collected within this Vectored UPL.
5298 */
5299 if (vector_upl_index) {
5300 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5301 reset_vector_run_state();
5302 }
5303
5304 if (xsize) {
5305 useVectorUPL = 0;
5306 }
5307
5308 /*
5309 * After this point, if we are using the Vector UPL path and the base is
5310 * not page-aligned then the UPL with that base will be the first in the vector UPL.
5311 */
5312 }
5313
5314 /*
5315 * check to see if we are finished with this request.
5316 *
5317 * If we satisfied this IO already, then io_req_size will be 0.
5318 * Otherwise, see if the IO was mis-aligned and needs to go through
5319 * the UBC to deal with the 'tail'.
5320 *
5321 */
5322 if (io_req_size == 0 || (misaligned)) {
5323 /*
5324 * see if there's another uio vector to
5325 * process that's of type IO_DIRECT
5326 *
5327 * break out of while loop to get there
5328 */
5329 break;
5330 }
5331 /*
5332 * assume the request ends on a device block boundary
5333 */
5334 io_min = devblocksize;
5335
5336 /*
5337 * we can handle I/O's in multiples of the device block size
5338 * however, if io_size isn't a multiple of devblocksize we
5339 * want to clip it back to the nearest page boundary since
5340 * we are going to have to go through cluster_read_copy to
5341 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
5342 * multiple, we avoid asking the drive for the same physical
5343 * blocks twice.. once for the partial page at the end of the
5344 * request and a 2nd time for the page we read into the cache
5345 * (which overlaps the end of the direct read) in order to
5346 * get at the overhang bytes
5347 */
5348 if (io_size & (devblocksize - 1)) {
5349 assert(!(flags & IO_ENCRYPTED));
5350 /*
5351 * Clip the request to the previous page size boundary
5352 * since request does NOT end on a device block boundary
5353 */
5354 io_size &= ~PAGE_MASK;
5355 io_min = PAGE_SIZE;
5356 }
5357 if (retval || io_size < io_min) {
5358 /*
5359 * either an error or we only have the tail left to
5360 * complete via the copy path...
5361 * we may have already spun some portion of this request
5362 * off as async requests... we need to wait for the I/O
5363 * to complete before returning
5364 */
5365 goto wait_for_dreads;
5366 }
5367
5368 /*
5369 * Don't re-check the UBC data if we are looking for uncached IO
5370 * or asking for encrypted blocks.
5371 */
5372 if ((flags & IO_ENCRYPTED) == 0) {
5373 if ((xsize = io_size) > max_rd_size) {
5374 xsize = max_rd_size;
5375 }
5376
5377 io_size = 0;
5378
5379 if (!lock) {
5380 /*
5381 * We hold a lock here between the time we check the
5382 * cache and the time we issue I/O. This saves us
5383 * from having to lock the pages in the cache. Not
5384 * all clients will care about this lock but some
5385 * clients may want to guarantee stability between
5386 * here and when the I/O is issued in which case they
5387 * will take the lock exclusively.
5388 */
5389 lock = cluster_lock_direct_read(vp, LCK_RW_TYPE_SHARED);
5390 }
5391
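/*
 * UPL_ROP_ABSENT returns, in io_size, the length of the initial run of
 * pages starting at uio_offset that are not present in the cache...
 * that run is what we can safely read directly from the device
 */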
5392 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
5393
5394 if (io_size == 0) {
5395 /*
5396 * a page must have just come into the cache
5397 * since the first page in this range is no
5398 * longer absent, go back and re-evaluate
5399 */
5400 continue;
5401 }
5402 }
5403 if ((flags & IO_RETURN_ON_THROTTLE)) {
5404 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
5405 if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
5406 /*
5407 * we're in the throttle window and at least 1 I/O
5408 * has already been issued by a throttleable thread
5409 * in this window, so return with EAGAIN to indicate
5410 * to the FS issuing the cluster_read call that it
5411 * should now throttle after dropping any locks
5412 */
5413 throttle_info_update_by_mount(vp->v_mount);
5414
5415 io_throttled = TRUE;
5416 goto wait_for_dreads;
5417 }
5418 }
5419 }
5420 if (io_size > max_rd_size) {
5421 io_size = max_rd_size;
5422 }
5423
5424 iov_base = uio_curriovbase(uio);
5425
5426 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5427 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5428
5429 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
5430 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
5431
5432 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
5433 no_zero_fill = 1;
5434 } else {
5435 no_zero_fill = 0;
5436 }
5437
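/*
 * wire the buffer into a UPL... we make up to 3 attempts, escalating to
 * UPL_FORCE_DATA_SYNC, and only proceed once every page in the returned
 * pagelist is present
 */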
5438 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5439 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
5440 pages_in_pl = 0;
5441 upl_size = (upl_size_t)upl_needed_size;
5442 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5443 if (no_zero_fill) {
5444 upl_flags |= UPL_NOZEROFILL;
5445 }
5446 if (force_data_sync) {
5447 upl_flags |= UPL_FORCE_DATA_SYNC;
5448 }
5449
5450 kret = vm_map_create_upl(map,
5451 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
5452 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE);
5453
5454 if (kret != KERN_SUCCESS) {
5455 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5456 (int)upl_offset, upl_size, io_size, kret, 0);
5457 /*
5458 * failed to get pagelist
5459 *
5460 * we may have already spun some portion of this request
5461 * off as async requests... we need to wait for the I/O
5462 * to complete before returning
5463 */
5464 goto wait_for_dreads;
5465 }
5466 pages_in_pl = upl_size / PAGE_SIZE;
5467 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
5468
5469 for (i = 0; i < pages_in_pl; i++) {
5470 if (!upl_page_present(pl, i)) {
5471 break;
5472 }
5473 }
5474 if (i == pages_in_pl) {
5475 break;
5476 }
5477
5478 ubc_upl_abort(upl, 0);
5479 }
5480 if (force_data_sync >= 3) {
5481 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5482 (int)upl_offset, upl_size, io_size, kret, 0);
5483
5484 goto wait_for_dreads;
5485 }
5486 /*
5487 * Consider the possibility that upl_size wasn't satisfied.
5488 */
5489 if (upl_size < upl_needed_size) {
5490 if (upl_size && upl_offset == 0) {
5491 io_size = upl_size;
5492 } else {
5493 io_size = 0;
5494 }
5495 }
5496 if (io_size == 0) {
5497 ubc_upl_abort(upl, 0);
5498 goto wait_for_dreads;
5499 }
5500 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5501 (int)upl_offset, upl_size, io_size, kret, 0);
5502
5503 if (useVectorUPL) {
5504 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
5505 if (end_off) {
5506 issueVectorUPL = 1;
5507 }
5508 /*
5509 * After this point, if we are using a vector UPL, then
5510 * either all the UPL elements end on a page boundary OR
5511 * this UPL is the last element because it does not end
5512 * on a page boundary.
5513 */
5514 }
5515
5516 /*
5517 * request asynchronously so that we can overlap
5518 * the preparation of the next I/O
5519 * if there are already too many outstanding reads
5520 * wait until some have completed before issuing the next read
5521 */
5522 cluster_iostate_wait(&iostate, max_rd_ahead, "cluster_read_direct");
5523
5524 if (iostate.io_error) {
5525 /*
5526 * one of the earlier reads we issued ran into a hard error
5527 * don't issue any more reads, cleanup the UPL
5528 * that was just created but not used, then
5529 * go wait for any other reads to complete before
5530 * returning the error to the caller
5531 */
5532 ubc_upl_abort(upl, 0);
5533
5534 goto wait_for_dreads;
5535 }
5536 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
5537 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
5538
5539 if (!useVectorUPL) {
5540 if (no_zero_fill) {
5541 io_flag &= ~CL_PRESERVE;
5542 } else {
5543 io_flag |= CL_PRESERVE;
5544 }
5545
5546 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5547 } else {
5548 if (!vector_upl_index) {
5549 vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
5550 v_upl_uio_offset = uio->uio_offset;
5551 vector_upl_offset = upl_offset;
5552 }
5553
5554 vector_upl_set_subupl(vector_upl, upl, upl_size);
5555 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
5556 vector_upl_index++;
5557 vector_upl_size += upl_size;
5558 vector_upl_iosize += io_size;
5559
5560 if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
5561 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5562 reset_vector_run_state();
5563 }
5564 }
5565
5566 if (lock) {
5567 // We don't need to wait for the I/O to complete
5568 cluster_unlock_direct_read(lock);
5569 lock = NULL;
5570 }
5571
5572 /*
5573 * update the uio structure
5574 */
5575 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
5576 uio_update(uio, (user_size_t)max_io_size);
5577 } else {
5578 uio_update(uio, (user_size_t)io_size);
5579 }
5580
5581 io_req_size -= io_size;
5582
5583 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
5584 upl, (int)uio->uio_offset, io_req_size, retval, 0);
5585 } /* end while */
5586
5587 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
5588 retval = cluster_io_type(uio, read_type, read_length, 0);
5589
5590 if (retval == 0 && *read_type == IO_DIRECT) {
5591 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5592 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5593
5594 goto next_dread;
5595 }
5596 }
5597
5598 wait_for_dreads:
5599
5600 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
5601 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5602 reset_vector_run_state();
5603 }
5604
5605 // We don't need to wait for the I/O to complete
5606 if (lock) {
5607 cluster_unlock_direct_read(lock);
5608 }
5609
5610 /*
5611 * make sure all async reads that are part of this stream
5612 * have completed before we return
5613 */
5614 cluster_iostate_wait(&iostate, 0, "cluster_read_direct");
5615
5616 if (iostate.io_error) {
5617 retval = iostate.io_error;
5618 }
5619
5620 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5621
5622 if (io_throttled == TRUE && retval == 0) {
5623 retval = EAGAIN;
5624 }
5625
5626 vm_map_offset_t current_page_size, current_page_mask;
5627 current_page_size = vm_map_page_size(current_map());
5628 current_page_mask = vm_map_page_mask(current_map());
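/*
 * walk the portion of the original (duplicated) uio that was actually
 * transferred and touch each page... this is done purely for pmap
 * accounting, as noted at the vm_pre_fault call below
 */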
5629 if (uio_acct) {
5630 assert(uio_acct->uio_segflg != UIO_SYSSPACE);
5631 off_t bytes_to_prefault = 0, bytes_prefaulted = 0;
5632 user_addr_t curr_iov_base = 0;
5633 user_addr_t curr_iov_end = 0;
5634 user_size_t curr_iov_len = 0;
5635
5636 bytes_to_prefault = uio_offset(uio) - uio_offset(uio_acct);
5637
5638 for (; bytes_prefaulted < bytes_to_prefault;) {
5639 curr_iov_base = uio_curriovbase(uio_acct);
5640 curr_iov_len = MIN(uio_curriovlen(uio_acct), bytes_to_prefault - bytes_prefaulted);
5641 curr_iov_end = curr_iov_base + curr_iov_len;
5642
5643 for (; curr_iov_base < curr_iov_end;) {
5644 /*
5645 * This is specifically done for pmap accounting purposes.
5646 * vm_pre_fault() will call vm_fault() to enter the page into
5647 * the pmap if there isn't _a_ physical page for that VA already.
5648 */
5649 vm_pre_fault(vm_map_trunc_page(curr_iov_base, current_page_mask), VM_PROT_READ);
5650 curr_iov_base += current_page_size;
5651 bytes_prefaulted += current_page_size;
5652 }
5653 /*
5654 * Use update instead of advance so we can see how many iovs we processed.
5655 */
5656 uio_update(uio_acct, curr_iov_len);
5657 }
5658 uio_free(uio_acct);
5659 uio_acct = NULL;
5660 }
5661
5662 if (io_req_size && retval == 0) {
5663 /*
5664 * we couldn't handle the tail of this request in DIRECT mode
5665 * so fire it through the copy path
5666 */
5667 if (flags & IO_ENCRYPTED) {
5668 /*
5669 * We cannot fall back to the copy path for encrypted I/O. If this
5670 * happens, there is something wrong with the user buffer passed
5671 * down.
5672 */
5673 retval = EFAULT;
5674 } else {
5675 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
5676 }
5677
5678 *read_type = IO_UNKNOWN;
5679 }
5680 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
5681 (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
5682
5683 return retval;
5684 }
5685
5686
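/*
 * handle reads whose destination memory is physically contiguous (e.g.
 * device memory)... any head or tail that isn't devblocksize aligned is
 * staged through cluster_align_phys_io, and the aligned middle is issued
 * as CL_DEV_MEMORY I/O directly against the target pages
 */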
5687 static int
5688 cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5689 int (*callback)(buf_t, void *), void *callback_arg, int flags)
5690 {
5691 upl_page_info_t *pl;
5692 upl_t upl[MAX_VECTS];
5693 vm_offset_t upl_offset;
5694 addr64_t dst_paddr = 0;
5695 user_addr_t iov_base;
5696 off_t max_size;
5697 upl_size_t upl_size;
5698 vm_size_t upl_needed_size;
5699 mach_msg_type_number_t pages_in_pl;
5700 upl_control_flags_t upl_flags;
5701 kern_return_t kret;
5702 struct clios iostate;
5703 int error = 0;
5704 int cur_upl = 0;
5705 int num_upl = 0;
5706 int n;
5707 u_int32_t xsize;
5708 u_int32_t io_size;
5709 u_int32_t devblocksize;
5710 u_int32_t mem_alignment_mask;
5711 u_int32_t tail_size = 0;
5712 int bflag;
5713
5714 if (flags & IO_PASSIVE) {
5715 bflag = CL_PASSIVE;
5716 } else {
5717 bflag = 0;
5718 }
5719
5720 if (flags & IO_NOCACHE) {
5721 bflag |= CL_NOCACHE;
5722 }
5723
5724 /*
5725 * When we enter this routine, we know
5726 * -- the read_length will not exceed the current iov_len
5727 * -- the target address is physically contiguous for read_length
5728 */
5729 cluster_syncup(vp, filesize, callback, callback_arg, PUSH_SYNC);
5730
5731 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5732 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5733
5734 iostate.io_completed = 0;
5735 iostate.io_issued = 0;
5736 iostate.io_error = 0;
5737 iostate.io_wanted = 0;
5738
5739 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5740
5741 next_cread:
5742 io_size = *read_length;
5743
5744 max_size = filesize - uio->uio_offset;
5745
5746 if (io_size > max_size) {
5747 io_size = (u_int32_t)max_size;
5748 }
5749
5750 iov_base = uio_curriovbase(uio);
5751
5752 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5753 upl_needed_size = upl_offset + io_size;
5754
5755 pages_in_pl = 0;
5756 upl_size = (upl_size_t)upl_needed_size;
5757 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5758
5759
5760 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
5761 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
5762
5763 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5764 kret = vm_map_get_upl(map,
5765 vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
5766 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
5767
5768 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
5769 (int)upl_offset, upl_size, io_size, kret, 0);
5770
5771 if (kret != KERN_SUCCESS) {
5772 /*
5773 * failed to get pagelist
5774 */
5775 error = EINVAL;
5776 goto wait_for_creads;
5777 }
5778 num_upl++;
5779
5780 if (upl_size < upl_needed_size) {
5781 /*
5782 * The upl_size wasn't satisfied.
5783 */
5784 error = EINVAL;
5785 goto wait_for_creads;
5786 }
5787 pl = ubc_upl_pageinfo(upl[cur_upl]);
5788
5789 dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
5790
5791 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
5792 u_int32_t head_size;
5793
5794 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
5795
5796 if (head_size > io_size) {
5797 head_size = io_size;
5798 }
5799
5800 error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
5801
5802 if (error) {
5803 goto wait_for_creads;
5804 }
5805
5806 upl_offset += head_size;
5807 dst_paddr += head_size;
5808 io_size -= head_size;
5809
5810 iov_base += head_size;
5811 }
5812 if ((u_int32_t)iov_base & mem_alignment_mask) {
5813 /*
5814 * the request isn't aligned on a memory boundary
5815 * that the underlying DMA engine can handle...
5816 * return an error instead of going through
5817 * the slow copy path since the intent of this
5818 * path is direct I/O to device memory
5819 */
5820 error = EINVAL;
5821 goto wait_for_creads;
5822 }
5823
5824 tail_size = io_size & (devblocksize - 1);
5825
5826 io_size -= tail_size;
5827
5828 while (io_size && error == 0) {
5829 if (io_size > MAX_IO_CONTIG_SIZE) {
5830 xsize = MAX_IO_CONTIG_SIZE;
5831 } else {
5832 xsize = io_size;
5833 }
5834 /*
5835 * request asynchronously so that we can overlap
5836 * the preparation of the next I/O... we'll do
5837 * the commit after all the I/O has completed
5838 * since it's all issued against the same UPL
5839 * if there are already too many outstanding reads
5840 * wait until some have completed before issuing the next
5841 */
5842 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig");
5843
5844 if (iostate.io_error) {
5845 /*
5846 * one of the earlier reads we issued ran into a hard error
5847 * don't issue any more reads...
5848 * go wait for any other reads to complete before
5849 * returning the error to the caller
5850 */
5851 goto wait_for_creads;
5852 }
5853 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
5854 CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
5855 (buf_t)NULL, &iostate, callback, callback_arg);
5856 /*
5857 * The cluster_io read was issued successfully,
5858 * update the uio structure
5859 */
5860 if (error == 0) {
5861 uio_update(uio, (user_size_t)xsize);
5862
5863 dst_paddr += xsize;
5864 upl_offset += xsize;
5865 io_size -= xsize;
5866 }
5867 }
5868 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
5869 error = cluster_io_type(uio, read_type, read_length, 0);
5870
5871 if (error == 0 && *read_type == IO_CONTIG) {
5872 cur_upl++;
5873 goto next_cread;
5874 }
5875 } else {
5876 *read_type = IO_UNKNOWN;
5877 }
5878
5879 wait_for_creads:
5880 /*
5881 * make sure all async reads that are part of this stream
5882 * have completed before we proceed
5883 */
5884 cluster_iostate_wait(&iostate, 0, "cluster_read_contig");
5885
5886 if (iostate.io_error) {
5887 error = iostate.io_error;
5888 }
5889
5890 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5891
5892 if (error == 0 && tail_size) {
5893 error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
5894 }
5895
5896 for (n = 0; n < num_upl; n++) {
5897 /*
5898 * just release our hold on each physically contiguous
5899 * region without changing any state
5900 */
5901 ubc_upl_abort(upl[n], 0);
5902 }
5903
5904 return error;
5905 }
5906
5907
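/*
 * classify the current uio vector: IO_CONTIG if its backing memory is
 * physically contiguous, IO_DIRECT if it's at least min_length bytes,
 * IO_COPY otherwise, and IO_UNKNOWN if the uio is exhausted...
 * *io_length is clipped so that each chunk fits in a 32 bit int
 */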
5908 static int
5909 cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
5910 {
5911 user_size_t iov_len;
5912 user_addr_t iov_base = 0;
5913 upl_t upl;
5914 upl_size_t upl_size;
5915 upl_control_flags_t upl_flags;
5916 int retval = 0;
5917
5918 /*
5919 * skip over any empty vectors
5920 */
5921 uio_update(uio, (user_size_t)0);
5922
5923 iov_len = uio_curriovlen(uio);
5924
5925 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);
5926
5927 if (iov_len) {
5928 iov_base = uio_curriovbase(uio);
5929 /*
5930 * make sure the size of the vector isn't too big...
5931 * internally, we want to handle all of the I/O in
5932 * chunk sizes that fit in a 32 bit int
5933 */
5934 if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE) {
5935 upl_size = MAX_IO_REQUEST_SIZE;
5936 } else {
5937 upl_size = (u_int32_t)iov_len;
5938 }
5939
5940 upl_flags = UPL_QUERY_OBJECT_TYPE;
5941
5942 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5943 if ((vm_map_get_upl(map,
5944 vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
5945 &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) {
5946 /*
5947 * the user app must have passed in an invalid address
5948 */
5949 retval = EFAULT;
5950 }
5951 if (upl_size == 0) {
5952 retval = EFAULT;
5953 }
5954
5955 *io_length = upl_size;
5956
5957 if (upl_flags & UPL_PHYS_CONTIG) {
5958 *io_type = IO_CONTIG;
5959 } else if (iov_len >= min_length) {
5960 *io_type = IO_DIRECT;
5961 } else {
5962 *io_type = IO_COPY;
5963 }
5964 } else {
5965 /*
5966 * nothing left to do for this uio
5967 */
5968 *io_length = 0;
5969 *io_type = IO_UNKNOWN;
5970 }
5971 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);
5972
5973 if (*io_type == IO_DIRECT &&
5974 vm_map_page_shift(current_map()) < PAGE_SHIFT) {
5975 /* no direct I/O for sub-page-size address spaces */
5976 DEBUG4K_VFS("io_type IO_DIRECT -> IO_COPY\n");
5977 *io_type = IO_COPY;
5978 }
5979
5980 return retval;
5981 }
5982
5983
5984 /*
5985 * generate advisory I/O's in the largest chunks possible
5986 * the completed pages will be released into the VM cache
5987 */
5988 int
5989 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
5990 {
5991 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
5992 }
5993
5994 int
5995 advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
5996 {
5997 upl_page_info_t *pl;
5998 upl_t upl = NULL;
5999 vm_offset_t upl_offset;
6000 int upl_size;
6001 off_t upl_f_offset;
6002 int start_offset;
6003 int start_pg;
6004 int last_pg;
6005 int pages_in_upl;
6006 off_t max_size;
6007 int io_size;
6008 kern_return_t kret;
6009 int retval = 0;
6010 int issued_io;
6011 int skip_range;
6012 uint32_t max_io_size;
6013
6014
6015 if (!UBCINFOEXISTS(vp)) {
6016 return EINVAL;
6017 }
6018
6019 if (f_offset < 0 || resid < 0) {
6020 return EINVAL;
6021 }
6022
6023 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
6024
6025 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
6026 if (max_io_size > speculative_prefetch_max_iosize) {
6027 max_io_size = speculative_prefetch_max_iosize;
6028 }
6029 }
6030
6031 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
6032 (int)f_offset, resid, (int)filesize, 0, 0);
6033
6034 while (resid && f_offset < filesize && retval == 0) {
6035 /*
6036 * compute the size of the upl needed to encompass
6037 * the requested read... limit each call to cluster_io
6038 * to the maximum UPL size... cluster_io will clip if
6039 * this exceeds the maximum io_size for the device...
6040 * make sure to account for
6041 * a starting offset that's not page aligned
6042 */
6043 start_offset = (int)(f_offset & PAGE_MASK_64);
6044 upl_f_offset = f_offset - (off_t)start_offset;
6045 max_size = filesize - f_offset;
6046
6047 if (resid < max_size) {
6048 io_size = resid;
6049 } else {
6050 io_size = (int)max_size;
6051 }
6052
6053 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
6054 if ((uint32_t)upl_size > max_io_size) {
6055 upl_size = max_io_size;
6056 }
6057
6058 skip_range = 0;
6059 /*
6060 * return the number of contiguously present pages in the cache
6061 * starting at upl_f_offset within the file
6062 */
6063 ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
6064
6065 if (skip_range) {
6066 /*
6067 * skip over pages already present in the cache
6068 */
6069 io_size = skip_range - start_offset;
6070
6071 f_offset += io_size;
6072 resid -= io_size;
6073
6074 if (skip_range == upl_size) {
6075 continue;
6076 }
6077 /*
6078 * have to issue some real I/O
6079 * at this point, we know it's starting on a page boundary
6080 * because we've skipped over at least the first page in the request
6081 */
6082 start_offset = 0;
6083 upl_f_offset += skip_range;
6084 upl_size -= skip_range;
6085 }
6086 pages_in_upl = upl_size / PAGE_SIZE;
6087
6088 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
6089 upl, (int)upl_f_offset, upl_size, start_offset, 0);
6090
6091 kret = ubc_create_upl_kernel(vp,
6092 upl_f_offset,
6093 upl_size,
6094 &upl,
6095 &pl,
6096 UPL_RET_ONLY_ABSENT | UPL_SET_LITE,
6097 VM_KERN_MEMORY_FILE);
6098 if (kret != KERN_SUCCESS) {
6099 return retval;
6100 }
6101 issued_io = 0;
6102
6103 /*
6104 * before we start marching forward, we must make sure we end on
6105 * a present page, otherwise we will be working with a freed
6106 * upl
6107 */
6108 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
6109 if (upl_page_present(pl, last_pg)) {
6110 break;
6111 }
6112 }
6113 pages_in_upl = last_pg + 1;
6114
6115
6116 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
6117 upl, (int)upl_f_offset, upl_size, start_offset, 0);
6118
6119
6120 for (last_pg = 0; last_pg < pages_in_upl;) {
6121 /*
6122 * scan from the beginning of the upl looking for the first
6123 * page that is present.... this will become the first page in
6124 * the request we're going to make to 'cluster_io'... if all
6125 * of the pages are absent, we won't call through to 'cluster_io'
6126 */
6127 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
6128 if (upl_page_present(pl, start_pg)) {
6129 break;
6130 }
6131 }
6132
6133 /*
6134 * scan from the starting present page looking for an absent
6135 * page before the end of the upl is reached, if we
6136 * find one, then it will terminate the range of pages being
6137 * presented to 'cluster_io'
6138 */
6139 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
6140 if (!upl_page_present(pl, last_pg)) {
6141 break;
6142 }
6143 }
6144
6145 if (last_pg > start_pg) {
6146 /*
6147 * we found a range of pages that must be filled
6148 * if the last page in this range is the last page of the file
6149 * we may have to clip the size of it to keep from reading past
6150 * the end of the last physical block associated with the file
6151 */
6152 upl_offset = start_pg * PAGE_SIZE;
6153 io_size = (last_pg - start_pg) * PAGE_SIZE;
6154
6155 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
6156 io_size = (int)(filesize - (upl_f_offset + upl_offset));
6157 }
6158
6159 /*
6160 * issue an asynchronous read to cluster_io
6161 */
6162 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
6163 CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6164
6165 issued_io = 1;
6166 }
6167 }
6168 if (issued_io == 0) {
6169 ubc_upl_abort(upl, 0);
6170 }
6171
6172 io_size = upl_size - start_offset;
6173
6174 if (io_size > resid) {
6175 io_size = resid;
6176 }
6177 f_offset += io_size;
6178 resid -= io_size;
6179 }
6180
6181 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
6182 (int)f_offset, resid, retval, 0, 0);
6183
6184 return retval;
6185 }
6186
6187
6188 int
6189 cluster_push(vnode_t vp, int flags)
6190 {
6191 return cluster_push_ext(vp, flags, NULL, NULL);
6192 }
6193
6194
6195 int
6196 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
6197 {
6198 return cluster_push_err(vp, flags, callback, callback_arg, NULL);
6199 }
6200
6201 /* write errors via err, but return the number of clusters written */
6202 extern uint32_t system_inshutdown;
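/*
 * number of sparse pushes that failed with ENOSPC on a local, non-removable
 * volume while the system was shutting down
 */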
6203 uint32_t cl_sparse_push_error = 0;
6204 int
6205 cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg, int *err)
6206 {
6207 int retval;
6208 int my_sparse_wait = 0;
6209 struct cl_writebehind *wbp;
6210 int local_err = 0;
6211
6212 if (err) {
6213 *err = 0;
6214 }
6215
6216 if (!UBCINFOEXISTS(vp)) {
6217 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0);
6218 return 0;
6219 }
6220 /* return if deferred write is set */
6221 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
6222 return 0;
6223 }
6224 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
6225 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0);
6226 return 0;
6227 }
6228 if (!ISSET(flags, IO_SYNC) && wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
6229 lck_mtx_unlock(&wbp->cl_lockw);
6230
6231 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0);
6232 return 0;
6233 }
6234 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
6235 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
6236
6237 /*
6238 * if we have an fsync in progress, we don't want to allow any additional
6239 * sync/fsync/close(s) to occur until it finishes.
6240 * note that its possible for writes to continue to occur to this file
6241 * note that it's possible for writes to continue to occur to this file
6242 * in the sparse map case
6243 */
6244 while (wbp->cl_sparse_wait) {
6245 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
6246
6247 msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
6248
6249 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
6250 }
6251 if (flags & IO_SYNC) {
6252 my_sparse_wait = 1;
6253 wbp->cl_sparse_wait = 1;
6254
6255 /*
6256 * this is an fsync (or equivalent)... we must wait for any existing async
6257 * cleaning operations to complete before we evaluate the current state
6258 * and finish cleaning... this ensures that all writes issued before this
6259 * fsync actually get cleaned to the disk before this fsync returns
6260 */
6261 while (wbp->cl_sparse_pushes) {
6262 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
6263
6264 msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
6265
6266 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
6267 }
6268 }
6269 if (wbp->cl_scmap) {
6270 void *scmap;
6271
6272 if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
6273 scmap = wbp->cl_scmap;
6274 wbp->cl_scmap = NULL;
6275
6276 wbp->cl_sparse_pushes++;
6277
6278 lck_mtx_unlock(&wbp->cl_lockw);
6279
6280 retval = sparse_cluster_push(wbp, &scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
6281
6282 lck_mtx_lock(&wbp->cl_lockw);
6283
6284 wbp->cl_sparse_pushes--;
6285
6286 if (retval) {
6287 if (wbp->cl_scmap != NULL) {
6288 /*
6289 * panic("cluster_push_err: Expected NULL cl_scmap\n");
6290 *
6291 * This can happen if we get an error from the underlying FS
6292 * e.g. ENOSPC, EPERM or EIO etc. We hope that these errors
6293 * are transient and the I/Os will succeed at a later point.
6294 *
6295 * The tricky part here is that a new sparse cluster has been
6296 * allocated and tracking a different set of dirty pages. So these
6297 * pages are not going to be pushed out with the next sparse_cluster_push.
6298 * An explicit msync or file close will, however, push the pages out.
6299 *
6300 * What if those calls still don't work? And so, during shutdown we keep
6301 * trying till we succeed...
6302 */
6303
6304 if (system_inshutdown) {
6305 if ((retval == ENOSPC) && (vp->v_mount->mnt_flag & (MNT_LOCAL | MNT_REMOVABLE)) == MNT_LOCAL) {
6306 os_atomic_inc(&cl_sparse_push_error, relaxed);
6307 }
6308 } else {
6309 vfs_drt_control(&scmap, 0); /* emit stats and free this memory. Dirty pages stay intact. */
6310 scmap = NULL;
6311 }
6312 } else {
6313 wbp->cl_scmap = scmap;
6314 }
6315 }
6316
6317 if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0) {
6318 wakeup((caddr_t)&wbp->cl_sparse_pushes);
6319 }
6320 } else {
6321 retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
6322 }
6323
6324 local_err = retval;
6325
6326 if (err) {
6327 *err = retval;
6328 }
6329 retval = 1;
6330 } else {
6331 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, &local_err, FALSE);
6332 if (err) {
6333 *err = local_err;
6334 }
6335 }
6336 lck_mtx_unlock(&wbp->cl_lockw);
6337
6338 if (flags & IO_SYNC) {
6339 (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
6340 }
6341
6342 if (my_sparse_wait) {
6343 /*
6344 * I'm the owner of the serialization token
6345 * clear it and wakeup anyone that is waiting
6346 * for me to finish
6347 */
6348 lck_mtx_lock(&wbp->cl_lockw);
6349
6350 wbp->cl_sparse_wait = 0;
6351 wakeup((caddr_t)&wbp->cl_sparse_wait);
6352
6353 lck_mtx_unlock(&wbp->cl_lockw);
6354 }
6355 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
6356 wbp->cl_scmap, wbp->cl_number, retval, local_err, 0);
6357
6358 return retval;
6359 }
6360
6361
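/*
 * tear down the write-behind and read-ahead state hanging off this
 * ubc_info... any sparse cluster map is released via vfs_drt_control
 * without pushing the dirty pages it was tracking
 */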
6362 __private_extern__ void
6363 cluster_release(struct ubc_info *ubc)
6364 {
6365 struct cl_writebehind *wbp;
6366 struct cl_readahead *rap;
6367
6368 if ((wbp = ubc->cl_wbehind)) {
6369 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
6370
6371 if (wbp->cl_scmap) {
6372 vfs_drt_control(&(wbp->cl_scmap), 0);
6373 }
6374 lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
6375 zfree(cl_wr_zone, wbp);
6376 ubc->cl_wbehind = NULL;
6377 } else {
6378 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
6379 }
6380
6381 if ((rap = ubc->cl_rahead)) {
6382 lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
6383 zfree(cl_rd_zone, rap);
6384 ubc->cl_rahead = NULL;
6385 }
6386
6387 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
6388 }
6389
6390
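/*
 * called with the write behind lock held... sort the delayed-write
 * clusters, push as many as requested, and return the number of free
 * cluster slots... leftovers are merged back in, or the vnode is switched
 * to the sparse cluster mechanism if they no longer fit
 */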
6391 static int
6392 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err, boolean_t vm_initiated)
6393 {
6394 int cl_index;
6395 int cl_index1;
6396 int min_index;
6397 int cl_len;
6398 int cl_pushed = 0;
6399 struct cl_wextent l_clusters[MAX_CLUSTERS];
6400 u_int max_cluster_pgcount;
6401 int error = 0;
6402
6403 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
6404 /*
6405 * the write behind context exists and has
6406 * already been locked...
6407 */
6408 if (wbp->cl_number == 0) {
6409 /*
6410 * no clusters to push
6411 * return number of empty slots
6412 */
6413 return MAX_CLUSTERS;
6414 }
6415
6416 /*
6417 * make a local 'sorted' copy of the clusters
6418 * and clear wbp->cl_number so that new clusters can
6419 * be developed
6420 */
6421 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
6422 for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
6423 if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr) {
6424 continue;
6425 }
6426 if (min_index == -1) {
6427 min_index = cl_index1;
6428 } else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr) {
6429 min_index = cl_index1;
6430 }
6431 }
6432 if (min_index == -1) {
6433 break;
6434 }
6435
6436 l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
6437 l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
6438 l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
6439
6440 wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
6441 }
6442 wbp->cl_number = 0;
6443
6444 cl_len = cl_index;
6445
6446 /* skip switching to the sparse cluster mechanism if on diskimage */
6447 if (((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS) &&
6448 !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)) {
6449 int i;
6450
6451 /*
6452 * determine if we appear to be writing the file sequentially
6453 * if not, by returning without having pushed any clusters
6454 * we will cause this vnode to be pushed into the sparse cluster mechanism
6455 * used for managing more random I/O patterns
6456 *
6457 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
6458 * that's why we're in try_push with PUSH_DELAY...
6459 *
6460 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
6461 * is adjacent to the next (i.e. we're looking for sequential writes)... they were sorted above
6462 * so we can just make a simple pass through, up to, but not including the last one...
6463 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
6464 * are sequential
6465 *
6466 * we let the last one be partial as long as it was adjacent to the previous one...
6467 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
6468 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
6469 */
6470 for (i = 0; i < MAX_CLUSTERS - 1; i++) {
6471 if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount) {
6472 goto dont_try;
6473 }
6474 if (l_clusters[i].e_addr != l_clusters[i + 1].b_addr) {
6475 goto dont_try;
6476 }
6477 }
6478 }
6479 if (vm_initiated == TRUE) {
6480 lck_mtx_unlock(&wbp->cl_lockw);
6481 }
6482
6483 for (cl_index = 0; cl_index < cl_len; cl_index++) {
6484 int flags;
6485 struct cl_extent cl;
6486 int retval;
6487
6488 flags = io_flags & (IO_PASSIVE | IO_CLOSE);
6489
6490 /*
6491 * try to push each cluster in turn...
6492 */
6493 if (l_clusters[cl_index].io_flags & CLW_IONOCACHE) {
6494 flags |= IO_NOCACHE;
6495 }
6496
6497 if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE) {
6498 flags |= IO_PASSIVE;
6499 }
6500
6501 if (push_flag & PUSH_SYNC) {
6502 flags |= IO_SYNC;
6503 }
6504
6505 cl.b_addr = l_clusters[cl_index].b_addr;
6506 cl.e_addr = l_clusters[cl_index].e_addr;
6507
6508 retval = cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg, vm_initiated);
6509
6510 if (retval == 0) {
6511 cl_pushed++;
6512
6513 l_clusters[cl_index].b_addr = 0;
6514 l_clusters[cl_index].e_addr = 0;
6515 } else if (error == 0) {
6516 error = retval;
6517 }
6518
6519 if (!(push_flag & PUSH_ALL)) {
6520 break;
6521 }
6522 }
6523 if (vm_initiated == TRUE) {
6524 lck_mtx_lock(&wbp->cl_lockw);
6525 }
6526
6527 if (err) {
6528 *err = error;
6529 }
6530
6531 dont_try:
6532 if (cl_len > cl_pushed) {
6533 /*
6534 * we didn't push all of the clusters, so
6535 * lets try to merge them back in to the vnode
6536 */
6537 if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
6538 /*
6539 * we picked up some new clusters while we were trying to
6540 * push the old ones... this can happen because I've dropped
6541 * the vnode lock... the sum of the
6542 * leftovers plus the new cluster count exceeds our ability
6543 * to represent them, so switch to the sparse cluster mechanism
6544 *
6545 * collect the active public clusters...
6546 */
6547 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
6548
6549 for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
6550 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
6551 continue;
6552 }
6553 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
6554 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
6555 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
6556
6557 cl_index1++;
6558 }
6559 /*
6560 * update the cluster count
6561 */
6562 wbp->cl_number = cl_index1;
6563
6564 /*
6565 * and collect the original clusters that were moved into the
6566 * local storage for sorting purposes
6567 */
6568 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
6569 } else {
6570 /*
6571 * we've got room to merge the leftovers back in
6572 * just append them starting at the next 'hole'
6573 * represented by wbp->cl_number
6574 */
6575 for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
6576 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
6577 continue;
6578 }
6579
6580 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
6581 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
6582 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
6583
6584 cl_index1++;
6585 }
6586 /*
6587 * update the cluster count
6588 */
6589 wbp->cl_number = cl_index1;
6590 }
6591 }
6592 return MAX_CLUSTERS - wbp->cl_number;
6593 }
6594
6595
6596
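/*
 * push a single cluster of dirty pages... a UPL is created over the
 * cluster asking only for dirty pages, and one cluster_io is issued per
 * contiguous run of dirty pages... when vm_initiated is TRUE the work is
 * handed to vnode_pageout instead
 */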
6597 static int
6598 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags,
6599 int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6600 {
6601 upl_page_info_t *pl;
6602 upl_t upl;
6603 vm_offset_t upl_offset;
6604 int upl_size;
6605 off_t upl_f_offset;
6606 int pages_in_upl;
6607 int start_pg;
6608 int last_pg;
6609 int io_size;
6610 int io_flags;
6611 int upl_flags;
6612 int bflag;
6613 int size;
6614 int error = 0;
6615 int retval;
6616 kern_return_t kret;
6617
6618 if (flags & IO_PASSIVE) {
6619 bflag = CL_PASSIVE;
6620 } else {
6621 bflag = 0;
6622 }
6623
6624 if (flags & IO_SKIP_ENCRYPTION) {
6625 bflag |= CL_ENCRYPTED;
6626 }
6627
6628 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
6629 (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
6630
6631 if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
6632 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
6633
6634 return 0;
6635 }
6636 upl_size = pages_in_upl * PAGE_SIZE;
6637 upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
6638
6639 if (upl_f_offset + upl_size >= EOF) {
6640 if (upl_f_offset >= EOF) {
6641 /*
6642 * must have truncated the file and missed
6643 * clearing a dangling cluster (i.e. it's completely
6644 * beyond the new EOF)
6645 */
6646 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
6647
6648 return 0;
6649 }
6650 size = (int)(EOF - upl_f_offset);
6651
6652 upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
6653 pages_in_upl = upl_size / PAGE_SIZE;
6654 } else {
6655 size = upl_size;
6656 }
6657
6658
6659 if (vm_initiated) {
6660 vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size,
6661 UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error);
6662
6663 return error;
6664 }
6665 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
6666
6667 /*
6668 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
6669 *
6670 * - only pages that are currently dirty are returned... these are the ones we need to clean
6671 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
6672 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
6673 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
6674 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
6675 *
6676 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
6677 */
6678
6679 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE)) {
6680 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
6681 } else {
6682 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
6683 }
6684
6685 kret = ubc_create_upl_kernel(vp,
6686 upl_f_offset,
6687 upl_size,
6688 &upl,
6689 &pl,
6690 upl_flags,
6691 VM_KERN_MEMORY_FILE);
6692 if (kret != KERN_SUCCESS) {
6693 panic("cluster_push: failed to get pagelist");
6694 }
6695
6696 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
6697
6698 /*
6699 * since we only asked for the dirty pages back
6700 * it's possible that we may only get a few or even none, so...
6701 * before we start marching forward, we must make sure we know
6702 * where the last present page is in the UPL, otherwise we could
6703 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
6704 * employed by commit_range and abort_range.
6705 */
6706 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
6707 if (upl_page_present(pl, last_pg)) {
6708 break;
6709 }
6710 }
6711 pages_in_upl = last_pg + 1;
6712
6713 if (pages_in_upl == 0) {
6714 ubc_upl_abort(upl, 0);
6715
6716 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
6717 return 0;
6718 }
6719
6720 for (last_pg = 0; last_pg < pages_in_upl;) {
6721 /*
6722 * find the next dirty page in the UPL
6723 * this will become the first page in the
6724 * next I/O to generate
6725 */
6726 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
6727 if (upl_dirty_page(pl, start_pg)) {
6728 break;
6729 }
6730 if (upl_page_present(pl, start_pg)) {
6731 /*
6732 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
6733 * just release these unchanged since we're not going
6734 * to steal them or change their state
6735 */
6736 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
6737 }
6738 }
6739 if (start_pg >= pages_in_upl) {
6740 /*
6741 * done... no more dirty pages to push
6742 */
6743 break;
6744 }
6745 if (start_pg > last_pg) {
6746 /*
6747 * skipped over some non-dirty pages
6748 */
6749 size -= ((start_pg - last_pg) * PAGE_SIZE);
6750 }
6751
6752 /*
6753 * find a range of dirty pages to write
6754 */
6755 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
6756 if (!upl_dirty_page(pl, last_pg)) {
6757 break;
6758 }
6759 }
6760 upl_offset = start_pg * PAGE_SIZE;
6761
6762 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
6763
6764 io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
6765
6766 if (!(flags & IO_SYNC)) {
6767 io_flags |= CL_ASYNC;
6768 }
6769
6770 if (flags & IO_CLOSE) {
6771 io_flags |= CL_CLOSE;
6772 }
6773
6774 if (flags & IO_NOCACHE) {
6775 io_flags |= CL_NOCACHE;
6776 }
6777
6778 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
6779 io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6780
6781 if (error == 0 && retval) {
6782 error = retval;
6783 }
6784
6785 size -= io_size;
6786 }
6787 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, error, 0, 0);
6788
6789 return error;
6790 }
6791
6792
6793 /*
6794 * sparse_cluster_switch is called with the write behind lock held
6795 */
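/*
 * it migrates the vnode's delayed-write clusters into the sparse dirty-region
 * map... each page in each cluster is probed with ubc_page_op and only the
 * pages that are still dirty are recorded via sparse_cluster_add
 */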
6796 static int
6797 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6798 {
6799 int cl_index;
6800 int error = 0;
6801
6802 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, 0, 0);
6803
6804 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
6805 int flags;
6806 struct cl_extent cl;
6807
6808 for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
6809 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
6810 if (flags & UPL_POP_DIRTY) {
6811 cl.e_addr = cl.b_addr + 1;
6812
6813 error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated);
6814
6815 if (error) {
6816 break;
6817 }
6818 }
6819 }
6820 }
6821 }
6822 wbp->cl_number -= cl_index;
6823
6824 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, error, 0);
6825
6826 return error;
6827 }
6828
6829
6830 /*
6831 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
6832 * still associated with the write-behind context... however, if the scmap has been disassociated
6833 * from the write-behind context (the cluster_push case), the wb lock is not held
6834 */
6835 static int
6836 sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t EOF, int push_flag,
6837 int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6838 {
6839 struct cl_extent cl;
6840 off_t offset;
6841 u_int length;
6842 void *l_scmap;
6843 int error = 0;
6844
6845 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, kdebug_vnode(vp), (*scmap), 0, push_flag, 0);
6846
6847 if (push_flag & PUSH_ALL) {
6848 vfs_drt_control(scmap, 1);
6849 }
6850
6851 l_scmap = *scmap;
6852
6853 for (;;) {
6854 int retval;
6855
6856 if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS) {
6857 /*
6858 * Not finding anything to push will return KERN_FAILURE.
6859 * Confusing since it isn't really a failure. But that's the
6860 * reason we don't set 'error' here like we do below.
6861 */
6862 break;
6863 }
6864
6865 if (vm_initiated == TRUE) {
6866 lck_mtx_unlock(&wbp->cl_lockw);
6867 }
6868
6869 cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
6870 cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
6871
6872 retval = cluster_push_now(vp, &cl, EOF, io_flags, callback, callback_arg, vm_initiated);
6873 if (error == 0 && retval) {
6874 error = retval;
6875 }
6876
6877 if (vm_initiated == TRUE) {
6878 lck_mtx_lock(&wbp->cl_lockw);
6879
6880 if (*scmap != l_scmap) {
6881 break;
6882 }
6883 }
6884
6885 if (error) {
6886 if (vfs_drt_mark_pages(scmap, offset, length, NULL) != KERN_SUCCESS) {
6887 panic("Failed to restore dirty state on failure");
6888 }
6889
6890 break;
6891 }
6892
6893 if (!(push_flag & PUSH_ALL)) {
6894 break;
6895 }
6896 }
6897 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);
6898
6899 return error;
6900 }
6901
6902
6903 /*
6904 * sparse_cluster_add is called with the write behind lock held
6905 */
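/*
 * it marks the extent dirty in the sparse map... if the map runs out of
 * room only a partial update is done, so some of the map is pushed out and
 * the remaining portion of the extent is retried
 */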
6906 static int
6907 sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF,
6908 int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6909 {
6910 u_int new_dirty;
6911 u_int length;
6912 off_t offset;
6913 int error = 0;
6914 int push_flag = 0; /* Is this a valid value? */
6915
6916 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);
6917
6918 offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
6919 length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
6920
6921 while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
6922 /*
6923 * no room left in the map
6924 * only a partial update was done
6925 * push out some pages and try again
6926 */
6927
6928 if (vfs_get_scmap_push_behavior_internal(scmap, &push_flag)) {
6929 push_flag = 0;
6930 }
6931
6932 error = sparse_cluster_push(wbp, scmap, vp, EOF, push_flag, 0, callback, callback_arg, vm_initiated);
6933
6934 if (error) {
6935 break;
6936 }
6937
6938 offset += (new_dirty * PAGE_SIZE_64);
6939 length -= (new_dirty * PAGE_SIZE);
6940 }
6941 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);
6942
6943 return error;
6944 }
6945
6946
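/*
 * handle the misaligned head or tail of a physically contiguous transfer...
 * the affected file page is staged through a single UBC page (read in
 * synchronously if not already valid) and copied to or from the caller's
 * physical address with copypv
 */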
6947 static int
6948 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
6949 {
6950 upl_page_info_t *pl;
6951 upl_t upl;
6952 addr64_t ubc_paddr;
6953 kern_return_t kret;
6954 int error = 0;
6955 int did_read = 0;
6956 int abort_flags;
6957 int upl_flags;
6958 int bflag;
6959
6960 if (flags & IO_PASSIVE) {
6961 bflag = CL_PASSIVE;
6962 } else {
6963 bflag = 0;
6964 }
6965
6966 if (flags & IO_NOCACHE) {
6967 bflag |= CL_NOCACHE;
6968 }
6969
6970 upl_flags = UPL_SET_LITE;
6971
6972 if (!(flags & CL_READ)) {
6973 /*
6974 * "write" operation: let the UPL subsystem know
6975 * that we intend to modify the buffer cache pages
6976 * we're gathering.
6977 */
6978 upl_flags |= UPL_WILL_MODIFY;
6979 } else {
6980 /*
6981 * indicate that there is no need to pull the
6982 * mapping for this page... we're only going
6983 * to read from it, not modify it.
6984 */
6985 upl_flags |= UPL_FILE_IO;
6986 }
6987 kret = ubc_create_upl_kernel(vp,
6988 uio->uio_offset & ~PAGE_MASK_64,
6989 PAGE_SIZE,
6990 &upl,
6991 &pl,
6992 upl_flags,
6993 VM_KERN_MEMORY_FILE);
6994
6995 if (kret != KERN_SUCCESS) {
6996 return EINVAL;
6997 }
6998
6999 if (!upl_valid_page(pl, 0)) {
7000 /*
7001 * issue a synchronous read to cluster_io
7002 */
7003 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
7004 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
7005 if (error) {
7006 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
7007
7008 return error;
7009 }
7010 did_read = 1;
7011 }
7012 ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
7013
7014 /*
7015 * NOTE: There is no prototype for the following in BSD. It, and the definitions
7016 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
7017 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
7018 * way to do so without exporting them to kexts as well.
7019 */
7020 if (flags & CL_READ) {
7021 // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
7022 copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
7023 } else {
7024 // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
7025 copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */
7026 }
7027 if (!(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
7028 /*
7029 * issue a synchronous write to cluster_io
7030 */
7031 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
7032 bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
7033 }
7034 if (error == 0) {
7035 uio_update(uio, (user_size_t)xsize);
7036 }
7037
7038 if (did_read) {
7039 abort_flags = UPL_ABORT_FREE_ON_EMPTY;
7040 } else {
7041 abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
7042 }
7043
7044 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
7045
7046 return error;
7047 }
7048
7049 int
7050 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
7051 {
7052 int pg_offset;
7053 int pg_index;
7054 int csize;
7055 int segflg;
7056 int retval = 0;
7057 int xsize;
7058 upl_page_info_t *pl;
7059 int dirty_count;
7060
7061 xsize = *io_resid;
7062
7063 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
7064 (int)uio->uio_offset, upl_offset, xsize, 0, 0);
7065
7066 segflg = uio->uio_segflg;
7067
7068 switch (segflg) {
7069 case UIO_USERSPACE32:
7070 case UIO_USERISPACE32:
7071 uio->uio_segflg = UIO_PHYS_USERSPACE32;
7072 break;
7073
7074 case UIO_USERSPACE:
7075 case UIO_USERISPACE:
7076 uio->uio_segflg = UIO_PHYS_USERSPACE;
7077 break;
7078
7079 case UIO_USERSPACE64:
7080 case UIO_USERISPACE64:
7081 uio->uio_segflg = UIO_PHYS_USERSPACE64;
7082 break;
7083
7084 case UIO_SYSSPACE:
7085 uio->uio_segflg = UIO_PHYS_SYSSPACE;
7086 break;
7087 }
7088 pl = ubc_upl_pageinfo(upl);
7089
7090 pg_index = upl_offset / PAGE_SIZE;
7091 pg_offset = upl_offset & PAGE_MASK;
7092 csize = min(PAGE_SIZE - pg_offset, xsize);
7093
7094 dirty_count = 0;
7095 while (xsize && retval == 0) {
7096 addr64_t paddr;
7097
7098 paddr = ((addr64_t)upl_phys_page(pl, pg_index) << PAGE_SHIFT) + pg_offset;
7099 if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE)) {
7100 dirty_count++;
7101 }
7102
7103 retval = uiomove64(paddr, csize, uio);
7104
7105 pg_index += 1;
7106 pg_offset = 0;
7107 xsize -= csize;
7108 csize = min(PAGE_SIZE, xsize);
7109 }
7110 *io_resid = xsize;
7111
7112 uio->uio_segflg = segflg;
7113
7114 if (dirty_count) {
7115 task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, upl_lookup_vnode(upl));
7116 }
7117
7118 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7119 (int)uio->uio_offset, xsize, retval, segflg, 0);
7120
7121 return retval;
7122 }
7123
7124
7125 int
7126 cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
7127 {
7128 return cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1);
7129 }
7130
7131
7132 static int
7133 cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
7134 {
7135 int segflg;
7136 int io_size;
7137 int xsize;
7138 int start_offset;
7139 int retval = 0;
7140 memory_object_control_t control;
7141
7142 io_size = *io_resid;
7143
7144 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
7145 (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);
7146
7147 control = ubc_getobject(vp, UBC_FLAGS_NONE);
7148
7149 if (control == MEMORY_OBJECT_CONTROL_NULL) {
7150 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7151 (int)uio->uio_offset, io_size, retval, 3, 0);
7152
7153 return 0;
7154 }
7155 segflg = uio->uio_segflg;
7156
7157 switch (segflg) {
7158 case UIO_USERSPACE32:
7159 case UIO_USERISPACE32:
7160 uio->uio_segflg = UIO_PHYS_USERSPACE32;
7161 break;
7162
7163 case UIO_USERSPACE64:
7164 case UIO_USERISPACE64:
7165 uio->uio_segflg = UIO_PHYS_USERSPACE64;
7166 break;
7167
7168 case UIO_USERSPACE:
7169 case UIO_USERISPACE:
7170 uio->uio_segflg = UIO_PHYS_USERSPACE;
7171 break;
7172
7173 case UIO_SYSSPACE:
7174 uio->uio_segflg = UIO_PHYS_SYSSPACE;
7175 break;
7176 }
7177
7178 if ((io_size = *io_resid)) {
7179 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
7180 xsize = (int)uio_resid(uio);
7181
7182 retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
7183 start_offset, io_size, mark_dirty, take_reference);
7184 xsize -= uio_resid(uio);
7185
7186 int num_bytes_copied = xsize;
7187 if (num_bytes_copied && uio_rw(uio)) {
7188 task_update_logical_writes(current_task(), num_bytes_copied, TASK_WRITE_DEFERRED, vp);
7189 }
7190 io_size -= xsize;
7191 }
7192 uio->uio_segflg = segflg;
7193 *io_resid = io_size;
7194
7195 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7196 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
7197
7198 return retval;
7199 }
7200
7201
7202 int
7203 is_file_clean(vnode_t vp, off_t filesize)
7204 {
7205 off_t f_offset;
7206 int flags;
7207 int total_dirty = 0;
7208
7209 for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
7210 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
7211 if (flags & UPL_POP_DIRTY) {
7212 total_dirty++;
7213 }
7214 }
7215 }
7216 if (total_dirty) {
7217 return EINVAL;
7218 }
7219
7220 return 0;
7221 }
7222
7223
7224
7225 /*
7226 * Dirty region tracking/clustering mechanism.
7227 *
7228 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
7229 * dirty regions within a larger space (file). It is primarily intended to
7230 * support clustering in large files with many dirty areas.
7231 *
7232 * The implementation assumes that the dirty regions are pages.
7233 *
7234 * To represent dirty pages within the file, we store bit vectors in a
7235 * variable-size circular hash.
7236 */
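/*
 * Illustrative sketch (not kernel code): a minimal user-space model of the
 * same idea -- window-aligned buckets, each holding a bitvector of dirty
 * pages, located by hashing the window's base offset.  All demo_* names are
 * hypothetical; the real map below uses a prime modulus, packs the page
 * count into the control word, and grows/compacts on demand.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096ULL
#define DEMO_PAGES	64ULL				/* pages tracked per bucket */
#define DEMO_WINDOW	(DEMO_PAGES * DEMO_PAGE_SIZE)	/* 256 KiB per bucket */
#define DEMO_BUCKETS	7				/* tiny prime-sized table */

struct demo_bucket {
	uint64_t base;		/* window-aligned file offset */
	uint64_t bits;		/* bit N set => page N of the window is dirty */
};

static struct demo_bucket demo_table[DEMO_BUCKETS];

/* Mark one page dirty; returns 0 on success, -1 if the table is full. */
static int
demo_mark_dirty(uint64_t offset)
{
	uint64_t base = offset - (offset % DEMO_WINDOW);
	uint64_t slot = (base / DEMO_WINDOW) % DEMO_BUCKETS;

	for (uint64_t i = 0; i < DEMO_BUCKETS; i++) {
		struct demo_bucket *b = &demo_table[(slot + i) % DEMO_BUCKETS];

		if (b->bits == 0 || b->base == base) {	/* clean/vacant, or matching window */
			b->base = base;
			b->bits |= 1ULL << ((offset - base) / DEMO_PAGE_SIZE);
			return 0;
		}
	}
	return -1;	/* full: the kernel map would be grown or compacted here */
}

int
main(void)
{
	demo_mark_dirty(5 * DEMO_PAGE_SIZE);	/* page 5 of the first window */
	demo_mark_dirty(DEMO_WINDOW);		/* page 0 of the second window */
	printf("window 0 bits 0x%llx\n", (unsigned long long)demo_table[0].bits);	/* prints 0x20 */
	return 0;
}
#endif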
7237
7238 /*
7239 * Bitvector size. This determines the number of pages we group in a
7240 * single hashtable entry. Each hashtable entry is aligned to this
7241 * size within the file.
7242 */
7243 #define DRT_BITVECTOR_PAGES ((1024 * 256) / PAGE_SIZE)
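/*
 * Worked example: with 4 KiB pages this evaluates to 64 pages per entry,
 * i.e. each hashtable entry covers a 256 KiB window of the file; with
 * 16 KiB pages it is 16 pages covering the same 256 KiB window.
 */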
7244
7245 /*
7246 * File offset handling.
7247 *
7248 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
7249 * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7250 */
7251 #define DRT_ADDRESS_MASK (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7252 #define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
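/*
 * Example (4 KiB pages): DRT_BITVECTOR_PAGES * PAGE_SIZE is 256 KiB, so
 * DRT_ADDRESS_MASK is ~0x3ffff and DRT_ALIGN_ADDRESS(0x43210) yields
 * 0x40000, the base of the 256 KiB window containing that offset.
 */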
7253
7254 /*
7255 * Hashtable address field handling.
7256 *
7257 * The low-order bits of the hashtable address are used to conserve
7258 * space.
7259 *
7260 * DRT_HASH_COUNT_MASK must be large enough to store the range
7261 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
7262 * to indicate that the bucket is actually unoccupied.
7263 */
7264 #define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
7265 #define DRT_HASH_SET_ADDRESS(scm, i, a) \
7266 do { \
7267 (scm)->scm_hashtable[(i)].dhe_control = \
7268 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
7269 } while (0)
7270 #define DRT_HASH_COUNT_MASK 0x1ff
7271 #define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
7272 #define DRT_HASH_SET_COUNT(scm, i, c) \
7273 do { \
7274 (scm)->scm_hashtable[(i)].dhe_control = \
7275 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
7276 } while (0)
7277 #define DRT_HASH_CLEAR(scm, i) \
7278 do { \
7279 (scm)->scm_hashtable[(i)].dhe_control = 0; \
7280 } while (0)
7281 #define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
7282 #define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
7283 #define DRT_HASH_COPY(oscm, oi, scm, i) \
7284 do { \
7285 (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
7286 DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
7287	} while (0)
7288
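/*
 * Packing example (4 KiB pages): a bucket tracking the 256 KiB window at
 * offset 0x40000 with 3 dirty pages stores dhe_control == 0x40003 -- the
 * window base in the high bits and the page count in the low 9 bits.  The
 * two fields never collide because the window base is 256 KiB aligned,
 * leaving the low bits free for the count.  DRT_HASH_VACATE() sets the
 * count field to 0x1ff, a value no real count can reach (counts run
 * 0..DRT_BITVECTOR_PAGES), which is how vacancy is distinguished from
 * "present but clean" (count == 0).
 */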
7289
7290 #if !defined(XNU_TARGET_OS_OSX)
7291 /*
7292 * Hash table moduli.
7293 *
7294 * Since the hashtable entry's size is dependent on the size of
7295 * the bitvector, and since the hashtable size is constrained to
7296 * both being prime and fitting within the desired allocation
7297 * size, these values need to be manually determined.
7298 *
7299 * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes.
7300 *
7301 * The small hashtable allocation is 4096 bytes, so the modulus is 251.
7302 * The large hashtable allocation is 32768 bytes, so the modulus is 2039.
7303 * The xlarge hashtable allocation is 131072 bytes, so the modulus is 8179.
7304 */
7305
7306 #define DRT_HASH_SMALL_MODULUS 251
7307 #define DRT_HASH_LARGE_MODULUS 2039
7308 #define DRT_HASH_XLARGE_MODULUS 8179
7309
7310 /*
7311 * Physical memory required before the large hash modulus is permitted.
7312 *
7313 * On small memory systems, the large hash modulus can lead to physical
7314 * memory starvation, so we avoid using it there.
7315 */
7316 #define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
7317 #define DRT_HASH_XLARGE_MEMORY_REQUIRED (8 * 1024LL * 1024LL * 1024LL) /* 8GiB */
7318
7319 #define DRT_SMALL_ALLOCATION 4096 /* 80 bytes spare */
7320 #define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */
7321 #define DRT_XLARGE_ALLOCATION 131072 /* 208 bytes spare */
7322
7323 #else /* XNU_TARGET_OS_OSX */
7324 /*
7325 * Hash table moduli.
7326 *
7327 * Since the hashtable entry's size is dependent on the size of
7328 * the bitvector, and since the hashtable size is constrained to
7329 * both being prime and fitting within the desired allocation
7330 * size, these values need to be manually determined.
7331 *
7332 * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes.
7333 *
7334 * The small hashtable allocation is 16384 bytes, so the modulus is 1019.
7335 * The large hashtable allocation is 131072 bytes, so the modulus is 8179.
7336 * The xlarge hashtable allocation is 524288 bytes, so the modulus is 32749.
7337 */
7338
7339 #define DRT_HASH_SMALL_MODULUS 1019
7340 #define DRT_HASH_LARGE_MODULUS 8179
7341 #define DRT_HASH_XLARGE_MODULUS 32749
7342
7343 /*
7344 * Physical memory required before the large hash modulus is permitted.
7345 *
7346 * On small memory systems, the large hash modulus can lead to physical
7347 * memory starvation, so we avoid using it there.
7348 */
7349 #define DRT_HASH_LARGE_MEMORY_REQUIRED (4 * 1024LL * 1024LL * 1024LL) /* 4GiB */
7350 #define DRT_HASH_XLARGE_MEMORY_REQUIRED (32 * 1024LL * 1024LL * 1024LL) /* 32GiB */
7351
7352 #define DRT_SMALL_ALLOCATION 16384 /* 80 bytes spare */
7353 #define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */
7354 #define DRT_XLARGE_ALLOCATION 524288 /* 304 bytes spare */
7355
7356 #endif /* ! XNU_TARGET_OS_OSX */
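/*
 * Worked example for the macOS sizes above: each hashentry is 16 bytes, so a
 * 16384-byte allocation has room for at most 16384 / 16 = 1024 entries;
 * 1019 is a prime that fits once the fixed fields at the head of
 * struct vfs_drt_clustermap are accounted for (16384 - 1019 * 16 = 80 bytes
 * left over, hence the "80 bytes spare" note).  The same arithmetic gives
 * 8179 entries in 131072 bytes (208 spare) and 32749 in 524288 bytes
 * (304 spare).
 */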
7357
7358 /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
7359
7360 /*
7361 * Hashtable entry.
7362 */
7363 struct vfs_drt_hashentry {
7364 u_int64_t dhe_control;
7365 /*
7366 * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
7367 * DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE)
7368 * Since PAGE_SIZE is only known at boot time,
7369 * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k)
7370 * -declare dhe_bitvector array for largest possible length
7371 */
7372 #define MAX_DRT_BITVECTOR_PAGES ((1024 * 256) / (4 * 1024))
7373 u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES / 32];
7374 };
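/*
 * Sizing example: MAX_DRT_BITVECTOR_PAGES is 64, so dhe_bitvector is
 * 64 / 32 = 2 words (8 bytes) and the whole entry is 16 bytes, matching the
 * "entry size is 16 bytes" notes above.  On a 16 KiB-page system only
 * DRT_BITVECTOR_PAGES = 16 of those 64 bits are actually used; the array is
 * simply sized for the smallest supported page size.
 */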
7375
7376 /*
7377 * Hashtable bitvector handling.
7378 *
7379 * Bitvector fields are 32 bits long.
7380 */
7381
7382 #define DRT_HASH_SET_BIT(scm, i, bit) \
7383 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
7384
7385 #define DRT_HASH_CLEAR_BIT(scm, i, bit) \
7386 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
7387
7388 #define DRT_HASH_TEST_BIT(scm, i, bit) \
7389 ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
7390
7391 #define DRT_BITVECTOR_CLEAR(scm, i) \
7392 bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
7393
7394 #define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
7395 bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
7396 &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
7397 (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
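/*
 * Indexing example: page 37 of a window maps to dhe_bitvector[37 / 32],
 * i.e. word 1, bit 37 % 32 = 5, so DRT_HASH_SET_BIT(scm, i, 37) ORs 0x20
 * into the second word of entry i's bitvector.
 */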
7398
7399 /*
7400 * Dirty Region Tracking structure.
7401 *
7402 * The hashtable is allocated entirely inside the DRT structure.
7403 *
7404 * The hash is a simple circular prime modulus arrangement, the structure
7405 * is resized from small to large if it overflows.
7406 */
7407
7408 struct vfs_drt_clustermap {
7409 u_int32_t scm_magic; /* sanity/detection */
7410 #define DRT_SCM_MAGIC 0x12020003
7411 u_int32_t scm_modulus; /* current ring size */
7412 u_int32_t scm_buckets; /* number of occupied buckets */
7413 u_int32_t scm_lastclean; /* last entry we cleaned */
7414 u_int32_t scm_iskips; /* number of slot skips */
7415
7416 struct vfs_drt_hashentry scm_hashtable[0];
7417 };
7418
7419
7420 #define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
7421 #define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
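/*
 * Example: with the small macOS modulus (1019), the 256 KiB window at byte
 * offset 0x40000 (262144) hashes to slot 262144 % 1019 == 261; collisions
 * probe linearly via DRT_HASH_NEXT(), wrapping from the last slot back to 0.
 */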
7422
7423 /*
7424 * Debugging codes and arguments.
7425 */
7426 #define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
7427 #define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
7428 #define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
7429 #define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
7430 #define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
7431 * dirty */
7432 /* 0, setcount */
7433 /* 1 (clean, no map) */
7434 /* 2 (map alloc fail) */
7435 /* 3, resid (partial) */
7436 #define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
7437 #define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
7438 * lastclean, iskips */
7439
7440
7441 static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
7442 static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
7443 static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
7444 u_int64_t offset, int *indexp);
7445 static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
7446 u_int64_t offset,
7447 int *indexp,
7448 int recursed);
7449 static kern_return_t vfs_drt_do_mark_pages(
7450 void **cmapp,
7451 u_int64_t offset,
7452 u_int length,
7453 u_int *setcountp,
7454 int dirty);
7455 static void vfs_drt_trace(
7456 struct vfs_drt_clustermap *cmap,
7457 int code,
7458 int arg1,
7459 int arg2,
7460 int arg3,
7461 int arg4);
7462
7463
7464 /*
7465 * Allocate and initialise a sparse cluster map.
7466 *
7467 * Will allocate a new map, resize or compact an existing map.
7468 *
7469 * XXX we should probably have at least one intermediate map size,
7470 * as the 1:16 ratio seems a bit drastic.
7471 */
7472 static kern_return_t
7473 vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
7474 {
7475 struct vfs_drt_clustermap *cmap = NULL, *ocmap = NULL;
7476 kern_return_t kret = KERN_SUCCESS;
7477 u_int64_t offset = 0;
7478 u_int32_t i = 0;
7479 int modulus_size = 0, map_size = 0, active_buckets = 0, index = 0, copycount = 0;
7480
7481 ocmap = NULL;
7482 if (cmapp != NULL) {
7483 ocmap = *cmapp;
7484 }
7485
7486 /*
7487 * Decide on the size of the new map.
7488 */
7489 if (ocmap == NULL) {
7490 modulus_size = DRT_HASH_SMALL_MODULUS;
7491 map_size = DRT_SMALL_ALLOCATION;
7492 } else {
7493 /* count the number of active buckets in the old map */
7494 active_buckets = 0;
7495 for (i = 0; i < ocmap->scm_modulus; i++) {
7496 if (!DRT_HASH_VACANT(ocmap, i) &&
7497 (DRT_HASH_GET_COUNT(ocmap, i) != 0)) {
7498 active_buckets++;
7499 }
7500 }
7501 /*
7502 * If we're currently using the small allocation, check to
7503 * see whether we should grow to the large one.
7504 */
7505 if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7506 /*
7507 * If the ring is nearly full and we are allowed to
7508 * use the large modulus, upgrade.
7509 */
7510 if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
7511 (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
7512 modulus_size = DRT_HASH_LARGE_MODULUS;
7513 map_size = DRT_LARGE_ALLOCATION;
7514 } else {
7515 modulus_size = DRT_HASH_SMALL_MODULUS;
7516 map_size = DRT_SMALL_ALLOCATION;
7517 }
7518 } else if (ocmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7519 if ((active_buckets > (DRT_HASH_LARGE_MODULUS - 5)) &&
7520 (max_mem >= DRT_HASH_XLARGE_MEMORY_REQUIRED)) {
7521 modulus_size = DRT_HASH_XLARGE_MODULUS;
7522 map_size = DRT_XLARGE_ALLOCATION;
7523 } else {
7524 /*
7525 * If the ring is completely full and we can't
7526 * expand, there's nothing useful for us to do.
7527 * Behave as though we had compacted into the new
7528 * array and return.
7529 */
7530 return KERN_SUCCESS;
7531 }
7532 } else {
7533 /* already using the xlarge modulus */
7534 modulus_size = DRT_HASH_XLARGE_MODULUS;
7535 map_size = DRT_XLARGE_ALLOCATION;
7536
7537 /*
7538 * If the ring is completely full, there's
7539 * nothing useful for us to do. Behave as
7540 * though we had compacted into the new
7541 * array and return.
7542 */
7543 if (active_buckets >= DRT_HASH_XLARGE_MODULUS) {
7544 return KERN_SUCCESS;
7545 }
7546 }
7547 }
7548
7549 /*
7550 * Allocate and initialise the new map.
7551 */
7552
7553 kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, map_size,
7554 KMA_DATA, VM_KERN_MEMORY_FILE);
7555 if (kret != KERN_SUCCESS) {
7556 return kret;
7557 }
7558 cmap->scm_magic = DRT_SCM_MAGIC;
7559 cmap->scm_modulus = modulus_size;
7560 cmap->scm_buckets = 0;
7561 cmap->scm_lastclean = 0;
7562 cmap->scm_iskips = 0;
7563 for (i = 0; i < cmap->scm_modulus; i++) {
7564 DRT_HASH_CLEAR(cmap, i);
7565 DRT_HASH_VACATE(cmap, i);
7566 DRT_BITVECTOR_CLEAR(cmap, i);
7567 }
7568
7569 /*
7570 * If there's an old map, re-hash entries from it into the new map.
7571 */
7572 copycount = 0;
7573 if (ocmap != NULL) {
7574 for (i = 0; i < ocmap->scm_modulus; i++) {
7575 /* skip empty buckets */
7576 if (DRT_HASH_VACANT(ocmap, i) ||
7577 (DRT_HASH_GET_COUNT(ocmap, i) == 0)) {
7578 continue;
7579 }
7580 /* get new index */
7581 offset = DRT_HASH_GET_ADDRESS(ocmap, i);
7582 kret = vfs_drt_get_index(&cmap, offset, &index, 1);
7583 if (kret != KERN_SUCCESS) {
7584 /* XXX need to bail out gracefully here */
7585 panic("vfs_drt: new cluster map mysteriously too small");
7586 index = 0;
7587 }
7588 /* copy */
7589 DRT_HASH_COPY(ocmap, i, cmap, index);
7590 copycount++;
7591 }
7592 }
7593
7594 /* log what we've done */
7595 vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
7596
7597 /*
7598 * It's important to ensure that *cmapp always points to
7599 * a valid map, so we must overwrite it before freeing
7600 * the old map.
7601 */
7602 *cmapp = cmap;
7603 if (ocmap != NULL) {
7604 /* emit stats into trace buffer */
7605 vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
7606 ocmap->scm_modulus,
7607 ocmap->scm_buckets,
7608 ocmap->scm_lastclean,
7609 ocmap->scm_iskips);
7610
7611 vfs_drt_free_map(ocmap);
7612 }
7613 return KERN_SUCCESS;
7614 }
7615
7616
7617 /*
7618 * Free a sparse cluster map.
7619 */
7620 static kern_return_t
7621 vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
7622 {
7623 vm_size_t map_size = 0;
7624
7625 if (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7626 map_size = DRT_SMALL_ALLOCATION;
7627 } else if (cmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7628 map_size = DRT_LARGE_ALLOCATION;
7629 } else if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
7630 map_size = DRT_XLARGE_ALLOCATION;
7631 } else {
7632 panic("vfs_drt_free_map: Invalid modulus %d", cmap->scm_modulus);
7633 }
7634
7635 kmem_free(kernel_map, (vm_offset_t)cmap, map_size);
7636 return KERN_SUCCESS;
7637 }
7638
7639
7640 /*
7641 * Find the hashtable slot currently occupied by an entry for the supplied offset.
7642 */
7643 static kern_return_t
7644 vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
7645 {
7646 int index;
7647 u_int32_t i;
7648
7649 offset = DRT_ALIGN_ADDRESS(offset);
7650 index = DRT_HASH(cmap, offset);
7651
7652 /* traverse the hashtable */
7653 for (i = 0; i < cmap->scm_modulus; i++) {
7654 /*
7655 * If the slot is vacant, we can stop.
7656 */
7657 if (DRT_HASH_VACANT(cmap, index)) {
7658 break;
7659 }
7660
7661 /*
7662 * If the address matches our offset, we have success.
7663 */
7664 if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
7665 *indexp = index;
7666 return KERN_SUCCESS;
7667 }
7668
7669 /*
7670 * Move to the next slot, try again.
7671 */
7672 index = DRT_HASH_NEXT(cmap, index);
7673 }
7674 /*
7675 * It's not there.
7676 */
7677 return KERN_FAILURE;
7678 }
7679
7680 /*
7681 * Find the hashtable slot for the supplied offset. If we haven't allocated
7682 * one yet, allocate one and populate the address field. Note that it will
7683 * not have a nonzero page count and thus will still technically be free, so
7684 * in the case where we are called to clean pages, the slot will remain free.
7685 */
7686 static kern_return_t
7687 vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
7688 {
7689 struct vfs_drt_clustermap *cmap;
7690 kern_return_t kret;
7691 u_int32_t index;
7692 u_int32_t i;
7693
7694 cmap = *cmapp;
7695
7696 /* look for an existing entry */
7697 kret = vfs_drt_search_index(cmap, offset, indexp);
7698 if (kret == KERN_SUCCESS) {
7699 return kret;
7700 }
7701
7702 /* need to allocate an entry */
7703 offset = DRT_ALIGN_ADDRESS(offset);
7704 index = DRT_HASH(cmap, offset);
7705
7706 /* scan from the index forwards looking for a vacant slot */
7707 for (i = 0; i < cmap->scm_modulus; i++) {
7708 /* slot vacant? */
7709 if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap, index) == 0) {
7710 cmap->scm_buckets++;
7711 if (index < cmap->scm_lastclean) {
7712 cmap->scm_lastclean = index;
7713 }
7714 DRT_HASH_SET_ADDRESS(cmap, index, offset);
7715 DRT_HASH_SET_COUNT(cmap, index, 0);
7716 DRT_BITVECTOR_CLEAR(cmap, index);
7717 *indexp = index;
7718 vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
7719 return KERN_SUCCESS;
7720 }
7721 cmap->scm_iskips += i;
7722 index = DRT_HASH_NEXT(cmap, index);
7723 }
7724
7725 /*
7726 * We haven't found a vacant slot, so the map is full. If we're not
7727 * already recursed, try reallocating/compacting it.
7728 */
7729 if (recursed) {
7730 return KERN_FAILURE;
7731 }
7732 kret = vfs_drt_alloc_map(cmapp);
7733 if (kret == KERN_SUCCESS) {
7734 /* now try to insert again */
7735 kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
7736 }
7737 return kret;
7738 }
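/*
 * Probe example: two windows whose aligned offsets both hash to slot 42 end
 * up in slots 42 and 43; the DRT_DEBUG_INSERT trace for the second records a
 * probe distance of 1.  Only when no vacant or clean slot is found does the
 * grow/compact path run, and the single recursed retry either finds room in
 * the new map or the failure propagates back so the caller can push some
 * dirty pages out first.
 */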
7739
7740 /*
7741 * Implementation of set dirty/clean.
7742 *
7743 * In the 'clean' case, not finding a map is OK.
7744 */
7745 static kern_return_t
7746 vfs_drt_do_mark_pages(
7747 void **private,
7748 u_int64_t offset,
7749 u_int length,
7750 u_int *setcountp,
7751 int dirty)
7752 {
7753 struct vfs_drt_clustermap *cmap, **cmapp;
7754 kern_return_t kret;
7755 int i, index, pgoff, pgcount, setcount, ecount;
7756
7757 cmapp = (struct vfs_drt_clustermap **)private;
7758 cmap = *cmapp;
7759
7760 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
7761
7762 if (setcountp != NULL) {
7763 *setcountp = 0;
7764 }
7765
7766 /* allocate a cluster map if we don't already have one */
7767 if (cmap == NULL) {
7768 /* no cluster map, nothing to clean */
7769 if (!dirty) {
7770 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
7771 return KERN_SUCCESS;
7772 }
7773 kret = vfs_drt_alloc_map(cmapp);
7774 if (kret != KERN_SUCCESS) {
7775 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
7776 return kret;
7777 }
7778 }
7779 setcount = 0;
7780
7781 /*
7782 * Iterate over the length of the region.
7783 */
7784 while (length > 0) {
7785 /*
7786 * Get the hashtable index for this offset.
7787 *
7788 * XXX this will add blank entries if we are clearing a range
7789 * that hasn't been dirtied.
7790 */
7791 kret = vfs_drt_get_index(cmapp, offset, &index, 0);
7792 cmap = *cmapp; /* may have changed! */
7793 /* this may be a partial-success return */
7794 if (kret != KERN_SUCCESS) {
7795 if (setcountp != NULL) {
7796 *setcountp = setcount;
7797 }
7798 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
7799
7800 return kret;
7801 }
7802
7803 /*
7804 * Work out how many pages we're modifying in this
7805 * hashtable entry.
7806 */
7807 pgoff = (int)((offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE);
7808 pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
7809
7810 /*
7811 * Iterate over pages, dirty/clearing as we go.
7812 */
7813 ecount = DRT_HASH_GET_COUNT(cmap, index);
7814 for (i = 0; i < pgcount; i++) {
7815 if (dirty) {
7816 if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
7817 if (ecount >= DRT_BITVECTOR_PAGES) {
7818 panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
7819 }
7820 DRT_HASH_SET_BIT(cmap, index, pgoff + i);
7821 ecount++;
7822 setcount++;
7823 }
7824 } else {
7825 if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
7826 if (ecount <= 0) {
7827 panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
7828 }
7829 assert(ecount > 0);
7830 DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
7831 ecount--;
7832 setcount++;
7833 }
7834 }
7835 }
7836 DRT_HASH_SET_COUNT(cmap, index, ecount);
7837
7838 offset += pgcount * PAGE_SIZE;
7839 length -= pgcount * PAGE_SIZE;
7840 }
7841 if (setcountp != NULL) {
7842 *setcountp = setcount;
7843 }
7844
7845 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
7846
7847 return KERN_SUCCESS;
7848 }
7849
7850 /*
7851 * Mark a set of pages as dirty/clean.
7852 *
7853 * This is a public interface.
7854 *
7855 * cmapp
7856 * Pointer to storage suitable for holding a pointer. Note that
7857 * this must either be NULL or a value set by this function.
7858 *
7859 * size
7860 * Current file size in bytes.
7861 *
7862 * offset
7863 * Offset of the first page to be marked as dirty, in bytes. Must be
7864 * page-aligned.
7865 *
7866 * length
7867 * Length of dirty region, in bytes. Must be a multiple of PAGE_SIZE.
7868 *
7869 * setcountp
7870 * Number of pages newly marked dirty by this call (optional).
7871 *
7872 * Returns KERN_SUCCESS if all the pages were successfully marked.
7873 */
7874 static kern_return_t
7875 vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
7876 {
7877 /* XXX size unused, drop from interface */
7878 return vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1);
7879 }
7880
7881 #if 0
7882 static kern_return_t
7883 vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
7884 {
7885 return vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
7886 }
7887 #endif
7888
7889 /*
7890 * Get a cluster of dirty pages.
7891 *
7892 * This is a public interface.
7893 *
7894 * cmapp
7895 * Pointer to storage managed by drt_mark_pages. Note that this must
7896 * be NULL or a value set by drt_mark_pages.
7897 *
7898 * offsetp
7899 * Returns the byte offset into the file of the first page in the cluster.
7900 *
7901 * lengthp
7902 * Returns the length in bytes of the cluster of dirty pages.
7903 *
7904 * Returns success if a cluster was found. If KERN_FAILURE is returned, there
7905 * are no dirty pages meeting the minimum size criteria. Private storage will
7906 * be released if there are no more dirty pages left in the map.
7907 *
7908 */
7909 static kern_return_t
7910 vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
7911 {
7912 struct vfs_drt_clustermap *cmap;
7913 u_int64_t offset;
7914 u_int length;
7915 u_int32_t j;
7916 int index, i, fs, ls;
7917
7918 /* sanity */
7919 if ((cmapp == NULL) || (*cmapp == NULL)) {
7920 return KERN_FAILURE;
7921 }
7922 cmap = *cmapp;
7923
7924 /* walk the hashtable */
7925 for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
7926 index = DRT_HASH(cmap, offset);
7927
7928 if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) {
7929 continue;
7930 }
7931
7932 /* scan the bitfield for a string of bits */
7933 fs = -1;
7934
7935 for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
7936 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
7937 fs = i;
7938 break;
7939 }
7940 }
7941 if (fs == -1) {
7942 /* didn't find any bits set */
7943 panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld",
7944 cmap, index, DRT_HASH_GET_COUNT(cmap, index));
7945 }
7946 for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
7947 if (!DRT_HASH_TEST_BIT(cmap, index, i)) {
7948 break;
7949 }
7950 }
7951
7952 /* compute offset and length, mark pages clean */
7953 offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
7954 length = ls * PAGE_SIZE;
7955 vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
7956 cmap->scm_lastclean = index;
7957
7958 /* return successful */
7959 *offsetp = (off_t)offset;
7960 *lengthp = length;
7961
7962 vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
7963 return KERN_SUCCESS;
7964 }
7965 /*
7966 * We didn't find anything... hashtable is empty
7967 * emit stats into trace buffer and
7968 * then free it
7969 */
7970 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
7971 cmap->scm_modulus,
7972 cmap->scm_buckets,
7973 cmap->scm_lastclean,
7974 cmap->scm_iskips);
7975
7976 vfs_drt_free_map(cmap);
7977 *cmapp = NULL;
7978
7979 return KERN_FAILURE;
7980 }
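/*
 * Usage sketch (illustrative only; drt_usage_sketch() is a hypothetical
 * caller, not part of this file): this is essentially the pattern
 * sparse_cluster_add()/sparse_cluster_push() follow with the two public
 * entry points -- record dirty pages as they appear, then drain whole
 * clusters until the map reports that nothing is left and frees itself.
 */
#if 0
static void
drt_usage_sketch(void)
{
	void	*scmap = NULL;		/* private storage managed by vfs_drt_* */
	off_t	offset;
	u_int	length;
	u_int	new_dirty;

	/* record two dirty pages at the start of the file */
	(void) vfs_drt_mark_pages(&scmap, 0, 2 * PAGE_SIZE, &new_dirty);

	/* each successful call hands back one contiguous run of dirty pages */
	while (vfs_drt_get_cluster(&scmap, &offset, &length) == KERN_SUCCESS) {
		/* write out [offset, offset + length) here */
	}
	/* scmap is NULL again; the map freed itself once it went empty */
}
#endif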
7981
7982
7983 static kern_return_t
7984 vfs_drt_control(void **cmapp, int op_type)
7985 {
7986 struct vfs_drt_clustermap *cmap;
7987
7988 /* sanity */
7989 if ((cmapp == NULL) || (*cmapp == NULL)) {
7990 return KERN_FAILURE;
7991 }
7992 cmap = *cmapp;
7993
7994 switch (op_type) {
7995 case 0:
7996 /* emit stats into trace buffer */
7997 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
7998 cmap->scm_modulus,
7999 cmap->scm_buckets,
8000 cmap->scm_lastclean,
8001 cmap->scm_iskips);
8002
8003 vfs_drt_free_map(cmap);
8004 *cmapp = NULL;
8005 break;
8006
8007 case 1:
8008 cmap->scm_lastclean = 0;
8009 break;
8010 }
8011 return KERN_SUCCESS;
8012 }
8013
8014
8015
8016 /*
8017 * Emit a summary of the state of the clustermap into the trace buffer
8018 * along with some caller-provided data.
8019 */
8020 #if KDEBUG
8021 static void
8022 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
8023 {
8024 KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
8025 }
8026 #else
8027 static void
8028 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
8029 __unused int arg1, __unused int arg2, __unused int arg3,
8030 __unused int arg4)
8031 {
8032 }
8033 #endif
8034
8035 #if 0
8036 /*
8037 * Perform basic sanity check on the hash entry summary count
8038 * vs. the actual bits set in the entry.
8039 */
8040 static void
8041 vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
8042 {
8043 int index, i;
8044 int bits_on;
8045
8046 for (index = 0; index < cmap->scm_modulus; index++) {
8047 if (DRT_HASH_VACANT(cmap, index)) {
8048 continue;
8049 }
8050
8051 for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
8052 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
8053 bits_on++;
8054 }
8055 }
8056 if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) {
8057 panic("bits_on = %d, index = %d", bits_on, index);
8058 }
8059 }
8060 }
8061 #endif
8062
8063 /*
8064 * Internal interface only.
8065 */
8066 static kern_return_t
8067 vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag)
8068 {
8069 struct vfs_drt_clustermap *cmap;
8070
8071 /* sanity */
8072 if ((cmapp == NULL) || (*cmapp == NULL) || (push_flag == NULL)) {
8073 return KERN_FAILURE;
8074 }
8075 cmap = *cmapp;
8076
8077 if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
8078 /*
8079 * If we have a full xlarge sparse cluster,
8080 * we push it out all at once so the cluster
8081 * map can be available to absorb more I/Os.
8082 * This is done on large memory configs so
8083 * the small I/Os don't interfere with the
8084 * pro workloads.
8085 */
8086 *push_flag = PUSH_ALL;
8087 }
8088 return KERN_SUCCESS;
8089 }
8090