1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)vfs_cluster.c 8.10 (Berkeley) 3/28/95
62 */
63
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/buf_internal.h>
67 #include <sys/mount_internal.h>
68 #include <sys/vnode_internal.h>
69 #include <sys/trace.h>
70 #include <kern/kalloc.h>
71 #include <sys/time.h>
72 #include <sys/kernel.h>
73 #include <sys/resourcevar.h>
74 #include <miscfs/specfs/specdev.h>
75 #include <sys/uio_internal.h>
76 #include <libkern/libkern.h>
77 #include <machine/machine_routines.h>
78
79 #include <sys/ubc_internal.h>
80 #include <vm/vnode_pager.h>
81
82 #include <mach/mach_types.h>
83 #include <mach/memory_object_types.h>
84 #include <mach/vm_map.h>
85 #include <mach/upl.h>
86 #include <kern/task.h>
87 #include <kern/policy_internal.h>
88
89 #include <vm/vm_kern.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_fault.h>
93
94 #include <sys/kdebug.h>
95 #include <sys/kdebug_triage.h>
96 #include <libkern/OSAtomic.h>
97
98 #include <sys/sdt.h>
99
100 #include <stdbool.h>
101
102 #include <vfs/vfs_disk_conditioner.h>
103
104 #if 0
105 #undef KERNEL_DEBUG
106 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
107 #endif
108
109
110 #define CL_READ 0x01
111 #define CL_WRITE 0x02
112 #define CL_ASYNC 0x04
113 #define CL_COMMIT 0x08
114 #define CL_PAGEOUT 0x10
115 #define CL_AGE 0x20
116 #define CL_NOZERO 0x40
117 #define CL_PAGEIN 0x80
118 #define CL_DEV_MEMORY 0x100
119 #define CL_PRESERVE 0x200
120 #define CL_THROTTLE 0x400
121 #define CL_KEEPCACHED 0x800
122 #define CL_DIRECT_IO 0x1000
123 #define CL_PASSIVE 0x2000
124 #define CL_IOSTREAMING 0x4000
125 #define CL_CLOSE 0x8000
126 #define CL_ENCRYPTED 0x10000
127 #define CL_RAW_ENCRYPTED 0x20000
128 #define CL_NOCACHE 0x40000
129
130 #define MAX_VECTOR_UPL_SIZE (2 * MAX_UPL_SIZE_BYTES)
131
132 #define CLUSTER_IO_WAITING ((buf_t)1)
133
134 extern upl_t vector_upl_create(vm_offset_t, uint32_t);
135 extern uint32_t vector_upl_max_upls(upl_t);
136 extern boolean_t vector_upl_is_valid(upl_t);
137 extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
138 extern void vector_upl_set_pagelist(upl_t);
139 extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t);
140
141 struct clios {
142 lck_mtx_t io_mtxp;
143 u_int io_completed; /* amount of io that has currently completed */
144 u_int io_issued; /* amount of io that was successfully issued */
145 int io_error; /* error code of first error encountered */
146 int io_wanted; /* someone is sleeping waiting for a change in state */
147 };
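/*
 * Invariant (informational note): io_issued only grows on the issuing
 * side and io_completed only grows in cluster_iodone(), so the difference
 * "io_issued - io_completed" is the number of bytes still in flight for
 * this stream; that difference is what cluster_iostate_wait() compares
 * against its target.
 */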
148
149 struct cl_direct_read_lock {
150 LIST_ENTRY(cl_direct_read_lock) chain;
151 int32_t ref_count;
152 vnode_t vp;
153 lck_rw_t rw_lock;
154 };
155
156 #define CL_DIRECT_READ_LOCK_BUCKETS 61
157
158 static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock)
159 cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS];
160
161 static LCK_GRP_DECLARE(cl_mtx_grp, "cluster I/O");
162 static LCK_MTX_DECLARE(cl_transaction_mtxp, &cl_mtx_grp);
163 static LCK_SPIN_DECLARE(cl_direct_read_spin_lock, &cl_mtx_grp);
164
165 static ZONE_DEFINE(cl_rd_zone, "cluster_read",
166 sizeof(struct cl_readahead), ZC_ZFREE_CLEARMEM);
167
168 static ZONE_DEFINE(cl_wr_zone, "cluster_write",
169 sizeof(struct cl_writebehind), ZC_ZFREE_CLEARMEM);
170
171 #define IO_UNKNOWN 0
172 #define IO_DIRECT 1
173 #define IO_CONTIG 2
174 #define IO_COPY 3
175
176 #define PUSH_DELAY 0x01
177 #define PUSH_ALL 0x02
178 #define PUSH_SYNC 0x04
179
180
181 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size);
182 static void cluster_wait_IO(buf_t cbp_head, int async);
183 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
184
185 static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
186
187 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
188 int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
189 static int cluster_iodone(buf_t bp, void *callback_arg);
190 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp);
191 static int cluster_is_throttled(vnode_t vp);
192
193 static void cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name);
194
195 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg, int flags);
196
197 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
198 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
199
200 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
201 int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
202 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
203 int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
204 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
205 int (*)(buf_t, void *), void *callback_arg, int flags) __attribute__((noinline));
206
207 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
208 off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
209 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
210 int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
211 static int cluster_write_direct_small(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
212 int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size) __attribute__((noinline));
213 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
214 int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag) __attribute__((noinline));
215
216 static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass,
217 off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
218
219 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
220
221 static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
222 static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra,
223 int (*callback)(buf_t, void *), void *callback_arg, int bflag);
224
225 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
226
227 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *),
228 void *callback_arg, int *err, boolean_t vm_initiated);
229
230 static int sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
231 static int sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag,
232 int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
233 static int sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF,
234 int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
235
236 static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
237 static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
238 static kern_return_t vfs_drt_control(void **cmapp, int op_type);
239 static kern_return_t vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag);
240
241
242 /*
243 * For throttled IO to check whether
244 * a block is cached by the boot cache
245 * and thus it can avoid delaying the IO.
246 *
247 * bootcache_contains_block is initially
248 * NULL. The BootCache will set it while
249 * the cache is active and clear it when
250 * the cache is jettisoned.
251 *
252 * Returns 0 if the block is not
253 * contained in the cache, 1 if it is
254 * contained.
255 *
256 * The function pointer remains valid
257 * after the cache has been evicted even
258 * if bootcache_contains_block has been
259 * cleared.
260 *
261 * See rdar://9974130 The new throttling mechanism breaks the boot cache for throttled IOs
262 */
263 int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL;
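/*
 * Illustrative sketch (not part of this file): the BootCache kext is
 * expected to publish its lookup routine through this pointer while the
 * cache is active and clear it on jettison, along the lines of:
 *
 *	// hypothetical kext-side code
 *	bootcache_contains_block = my_bootcache_lookup;	// cache activated
 *	...
 *	bootcache_contains_block = NULL;		// cache jettisoned
 *
 * Readers take a local copy of the pointer before calling through it
 * (see cluster_io_present_in_BC below), so a concurrent clear is safe.
 */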
264
265
266 /*
267 * limit the internal I/O size so that we
268 * can represent it in a 32 bit int
269 */
270 #define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512)
271 #define MAX_IO_CONTIG_SIZE MAX_UPL_SIZE_BYTES
272 #define MAX_VECTS 16
273 /*
274 * The MIN_DIRECT_WRITE_SIZE governs how much I/O should be issued before we consider
275 * allowing the caller to bypass the buffer cache. For small I/Os (less than 16k),
276 * we have not historically allowed the write to bypass the UBC.
277 */
278 #define MIN_DIRECT_WRITE_SIZE (16384)
279
280 #define WRITE_THROTTLE 6
281 #define WRITE_THROTTLE_SSD 2
282 #define WRITE_BEHIND 1
283 #define WRITE_BEHIND_SSD 1
284
285 #if !defined(XNU_TARGET_OS_OSX)
286 #define PREFETCH 1
287 #define PREFETCH_SSD 1
288 uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a speculative read-ahead */
289 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead */
290 #else /* XNU_TARGET_OS_OSX */
291 #define PREFETCH 3
292 #define PREFETCH_SSD 2
293 uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a speculative read-ahead */
294 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead on SSDs */
295 #endif /* ! XNU_TARGET_OS_OSX */
296
297 /* maximum bytes for read-ahead */
298 uint32_t prefetch_max = (1024 * 1024 * 1024);
299 /* maximum bytes for outstanding reads */
300 uint32_t overlapping_read_max = (1024 * 1024 * 1024);
301 /* maximum bytes for outstanding writes */
302 uint32_t overlapping_write_max = (1024 * 1024 * 1024);
303
304 #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base))
305 #define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
306
307 int speculative_reads_disabled = 0;
308
309 /*
310 * throttle the number of async writes that
311 * can be outstanding on a single vnode
312 * before we issue a synchronous write
313 */
314 #define THROTTLE_MAXCNT 0
315
316 uint32_t throttle_max_iosize = (128 * 1024);
317
318 #define THROTTLE_MAX_IOSIZE (throttle_max_iosize)
319
320 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_max_iosize, 0, "");
321
322
323 void
324 cluster_init(void)
325 {
326 for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) {
327 LIST_INIT(&cl_direct_read_locks[i]);
328 }
329 }
330
331
332 uint32_t
333 cluster_max_io_size(mount_t mp, int type)
334 {
335 uint32_t max_io_size;
336 uint32_t segcnt;
337 uint32_t maxcnt;
338
339 switch (type) {
340 case CL_READ:
341 segcnt = mp->mnt_segreadcnt;
342 maxcnt = mp->mnt_maxreadcnt;
343 break;
344 case CL_WRITE:
345 segcnt = mp->mnt_segwritecnt;
346 maxcnt = mp->mnt_maxwritecnt;
347 break;
348 default:
349 segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
350 maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
351 break;
352 }
353 if (segcnt > (MAX_UPL_SIZE_BYTES >> PAGE_SHIFT)) {
354 /*
355 * don't allow a size beyond the max UPL size we can create
356 */
357 segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
358 }
359 max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
360
361 if (max_io_size < MAX_UPL_TRANSFER_BYTES) {
362 /*
363 * don't allow a size smaller than the old fixed limit
364 */
365 max_io_size = MAX_UPL_TRANSFER_BYTES;
366 } else {
367 /*
368 * make sure the size specified is a multiple of PAGE_SIZE
369 */
370 max_io_size &= ~PAGE_MASK;
371 }
372 return max_io_size;
373 }
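/*
 * Usage sketch (hypothetical caller): a filesystem sizing a cluster write
 * would typically clamp its transfer length to this value, e.g.:
 *
 *	uint32_t max_xfer = cluster_max_io_size(vp->v_mount, CL_WRITE);
 *	io_size = MIN(io_size, max_xfer);
 *
 * The returned value is page aligned and never smaller than
 * MAX_UPL_TRANSFER_BYTES.
 */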
374
375 /*
376 * Returns the max prefetch value. If the value overflows or exceeds the
377 * specified 'prefetch_limit', it is capped at 'prefetch_limit'.
378 */
379 static inline uint32_t
380 cluster_max_prefetch(vnode_t vp, uint32_t max_io_size, uint32_t prefetch_limit)
381 {
382 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
383 uint32_t io_scale = IO_SCALE(vp, is_ssd ? PREFETCH_SSD : PREFETCH);
384 uint32_t prefetch = 0;
385
386 if (__improbable(os_mul_overflow(max_io_size, io_scale, &prefetch) ||
387 (prefetch > prefetch_limit))) {
388 prefetch = prefetch_limit;
389 }
390
391 return prefetch;
392 }
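/*
 * Worked example (illustrative numbers): with a max_io_size of 1MB and an
 * effective io_scale of 3 (PREFETCH on macOS with mnt_ioscale == 1), the
 * computed prefetch is 3MB; if prefetch_limit were 2MB, the result would
 * be capped at 2MB. A multiply that overflows is capped the same way.
 */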
393
394 static inline uint32_t
395 calculate_max_throttle_size(vnode_t vp)
396 {
397 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
398 uint32_t io_scale = IO_SCALE(vp, is_ssd ? 2 : 1);
399
400 return MIN(io_scale * THROTTLE_MAX_IOSIZE, MAX_UPL_TRANSFER_BYTES);
401 }
402
403 static inline uint32_t
404 calculate_max_throttle_cnt(vnode_t vp)
405 {
406 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
407 uint32_t io_scale = IO_SCALE(vp, 1);
408
409 return is_ssd ? MIN(io_scale, 4) : THROTTLE_MAXCNT;
410 }
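/*
 * Example (illustrative, assuming the default throttle_max_iosize of
 * 128KB and mnt_ioscale == 1): on an SSD-backed mount, throttled I/O is
 * limited to MIN(2 * 128KB, MAX_UPL_TRANSFER_BYTES) bytes per I/O with at
 * most MIN(1, 4) == 1 async I/O in flight; on non-SSD media the count
 * falls back to THROTTLE_MAXCNT (0), i.e. the I/O is issued synchronously.
 */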
411
412 #define CLW_ALLOCATE 0x01
413 #define CLW_RETURNLOCKED 0x02
414 #define CLW_IONOCACHE 0x04
415 #define CLW_IOPASSIVE 0x08
416
417 /*
418 * if the read ahead context doesn't yet exist,
419 * allocate and initialize it...
420 * the vnode lock serializes multiple callers
421 * during the actual assignment... first one
422 * to grab the lock wins... the other callers
423 * will release the now unnecessary storage
424 *
425 * once the context is present, try to grab (but don't block on)
426 * the lock associated with it... if someone
427 * else currently owns it, then the read
428 * will run without read-ahead. this allows
429 * multiple readers to run in parallel and
430 * since there's only 1 read ahead context,
431 * there's no real loss in only allowing 1
432 * reader to have read-ahead enabled.
433 */
434 static struct cl_readahead *
435 cluster_get_rap(vnode_t vp)
436 {
437 struct ubc_info *ubc;
438 struct cl_readahead *rap;
439
440 ubc = vp->v_ubcinfo;
441
442 if ((rap = ubc->cl_rahead) == NULL) {
443 rap = zalloc_flags(cl_rd_zone, Z_WAITOK | Z_ZERO);
444 rap->cl_lastr = -1;
445 lck_mtx_init(&rap->cl_lockr, &cl_mtx_grp, LCK_ATTR_NULL);
446
447 vnode_lock(vp);
448
449 if (ubc->cl_rahead == NULL) {
450 ubc->cl_rahead = rap;
451 } else {
452 lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
453 zfree(cl_rd_zone, rap);
454 rap = ubc->cl_rahead;
455 }
456 vnode_unlock(vp);
457 }
458 if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE) {
459 return rap;
460 }
461
462 return (struct cl_readahead *)NULL;
463 }
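/*
 * Usage sketch (hypothetical caller): a non-NULL return comes back with
 * cl_lockr held via the try-lock above, so callers pair it with an
 * explicit unlock:
 *
 *	struct cl_readahead *rap;
 *
 *	if ((rap = cluster_get_rap(vp)) != NULL) {
 *		// read-ahead state may be consulted/updated here
 *		lck_mtx_unlock(&rap->cl_lockr);
 *	}
 */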
464
465
466 /*
467 * if the write behind context doesn't yet exist,
468 * and CLW_ALLOCATE is specified, allocate and initialize it...
469 * the vnode lock serializes multiple callers
470 * during the actual assignment... first one
471 * to grab the lock wins... the other callers
472 * will release the now unnecessary storage
473 *
474 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
475 * the lock associated with the write behind context before
476 * returning
477 */
478
479 static struct cl_writebehind *
480 cluster_get_wbp(vnode_t vp, int flags)
481 {
482 struct ubc_info *ubc;
483 struct cl_writebehind *wbp;
484
485 ubc = vp->v_ubcinfo;
486
487 if ((wbp = ubc->cl_wbehind) == NULL) {
488 if (!(flags & CLW_ALLOCATE)) {
489 return (struct cl_writebehind *)NULL;
490 }
491
492 wbp = zalloc_flags(cl_wr_zone, Z_WAITOK | Z_ZERO);
493
494 lck_mtx_init(&wbp->cl_lockw, &cl_mtx_grp, LCK_ATTR_NULL);
495
496 vnode_lock(vp);
497
498 if (ubc->cl_wbehind == NULL) {
499 ubc->cl_wbehind = wbp;
500 } else {
501 lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
502 zfree(cl_wr_zone, wbp);
503 wbp = ubc->cl_wbehind;
504 }
505 vnode_unlock(vp);
506 }
507 if (flags & CLW_RETURNLOCKED) {
508 lck_mtx_lock(&wbp->cl_lockw);
509 }
510
511 return wbp;
512 }
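/*
 * Usage sketch (hypothetical caller): with CLW_RETURNLOCKED the context
 * is returned with cl_lockw held, so the caller must drop it when done;
 * with CLW_ALLOCATE the allocation blocks rather than fails, so the
 * return is non-NULL:
 *
 *	struct cl_writebehind *wbp;
 *
 *	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
 *	// ... examine or update write-behind state ...
 *	lck_mtx_unlock(&wbp->cl_lockw);
 */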
513
514
515 static void
516 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, int flags)
517 {
518 struct cl_writebehind *wbp;
519
520 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
521 if (wbp->cl_number) {
522 lck_mtx_lock(&wbp->cl_lockw);
523
524 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL, FALSE);
525
526 lck_mtx_unlock(&wbp->cl_lockw);
527 }
528 }
529 }
530
531
532 static int
533 cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
534 {
535 daddr64_t blkno;
536 size_t io_size;
537 int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
538
539 if (bootcache_check_fn && vp->v_mount && vp->v_mount->mnt_devvp) {
540 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL)) {
541 return 0;
542 }
543
544 if (io_size == 0) {
545 return 0;
546 }
547
548 if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno)) {
549 return 1;
550 }
551 }
552 return 0;
553 }
554
555
556 static int
557 cluster_is_throttled(vnode_t vp)
558 {
559 return throttle_io_will_be_throttled(-1, vp->v_mount);
560 }
561
562
563 static void
564 cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name)
565 {
566 lck_mtx_lock(&iostate->io_mtxp);
567
568 while ((iostate->io_issued - iostate->io_completed) > target) {
569 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
570 iostate->io_issued, iostate->io_completed, target, 0, 0);
571
572 iostate->io_wanted = 1;
573 msleep((caddr_t)&iostate->io_wanted, &iostate->io_mtxp, PRIBIO + 1, wait_name, NULL);
574
575 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
576 iostate->io_issued, iostate->io_completed, target, 0, 0);
577 }
578 lck_mtx_unlock(&iostate->io_mtxp);
579 }
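/*
 * Illustrative pattern (not a verbatim caller): issuers of streaming
 * direct I/O account for each request in io_issued (under io_mtxp),
 * cluster_iodone() adds the completed bytes to io_completed, and the
 * issuer paces itself with something like:
 *
 *	cluster_iostate_wait(&iostate, max_outstanding, "example_wait");
 *	...
 *	cluster_io(vp, upl, upl_offset, f_offset, io_size,
 *	    CL_ASYNC | CL_COMMIT, NULL, &iostate, callback, callback_arg);
 *
 * where 'max_outstanding' (a hypothetical name) bounds the number of
 * bytes still in flight.
 */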
580
581 static void
582 cluster_handle_associated_upl(struct clios *iostate, upl_t upl,
583 upl_offset_t upl_offset, upl_size_t size)
584 {
585 if (!size) {
586 return;
587 }
588
589 upl_t associated_upl = upl_associated_upl(upl);
590
591 if (!associated_upl) {
592 return;
593 }
594
595 #if 0
596 printf("1: %d %d\n", upl_offset, upl_offset + size);
597 #endif
598
599 /*
600 * The associated UPL is page aligned to file offsets whereas the
601 * UPL it's attached to has different alignment requirements. The
602 * upl_offset that we have refers to @upl. The code that follows
603 * has to deal with the first and last pages in this transaction
604 * which might straddle pages in the associated UPL. To keep
605 * track of these pages, we use the mark bits: if the mark bit is
606 * set, we know another transaction has completed its part of that
607 * page and so we can unlock that page here.
608 *
609 * The following illustrates what we have to deal with:
610 *
611 * MEM u <------------ 1 PAGE ------------> e
612 * +-------------+----------------------+-----------------
613 * | |######################|#################
614 * +-------------+----------------------+-----------------
615 * FILE | <--- a ---> o <------------ 1 PAGE ------------>
616 *
617 * So here we show a write to offset @o. The data that is to be
618 * written is in a buffer that is not page aligned; it has offset
619 * @a in the page. The upl that carries the data starts in memory
620 * at @u. The associated upl starts in the file at offset @o. A
621 * transaction will always end on a page boundary (like @e above)
622 * except for the very last transaction in the group. We cannot
623 * unlock the page at @o in the associated upl until both the
624 * transaction ending at @e and the following transaction (that
625 * starts at @e) has completed.
626 */
627
628 /*
629 * We record whether or not the two UPLs are aligned as the mark
630 * bit in the first page of @upl.
631 */
632 upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
633 bool is_unaligned = upl_page_get_mark(pl, 0);
634
635 if (is_unaligned) {
636 upl_page_info_t *assoc_pl = UPL_GET_INTERNAL_PAGE_LIST(associated_upl);
637
638 upl_offset_t upl_end = upl_offset + size;
639 assert(upl_end >= PAGE_SIZE);
640
641 upl_size_t assoc_upl_size = upl_get_size(associated_upl);
642
643 /*
644 * In the very first transaction in the group, upl_offset will
645 * not be page aligned, but after that it will be and in that
646 * case we want the preceding page in the associated UPL hence
647 * the minus one.
648 */
649 assert(upl_offset);
650 if (upl_offset) {
651 upl_offset = trunc_page_32(upl_offset - 1);
652 }
653
654 lck_mtx_lock_spin(&iostate->io_mtxp);
655
656 // Look at the first page...
657 if (upl_offset
658 && !upl_page_get_mark(assoc_pl, upl_offset >> PAGE_SHIFT)) {
659 /*
660 * The first page isn't marked so let another transaction
661 * completion handle it.
662 */
663 upl_page_set_mark(assoc_pl, upl_offset >> PAGE_SHIFT, true);
664 upl_offset += PAGE_SIZE;
665 }
666
667 // And now the last page...
668
669 /*
670 * This needs to be > rather than >= because if it's equal, it
671 * means there's another transaction that is sharing the last
672 * page.
673 */
674 if (upl_end > assoc_upl_size) {
675 upl_end = assoc_upl_size;
676 } else {
677 upl_end = trunc_page_32(upl_end);
678 const int last_pg = (upl_end >> PAGE_SHIFT) - 1;
679
680 if (!upl_page_get_mark(assoc_pl, last_pg)) {
681 /*
682 * The last page isn't marked so mark the page and let another
683 * transaction completion handle it.
684 */
685 upl_page_set_mark(assoc_pl, last_pg, true);
686 upl_end -= PAGE_SIZE;
687 }
688 }
689
690 lck_mtx_unlock(&iostate->io_mtxp);
691
692 #if 0
693 printf("2: %d %d\n", upl_offset, upl_end);
694 #endif
695
696 if (upl_end <= upl_offset) {
697 return;
698 }
699
700 size = upl_end - upl_offset;
701 } else {
702 assert(!(upl_offset & PAGE_MASK));
703 assert(!(size & PAGE_MASK));
704 }
705
706 boolean_t empty;
707
708 /*
709 * We can unlock these pages now and as this is for a
710 * direct/uncached write, we want to dump the pages too.
711 */
712 kern_return_t kr = upl_abort_range(associated_upl, upl_offset, size,
713 UPL_ABORT_DUMP_PAGES, &empty);
714
715 assert(!kr);
716
717 if (!kr && empty) {
718 upl_set_associated_upl(upl, NULL);
719 upl_deallocate(associated_upl);
720 }
721 }
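/*
 * Worked example (illustrative): for an unaligned direct write, consider
 * a middle transaction whose data spans upl_offset 8192..12288 of @upl.
 * The code above first backs upl_offset up to the preceding page boundary
 * (4096), because that file-aligned page is shared with the previous
 * transaction. If that page's mark bit is not yet set, the mark is set
 * and the page is left for the other transaction's completion to handle;
 * the same handshake is applied to the page shared with the following
 * transaction at the tail. Only pages wholly owned by this transaction
 * are aborted/dumped here.
 */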
722
723 static int
724 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp)
725 {
726 int upl_abort_code = 0;
727 int page_in = 0;
728 int page_out = 0;
729
730 if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE)) {
731 /*
732 * direct write of any flavor, or a direct read that wasn't aligned
733 */
734 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
735 } else {
736 if (io_flags & B_PAGEIO) {
737 if (io_flags & B_READ) {
738 page_in = 1;
739 } else {
740 page_out = 1;
741 }
742 }
743 if (io_flags & B_CACHE) {
744 /*
745 * leave pages in the cache unchanged on error
746 */
747 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
748 } else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) {
749 /*
750 * transient error on pageout/write path... leave pages unchanged
751 */
752 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
753 } else if (page_in) {
754 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
755 } else {
756 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
757 }
758
759 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
760 }
761 return upl_abort_code;
762 }
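/*
 * Informational summary of the policy implemented above:
 *
 *	B_PHYS and B_CACHE both set	-> commit the range (direct I/O path)
 *	B_CACHE set			-> abort, leave cached pages intact
 *	write/pageout error, unless it is ENXIO on a non-swap vnode
 *					-> abort, leave pages intact
 *	pagein error			-> abort with UPL_ABORT_ERROR
 *	everything else			-> abort and dump the pages
 */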
763
764
765 static int
766 cluster_iodone(buf_t bp, void *callback_arg)
767 {
768 int b_flags;
769 int error;
770 int total_size;
771 int total_resid;
772 int upl_offset;
773 int zero_offset;
774 int pg_offset = 0;
775 int commit_size = 0;
776 int upl_flags = 0;
777 int transaction_size = 0;
778 upl_t upl;
779 buf_t cbp;
780 buf_t cbp_head;
781 buf_t cbp_next;
782 buf_t real_bp;
783 vnode_t vp;
784 struct clios *iostate;
785 void *verify_ctx;
786 boolean_t transaction_complete = FALSE;
787
788 __IGNORE_WCASTALIGN(cbp_head = (buf_t)(bp->b_trans_head));
789
790 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
791 cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
792
793 if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) {
794 lck_mtx_lock_spin(&cl_transaction_mtxp);
795
796 bp->b_flags |= B_TDONE;
797
798 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
799 /*
800 * all I/O requests that are part of this transaction
801 * have to complete before we can process it
802 */
803 if (!(cbp->b_flags & B_TDONE)) {
804 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
805 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
806
807 lck_mtx_unlock(&cl_transaction_mtxp);
808
809 return 0;
810 }
811
812 if (cbp->b_trans_next == CLUSTER_IO_WAITING) {
813 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
814 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
815
816 lck_mtx_unlock(&cl_transaction_mtxp);
817 wakeup(cbp);
818
819 return 0;
820 }
821
822 if (cbp->b_flags & B_EOT) {
823 transaction_complete = TRUE;
824 }
825 }
826 lck_mtx_unlock(&cl_transaction_mtxp);
827
828 if (transaction_complete == FALSE) {
829 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
830 cbp_head, 0, 0, 0, 0);
831 return 0;
832 }
833 }
834 error = 0;
835 total_size = 0;
836 total_resid = 0;
837
838 cbp = cbp_head;
839 vp = cbp->b_vp;
840 upl_offset = cbp->b_uploffset;
841 upl = cbp->b_upl;
842 b_flags = cbp->b_flags;
843 real_bp = cbp->b_real_bp;
844 zero_offset = cbp->b_validend;
845 iostate = (struct clios *)cbp->b_iostate;
846
847 if (real_bp) {
848 real_bp->b_dev = cbp->b_dev;
849 }
850
851 while (cbp) {
852 if ((cbp->b_flags & B_ERROR) && error == 0) {
853 error = cbp->b_error;
854 }
855
856 total_resid += cbp->b_resid;
857 total_size += cbp->b_bcount;
858
859 cbp_next = cbp->b_trans_next;
860
861 if (cbp_next == NULL) {
862 /*
863 * compute the overall size of the transaction
864 * in case we created one that has 'holes' in it
865 * 'total_size' represents the amount of I/O we
866 * did, not the span of the transaction w/r to the UPL
867 */
868 transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
869 }
870
871 if (cbp != cbp_head) {
872 free_io_buf(cbp);
873 }
874
875 cbp = cbp_next;
876 }
877
878 if (ISSET(b_flags, B_COMMIT_UPL)) {
879 cluster_handle_associated_upl(iostate,
880 cbp_head->b_upl,
881 upl_offset,
882 transaction_size);
883 }
884
885 if (error == 0 && total_resid) {
886 error = EIO;
887 }
888
889 if (error == 0) {
890 int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);
891
892 if (cliodone_func != NULL) {
893 cbp_head->b_bcount = transaction_size;
894
895 error = (*cliodone_func)(cbp_head, callback_arg);
896 }
897 }
898 if (zero_offset) {
899 cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
900 }
901
902 verify_ctx = cbp_head->b_attr.ba_verify_ctx;
903 cbp_head->b_attr.ba_verify_ctx = NULL;
904 if (verify_ctx) {
905 vnode_verify_flags_t verify_flags = VNODE_VERIFY_CONTEXT_FREE;
906 caddr_t verify_buf = NULL;
907 off_t start_off = cbp_head->b_lblkno * cbp_head->b_lblksize;
908 size_t verify_length = transaction_size;
909 vm_offset_t vaddr;
910
911 if (!error) {
912 verify_flags |= VNODE_VERIFY_WITH_CONTEXT;
913 error = ubc_upl_map_range(upl, upl_offset, round_page(transaction_size), VM_PROT_DEFAULT, &vaddr); /* Map it in */
914 if (error) {
915 panic("ubc_upl_map_range returned error %d, upl = %p, upl_offset = %d, size = %d",
916 error, upl, (int)upl_offset, (int)round_page(transaction_size));
917 } else {
918 verify_buf = (caddr_t)vaddr;
919 }
920 }
921
922 error = VNOP_VERIFY(vp, start_off, (uint8_t *)verify_buf, verify_length, 0, &verify_ctx, verify_flags, NULL);
923
924 if (verify_buf) {
925 (void)ubc_upl_unmap_range(upl, upl_offset, round_page(transaction_size));
926 verify_buf = NULL;
927 }
928 } else if (cbp_head->b_attr.ba_flags & BA_WILL_VERIFY) {
929 error = EBADMSG;
930 }
931
932 free_io_buf(cbp_head);
933
934 if (iostate) {
935 int need_wakeup = 0;
936
937 /*
938 * someone has issued multiple I/Os asynchronously
939 * and is waiting for them to complete (streaming)
940 */
941 lck_mtx_lock_spin(&iostate->io_mtxp);
942
943 if (error && iostate->io_error == 0) {
944 iostate->io_error = error;
945 }
946
947 iostate->io_completed += total_size;
948
949 if (iostate->io_wanted) {
950 /*
951 * someone is waiting for the state of
952 * this io stream to change
953 */
954 iostate->io_wanted = 0;
955 need_wakeup = 1;
956 }
957 lck_mtx_unlock(&iostate->io_mtxp);
958
959 if (need_wakeup) {
960 wakeup((caddr_t)&iostate->io_wanted);
961 }
962 }
963
964 if (b_flags & B_COMMIT_UPL) {
965 pg_offset = upl_offset & PAGE_MASK;
966 commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
967
968 if (error) {
969 upl_set_iodone_error(upl, error);
970
971 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp);
972 } else {
973 upl_flags = UPL_COMMIT_FREE_ON_EMPTY;
974
975 if ((b_flags & B_PHYS) && (b_flags & B_READ)) {
976 upl_flags |= UPL_COMMIT_SET_DIRTY;
977 }
978
979 if (b_flags & B_AGE) {
980 upl_flags |= UPL_COMMIT_INACTIVATE;
981 }
982
983 ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
984 }
985 }
986 if (real_bp) {
987 if (error) {
988 real_bp->b_flags |= B_ERROR;
989 real_bp->b_error = error;
990 }
991 real_bp->b_resid = total_resid;
992
993 buf_biodone(real_bp);
994 }
995 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
996 upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
997
998 return error;
999 }
1000
1001
1002 uint32_t
1003 cluster_throttle_io_limit(vnode_t vp, uint32_t *limit)
1004 {
1005 if (cluster_is_throttled(vp)) {
1006 *limit = calculate_max_throttle_size(vp);
1007 return 1;
1008 }
1009 return 0;
1010 }
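/*
 * Usage sketch (hypothetical caller): a filesystem can query this before
 * issuing a large transfer and, if throttled, break the request up:
 *
 *	uint32_t limit;
 *
 *	if (cluster_throttle_io_limit(vp, &limit)) {
 *		io_size = MIN(io_size, limit);	// stay under the throttle
 *	}
 */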
1011
1012
1013 void
1014 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
1015 {
1016 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
1017 upl_offset, size, bp, 0, 0);
1018
1019 if (bp == NULL || bp->b_datap == 0) {
1020 upl_page_info_t *pl;
1021 addr64_t zero_addr;
1022
1023 pl = ubc_upl_pageinfo(upl);
1024
1025 if (upl_device_page(pl) == TRUE) {
1026 zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset;
1027
1028 bzero_phys_nc(zero_addr, size);
1029 } else {
1030 while (size) {
1031 int page_offset;
1032 int page_index;
1033 int zero_cnt;
1034
1035 page_index = upl_offset / PAGE_SIZE;
1036 page_offset = upl_offset & PAGE_MASK;
1037
1038 zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << PAGE_SHIFT) + page_offset;
1039 zero_cnt = min(PAGE_SIZE - page_offset, size);
1040
1041 bzero_phys(zero_addr, zero_cnt);
1042
1043 size -= zero_cnt;
1044 upl_offset += zero_cnt;
1045 }
1046 }
1047 } else {
1048 bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
1049 }
1050
1051 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
1052 upl_offset, size, 0, 0, 0);
1053 }
1054
1055
1056 static void
1057 cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size)
1058 {
1059 /*
1060 * We will assign a verification context to cbp_head.
1061 * This will be passed back to the filesystem when
1062 * verifying (in cluster_iodone).
1063 */
1064 if (verify_block_size) {
1065 off_t start_off = cbp_head->b_lblkno * cbp_head->b_lblksize;
1066 size_t length;
1067 void *verify_ctx = NULL;
1068 int error = 0;
1069 vnode_t vp = buf_vnode(cbp_head);
1070
1071 if (cbp_head == cbp_tail) {
1072 length = cbp_head->b_bcount;
1073 } else {
1074 length = ((cbp_tail->b_lblkno * cbp_tail->b_lblksize) + cbp_tail->b_bcount) - start_off;
1075 }
1076
1077 /*
1078 * zero_offset is non zero for the transaction containing the EOF
1079 * (if the filesize is not page aligned). In that case we might
1080 * have the transaction size not be page/verify block size aligned
1081 */
1082 if ((zero_offset == 0) &&
1083 ((length < verify_block_size) || (length % verify_block_size)) != 0) {
1084 panic("%s length = %zu, verify_block_size = %zu",
1085 __FUNCTION__, length, verify_block_size);
1086 }
1087
1088 error = VNOP_VERIFY(vp, start_off, NULL, length,
1089 &verify_block_size, &verify_ctx, VNODE_VERIFY_CONTEXT_ALLOC, NULL);
1090
1091 cbp_head->b_attr.ba_verify_ctx = verify_ctx;
1092 } else {
1093 cbp_head->b_attr.ba_verify_ctx = NULL;
1094 }
1095
1096 cbp_head->b_validend = zero_offset;
1097 cbp_tail->b_flags |= B_EOT;
1098 }
1099
1100 static void
1101 cluster_wait_IO(buf_t cbp_head, int async)
1102 {
1103 buf_t cbp;
1104
1105 if (async) {
1106 /*
1107 * Async callback completion will not normally generate a
1108 * wakeup upon I/O completion. To get woken up, we set
1109 * b_trans_next (which is safe for us to modify) on the last
1110 * buffer to CLUSTER_IO_WAITING so that cluster_iodone knows
1111 * to wake us up when all buffers as part of this transaction
1112 * are completed. This is done under the umbrella of
1113 * cl_transaction_mtxp which is also taken in cluster_iodone.
1114 */
1115 bool done = true;
1116 buf_t last = NULL;
1117
1118 lck_mtx_lock_spin(&cl_transaction_mtxp);
1119
1120 for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) {
1121 if (!ISSET(cbp->b_flags, B_TDONE)) {
1122 done = false;
1123 }
1124 }
1125
1126 if (!done) {
1127 last->b_trans_next = CLUSTER_IO_WAITING;
1128
1129 DTRACE_IO1(wait__start, buf_t, last);
1130 do {
1131 msleep(last, &cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL);
1132
1133 /*
1134 * We should only have been woken up if all the
1135 * buffers are completed, but just in case...
1136 */
1137 done = true;
1138 for (cbp = cbp_head; cbp != CLUSTER_IO_WAITING; cbp = cbp->b_trans_next) {
1139 if (!ISSET(cbp->b_flags, B_TDONE)) {
1140 done = false;
1141 break;
1142 }
1143 }
1144 } while (!done);
1145 DTRACE_IO1(wait__done, buf_t, last);
1146
1147 last->b_trans_next = NULL;
1148 }
1149
1150 lck_mtx_unlock(&cl_transaction_mtxp);
1151 } else { // !async
1152 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1153 buf_biowait(cbp);
1154 }
1155 }
1156 }
1157
1158 static void
1159 cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
1160 {
1161 buf_t cbp;
1162 int error;
1163 boolean_t isswapout = FALSE;
1164
1165 /*
1166 * cluster_complete_transaction will
1167 * only be called if we've issued a complete chain in synchronous mode
1168 * or, we've already done a cluster_wait_IO on an incomplete chain
1169 */
1170 if (needwait) {
1171 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
1172 buf_biowait(cbp);
1173 }
1174 }
1175 /*
1176 * we've already waited on all of the I/Os in this transaction,
1177 * so mark all of the buf_t's in this transaction as B_TDONE
1178 * so that cluster_iodone sees the transaction as completed
1179 */
1180 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
1181 cbp->b_flags |= B_TDONE;
1182 }
1183 cbp = *cbp_head;
1184
1185 if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp)) {
1186 isswapout = TRUE;
1187 }
1188
1189 error = cluster_iodone(cbp, callback_arg);
1190
1191 if (!(flags & CL_ASYNC) && error && *retval == 0) {
1192 if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO)) {
1193 *retval = error;
1194 } else if (isswapout == TRUE) {
1195 *retval = error;
1196 }
1197 }
1198 *cbp_head = (buf_t)NULL;
1199 }
1200
1201
1202 static int
1203 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1204 int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1205 {
1206 buf_t cbp;
1207 u_int size;
1208 u_int io_size;
1209 int io_flags;
1210 int bmap_flags;
1211 int error = 0;
1212 int retval = 0;
1213 buf_t cbp_head = NULL;
1214 buf_t cbp_tail = NULL;
1215 int trans_count = 0;
1216 int max_trans_count;
1217 u_int pg_count;
1218 int pg_offset;
1219 u_int max_iosize;
1220 u_int max_vectors;
1221 int priv;
1222 int zero_offset = 0;
1223 int async_throttle = 0;
1224 mount_t mp;
1225 vm_offset_t upl_end_offset;
1226 boolean_t need_EOT = FALSE;
1227 size_t verify_block_size = 0;
1228
1229 /*
1230 * we currently don't support buffers larger than a page
1231 */
1232 if (real_bp && non_rounded_size > PAGE_SIZE) {
1233 panic("%s(): Called with real buffer of size %d bytes which "
1234 "is greater than the maximum allowed size of "
1235 "%d bytes (the system PAGE_SIZE).\n",
1236 __FUNCTION__, non_rounded_size, PAGE_SIZE);
1237 }
1238
1239 mp = vp->v_mount;
1240
1241 /*
1242 * we don't want to do any funny rounding of the size for IO requests
1243 * coming through the DIRECT or CONTIGUOUS paths... those pages don't
1244 * belong to us... we can't extend (nor do we need to) the I/O to fill
1245 * out a page
1246 */
1247 if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
1248 /*
1249 * round the requested size up so that this I/O ends on a
1250 * page boundary in case this is a 'write'... if the filesystem
1251 * has blocks allocated to back the page beyond the EOF, we want to
1252 * make sure to write out the zero's that are sitting beyond the EOF
1253 * so that in case the filesystem doesn't explicitly zero this area
1254 * if a hole is created via a lseek/write beyond the current EOF,
1255 * it will return zeros when it's read back from the disk. If the
1256 * physical allocation doesn't extend for the whole page, we'll
1257 * only write/read from the disk up to the end of this allocation
1258 * via the extent info returned from the VNOP_BLOCKMAP call.
1259 */
1260 pg_offset = upl_offset & PAGE_MASK;
1261
1262 size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
1263 } else {
1264 /*
1265 * anyone advertising a blocksize of 1 byte probably
1266 * can't deal with us rounding up the request size
1267 * AFP is one such filesystem/device
1268 */
1269 size = non_rounded_size;
1270 }
1271 upl_end_offset = upl_offset + size;
1272
1273 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
1274
1275 /*
1276 * Set the maximum transaction size to the maximum desired number of
1277 * buffers.
1278 */
1279 max_trans_count = 8;
1280 if (flags & CL_DEV_MEMORY) {
1281 max_trans_count = 16;
1282 }
1283
1284 if (flags & CL_READ) {
1285 io_flags = B_READ;
1286 bmap_flags = VNODE_READ;
1287
1288 max_iosize = mp->mnt_maxreadcnt;
1289 max_vectors = mp->mnt_segreadcnt;
1290
1291 if ((flags & CL_PAGEIN) && /* Cluster layer verification will be limited to pagein for now */
1292 !(mp->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1293 (VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL) == 0) &&
1294 verify_block_size) {
1295 if (verify_block_size != PAGE_SIZE) {
1296 verify_block_size = 0;
1297 }
1298 if (real_bp && verify_block_size) {
1299 panic("%s(): Called with real buffer and needs verification ",
1300 __FUNCTION__);
1301 }
1302 }
1303 } else {
1304 io_flags = B_WRITE;
1305 bmap_flags = VNODE_WRITE;
1306
1307 max_iosize = mp->mnt_maxwritecnt;
1308 max_vectors = mp->mnt_segwritecnt;
1309 }
1310 if (verify_block_size) {
1311 bmap_flags |= VNODE_CLUSTER_VERIFY;
1312 }
1313 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
1314
1315 /*
1316 * make sure the maximum iosize is a
1317 * multiple of the page size
1318 */
1319 max_iosize &= ~PAGE_MASK;
1320
1321 /*
1322 * Ensure the maximum iosize is sensible.
1323 */
1324 if (!max_iosize) {
1325 max_iosize = PAGE_SIZE;
1326 }
1327
1328 if (flags & CL_THROTTLE) {
1329 if (!(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) {
1330 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
1331
1332 if (max_iosize > max_throttle_size) {
1333 max_iosize = max_throttle_size;
1334 }
1335 async_throttle = calculate_max_throttle_cnt(vp);
1336 } else {
1337 if ((flags & CL_DEV_MEMORY)) {
1338 async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
1339 } else {
1340 u_int max_cluster;
1341 u_int max_cluster_size;
1342 u_int scale;
1343
1344 if (vp->v_mount->mnt_minsaturationbytecount) {
1345 max_cluster_size = vp->v_mount->mnt_minsaturationbytecount;
1346
1347 scale = 1;
1348 } else {
1349 max_cluster_size = MAX_CLUSTER_SIZE(vp);
1350
1351 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
1352 scale = WRITE_THROTTLE_SSD;
1353 } else {
1354 scale = WRITE_THROTTLE;
1355 }
1356 }
1357 if (max_iosize > max_cluster_size) {
1358 max_cluster = max_cluster_size;
1359 } else {
1360 max_cluster = max_iosize;
1361 }
1362
1363 if (size < max_cluster) {
1364 max_cluster = size;
1365 }
1366
1367 if (flags & CL_CLOSE) {
1368 scale += MAX_CLUSTERS;
1369 }
1370
1371 async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1);
1372 }
1373 }
1374 }
1375 if (flags & CL_AGE) {
1376 io_flags |= B_AGE;
1377 }
1378 if (flags & (CL_PAGEIN | CL_PAGEOUT)) {
1379 io_flags |= B_PAGEIO;
1380 }
1381 if (flags & (CL_IOSTREAMING)) {
1382 io_flags |= B_IOSTREAMING;
1383 }
1384 if (flags & CL_COMMIT) {
1385 io_flags |= B_COMMIT_UPL;
1386 }
1387 if (flags & CL_DIRECT_IO) {
1388 io_flags |= B_PHYS;
1389 }
1390 if (flags & (CL_PRESERVE | CL_KEEPCACHED)) {
1391 io_flags |= B_CACHE;
1392 }
1393 if (flags & CL_PASSIVE) {
1394 io_flags |= B_PASSIVE;
1395 }
1396 if (flags & CL_ENCRYPTED) {
1397 io_flags |= B_ENCRYPTED_IO;
1398 }
1399
1400 if (vp->v_flag & VSYSTEM) {
1401 io_flags |= B_META;
1402 }
1403
1404 if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
1405 /*
1406 * then we are going to end up
1407 * with a page that we can't complete (the file size wasn't a multiple
1408 * of PAGE_SIZE and we're trying to read to the end of the file
1409 * so we'll go ahead and zero out the portion of the page we can't
1410 * read in from the file
1411 */
1412 zero_offset = (int)(upl_offset + non_rounded_size);
1413 } else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO) &&
1414 (!proc_allow_nocache_write_fs_blksize(current_proc()) ||
1415 (page_aligned(f_offset) && page_aligned(non_rounded_size)))) {
1416 assert(ISSET(flags, CL_COMMIT));
1417
1418 // For a direct/uncached write, we need to lock pages...
1419 //
1420 // ...except if the write offsets and lengths are not page aligned.
1421 // This specific case is allowed to enhance performance with the
1422 // understanding that the caller is responsible for synchronization
1423 // between writers and other writers/readers.
1424
1425 upl_t cached_upl;
1426
1427 /*
1428 * Create a UPL to lock the pages in the cache whilst the
1429 * write is in progress.
1430 */
1431 ubc_create_upl_kernel(vp, f_offset, non_rounded_size, &cached_upl,
1432 NULL, UPL_SET_LITE, VM_KERN_MEMORY_FILE);
1433
1434 /*
1435 * Attach this UPL to the other UPL so that we can find it
1436 * later.
1437 */
1438 upl_set_associated_upl(upl, cached_upl);
1439
1440 if (upl_offset & PAGE_MASK) {
1441 /*
1442 * The two UPLs are not aligned, so mark the first page in
1443 * @upl so that cluster_handle_associated_upl can handle
1444 * it accordingly.
1445 */
1446 upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
1447 upl_page_set_mark(pl, 0, true);
1448 }
1449 }
1450
1451 while (size) {
1452 daddr64_t blkno;
1453 daddr64_t lblkno;
1454 size_t io_size_tmp;
1455 u_int io_size_wanted;
1456 uint32_t lblksize;
1457
1458 if (size > max_iosize) {
1459 io_size = max_iosize;
1460 } else {
1461 io_size = size;
1462 }
1463
1464 io_size_wanted = io_size;
1465 io_size_tmp = (size_t)io_size;
1466
1467 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL))) {
1468 break;
1469 }
1470
1471 if (io_size_tmp > io_size_wanted) {
1472 io_size = io_size_wanted;
1473 } else {
1474 io_size = (u_int)io_size_tmp;
1475 }
1476
1477 if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
1478 real_bp->b_blkno = blkno;
1479 }
1480
1481 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
1482 (int)f_offset, (int)(blkno >> 32), (int)blkno, io_size, 0);
1483
1484 if (io_size == 0) {
1485 /*
1486 * vnop_blockmap didn't return an error... however, it did
1487 * return an extent size of 0 which means we can't
1488 * make forward progress on this I/O... a hole in the
1489 * file would be returned as a blkno of -1 with a non-zero io_size
1490 * a real extent is returned with a blkno != -1 and a non-zero io_size
1491 */
1492 error = EINVAL;
1493 break;
1494 }
1495 if (!(flags & CL_READ) && blkno == -1) {
1496 off_t e_offset;
1497 int pageout_flags;
1498
1499 if (upl_get_internal_vectorupl(upl)) {
1500 panic("Vector UPLs should not take this code-path");
1501 }
1502 /*
1503 * we're writing into a 'hole'
1504 */
1505 if (flags & CL_PAGEOUT) {
1506 /*
1507 * if we got here via cluster_pageout
1508 * then just error the request and return
1509 * the 'hole' should already have been covered
1510 */
1511 error = EINVAL;
1512 break;
1513 }
1514 /*
1515 * we can get here if the cluster code happens to
1516 * pick up a page that was dirtied via mmap vs
1517 * a 'write' and the page targets a 'hole'...
1518 * i.e. the writes to the cluster were sparse
1519 * and the file was being written for the first time
1520 *
1521 * we can also get here if the filesystem supports
1522 * 'holes' that are less than PAGE_SIZE.... because
1523 * we can't know if the range in the page that covers
1524 * the 'hole' has been dirtied via an mmap or not,
1525 * we have to assume the worst and try to push the
1526 * entire page to storage.
1527 *
1528 * Try paging out the page individually before
1529 * giving up entirely and dumping it (the pageout
1530 * path will ensure that the zero extent accounting
1531 * has been taken care of before we get back into cluster_io)
1532 *
1533 * go direct to vnode_pageout so that we don't have to
1534 * unbusy the page from the UPL... we used to do this
1535 * so that we could call ubc_msync, but that results
1536 * in a potential deadlock if someone else races us to acquire
1537 * that page and wins and in addition needs one of the pages
1538 * we're continuing to hold in the UPL
1539 */
1540 pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
1541
1542 if (!(flags & CL_ASYNC)) {
1543 pageout_flags |= UPL_IOSYNC;
1544 }
1545 if (!(flags & CL_COMMIT)) {
1546 pageout_flags |= UPL_NOCOMMIT;
1547 }
1548
1549 if (cbp_head) {
1550 buf_t prev_cbp;
1551 uint32_t bytes_in_last_page;
1552
1553 /*
1554 * first we have to wait for the current outstanding I/Os
1555 * to complete... EOT hasn't been set yet on this transaction
1556 * so the pages won't be released
1557 */
1558 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1559
1560 bytes_in_last_page = cbp_head->b_uploffset & PAGE_MASK;
1561 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1562 bytes_in_last_page += cbp->b_bcount;
1563 }
1564 bytes_in_last_page &= PAGE_MASK;
1565
1566 while (bytes_in_last_page) {
1567 /*
1568 * we've got a transaction that
1569 * includes the page we're about to push out through vnode_pageout...
1570 * find the bp's in the list which intersect this page and either
1571 * remove them entirely from the transaction (there could be multiple bp's), or
1572 * round its iosize down to the page boundary (there can only be one)...
1573 *
1574 * find the last bp in the list and act on it
1575 */
1576 for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next) {
1577 prev_cbp = cbp;
1578 }
1579
1580 if (bytes_in_last_page >= cbp->b_bcount) {
1581 /*
1582 * this buf no longer has any I/O associated with it
1583 */
1584 bytes_in_last_page -= cbp->b_bcount;
1585 cbp->b_bcount = 0;
1586
1587 free_io_buf(cbp);
1588
1589 if (cbp == cbp_head) {
1590 assert(bytes_in_last_page == 0);
1591 /*
1592 * the buf we just freed was the only buf in
1593 * this transaction... so there's no I/O to do
1594 */
1595 cbp_head = NULL;
1596 cbp_tail = NULL;
1597 } else {
1598 /*
1599 * remove the buf we just freed from
1600 * the transaction list
1601 */
1602 prev_cbp->b_trans_next = NULL;
1603 cbp_tail = prev_cbp;
1604 }
1605 } else {
1606 /*
1607 * this is the last bp that has I/O
1608 * intersecting the page of interest
1609 * only some of the I/O is in the intersection
1610 * so clip the size but keep it in the transaction list
1611 */
1612 cbp->b_bcount -= bytes_in_last_page;
1613 cbp_tail = cbp;
1614 bytes_in_last_page = 0;
1615 }
1616 }
1617 if (cbp_head) {
1618 /*
1619 * there was more to the current transaction
1620 * than just the page we are pushing out via vnode_pageout...
1621 * mark it as finished and complete it... we've already
1622 * waited for the I/Os to complete above in the call to cluster_wait_IO
1623 */
1624 cluster_EOT(cbp_head, cbp_tail, 0, 0);
1625
1626 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1627
1628 trans_count = 0;
1629 }
1630 }
1631 if (vnode_pageout(vp, upl, (upl_offset_t)trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1632 error = EINVAL;
1633 }
1634 e_offset = round_page_64(f_offset + 1);
1635 io_size = (u_int)(e_offset - f_offset);
1636
1637 f_offset += io_size;
1638 upl_offset += io_size;
1639
1640 if (size >= io_size) {
1641 size -= io_size;
1642 } else {
1643 size = 0;
1644 }
1645 /*
1646 * keep track of how much of the original request
1647 * that we've actually completed... non_rounded_size
1648 * may go negative due to us rounding the request
1649 * to a page size multiple (i.e. size > non_rounded_size)
1650 */
1651 non_rounded_size -= io_size;
1652
1653 if (non_rounded_size <= 0) {
1654 /*
1655 * we've transferred all of the data in the original
1656 * request, but we were unable to complete the tail
1657 * of the last page because the file didn't have
1658 * an allocation to back that portion... this is ok.
1659 */
1660 size = 0;
1661 }
1662 if (error) {
1663 if (size == 0) {
1664 flags &= ~CL_COMMIT;
1665 }
1666 break;
1667 }
1668 continue;
1669 }
1670
1671 lblksize = CLUSTER_IO_BLOCK_SIZE;
1672 lblkno = (daddr64_t)(f_offset / lblksize);
1673
1674 /*
1675 * we have now figured out how much I/O we can do - this is in 'io_size'
1676 * pg_offset is the starting point in the first page for the I/O
1677 * pg_count is the number of full and partial pages that 'io_size' encompasses
1678 */
1679 pg_offset = upl_offset & PAGE_MASK;
1680
1681 if (flags & CL_DEV_MEMORY) {
1682 /*
1683 * treat physical requests as one 'giant' page
1684 */
1685 pg_count = 1;
1686 } else {
1687 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
1688 }
1689
1690 if ((flags & CL_READ) && blkno == -1) {
1691 vm_offset_t commit_offset;
1692 int bytes_to_zero;
1693 int complete_transaction_now = 0;
1694
1695 /*
1696 * if we're reading and blkno == -1, then we've got a
1697 * 'hole' in the file that we need to deal with by zeroing
1698 * out the affected area in the upl
1699 */
1700 if (io_size >= (u_int)non_rounded_size) {
1701 /*
1702 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1703 * then 'zero_offset' will be non-zero
1704 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
1705 * (indicated by the io_size finishing off the I/O request for this UPL)
1706 * then we're not going to issue an I/O for the
1707 * last page in this upl... we need to zero both the hole and the tail
1708 * of the page beyond the EOF, since the delayed zero-fill won't kick in
1709 */
1710 bytes_to_zero = non_rounded_size;
1711 if (!(flags & CL_NOZERO)) {
1712 bytes_to_zero = (int)((((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset);
1713 }
1714
1715 zero_offset = 0;
1716 } else {
1717 bytes_to_zero = io_size;
1718 }
1719
1720 pg_count = 0;
1721
1722 cluster_zero(upl, (upl_offset_t)upl_offset, bytes_to_zero, real_bp);
1723
1724 if (cbp_head) {
1725 int pg_resid;
1726
1727 /*
1728 * if there is a current I/O chain pending
1729 * then the first page of the group we just zero'd
1730 * will be handled by the I/O completion if the zero
1731 * fill started in the middle of the page
1732 */
1733 commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1734
1735 pg_resid = (int)(commit_offset - upl_offset);
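/*
 * Illustrative example (values assumed): with PAGE_SIZE = 4096 and
 * upl_offset = 4608, commit_offset rounds up to 8192 and pg_resid = 3584,
 * the remainder of the page that the pending I/O chain will complete.
 */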
1736
1737 if (bytes_to_zero >= pg_resid) {
1738 /*
1739 * the last page of the current I/O
1740 * has been completed...
1741 * compute the number of fully zero'd
1742 * pages that are beyond it
1743 * plus the last page if it's partial
1744 * and we have no more I/O to issue...
1745 * otherwise a partial page is left
1746 * to begin the next I/O
1747 */
1748 if ((int)io_size >= non_rounded_size) {
1749 pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
1750 } else {
1751 pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
1752 }
1753
1754 complete_transaction_now = 1;
1755 }
1756 } else {
1757 /*
1758 * no pending I/O to deal with
1759 * so, commit all of the fully zero'd pages
1760 * plus the last page if it's partial
1761 * and we have no more I/O to issue...
1762 * otherwise a partial page is left
1763 * to begin the next I/O
1764 */
1765 if ((int)io_size >= non_rounded_size) {
1766 pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
1767 } else {
1768 pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
1769 }
1770
1771 commit_offset = upl_offset & ~PAGE_MASK;
1772 }
1773
1774 // Associated UPL is currently only used in the direct write path
1775 assert(!upl_associated_upl(upl));
1776
1777 if ((flags & CL_COMMIT) && pg_count) {
1778 ubc_upl_commit_range(upl, (upl_offset_t)commit_offset,
1779 pg_count * PAGE_SIZE,
1780 UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
1781 }
1782 upl_offset += io_size;
1783 f_offset += io_size;
1784 size -= io_size;
1785
1786 /*
1787 * keep track of how much of the original request
1788 * we've actually completed... non_rounded_size
1789 * may go negative due to us rounding the request
1790 * to a page size multiple (i.e. size > non_rounded_size)
1791 */
1792 non_rounded_size -= io_size;
1793
1794 if (non_rounded_size <= 0) {
1795 /*
1796 * we've transferred all of the data in the original
1797 * request, but we were unable to complete the tail
1798 * of the last page because the file didn't have
1799 * an allocation to back that portion... this is ok.
1800 */
1801 size = 0;
1802 }
1803 if (cbp_head && (complete_transaction_now || size == 0)) {
1804 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1805
1806 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
1807
1808 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1809
1810 trans_count = 0;
1811 }
1812 continue;
1813 }
1814 if (pg_count > max_vectors) {
1815 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
1816 io_size = PAGE_SIZE - pg_offset;
1817 pg_count = 1;
1818 } else {
1819 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
1820 pg_count = max_vectors;
1821 }
1822 }
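/*
 * Illustrative trimming (values assumed, not taken from the source):
 * with PAGE_SIZE = 4096, max_vectors = 64, pg_offset = 0, pg_count = 66 and
 * io_size = 270336 (66 pages), the excess (2 * 4096 = 8192) is smaller than
 * io_size, so io_size is trimmed to 262144 and pg_count to 64.
 */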
1823 /*
1824 * If the transaction is going to reach the maximum number of
1825 * desired elements, truncate the i/o to the nearest page so
1826 * that the actual i/o is initiated after this buffer is
1827 * created and added to the i/o chain.
1828 *
1829 * I/O directed to physically contiguous memory
1830 * doesn't have a requirement to make sure we 'fill' a page
1831 */
1832 if (!(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
1833 ((upl_offset + io_size) & PAGE_MASK)) {
1834 vm_offset_t aligned_ofs;
1835
1836 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
1837 /*
1838 * If the io_size does not actually finish off even a
1839 * single page we have to keep adding buffers to the
1840 * transaction despite having reached the desired limit.
1841 *
1842 * Eventually we get here with the page being finished
1843 * off (and exceeded) and then we truncate the size of
1844 * this i/o request so that it is page aligned so that
1845 * we can finally issue the i/o on the transaction.
1846 */
1847 if (aligned_ofs > upl_offset) {
1848 io_size = (u_int)(aligned_ofs - upl_offset);
1849 pg_count--;
1850 }
1851 }
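/*
 * Illustrative truncation (values assumed): with PAGE_SIZE = 4096,
 * upl_offset = 8192 and io_size = 6000, aligned_ofs = 12288, so io_size is
 * cut back to 4096 and the trailing partial page is left to start the next
 * transaction once this one has been issued.
 */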
1852
1853 if (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) {
1854 /*
1855 * if we're not targeting a virtual device i.e. a disk image
1856 * it's safe to dip into the reserve pool since real devices
1857 * can complete this I/O request without requiring additional
1858 * bufs from the alloc_io_buf pool
1859 */
1860 priv = 1;
1861 } else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT) && !cbp_head) {
1862 /*
1863 * Throttle the speculative IO
1864 *
1865 * We can only throttle this if it is the first iobuf
1866 * for the transaction. alloc_io_buf implements
1867 * additional restrictions for diskimages anyway.
1868 */
1869 priv = 0;
1870 } else {
1871 priv = 1;
1872 }
1873
1874 cbp = alloc_io_buf(vp, priv);
1875
1876 if (flags & CL_PAGEOUT) {
1877 u_int i;
1878
1879 /*
1880 * since blocks are addressed in units of lblksize (CLUSTER_IO_BLOCK_SIZE), scale
1881 * the iteration to cover (PAGE_SIZE * pg_count) bytes worth of blocks.
1882 */
1883 for (i = 0; i < (PAGE_SIZE * pg_count) / lblksize; i++) {
1884 if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY) {
1885 panic("BUSY bp found in cluster_io");
1886 }
1887 }
1888 }
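/*
 * Illustrative scaling (values assumed): with PAGE_SIZE = 4096, pg_count = 4
 * and lblksize = 4096, the loop above runs (4096 * 4) / 4096 = 4 times,
 * invalidating logical blocks lblkno .. lblkno + 3.
 */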
1889 if (flags & CL_ASYNC) {
1890 if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg)) {
1891 panic("buf_setcallback failed");
1892 }
1893 }
1894 cbp->b_cliodone = (void *)callback;
1895 cbp->b_flags |= io_flags;
1896 if (flags & CL_NOCACHE) {
1897 cbp->b_attr.ba_flags |= BA_NOCACHE;
1898 }
1899 if (verify_block_size) {
1900 cbp->b_attr.ba_flags |= BA_WILL_VERIFY;
1901 }
1902
1903 cbp->b_lblkno = lblkno;
1904 cbp->b_lblksize = lblksize;
1905 cbp->b_blkno = blkno;
1906 cbp->b_bcount = io_size;
1907
1908 if (buf_setupl(cbp, upl, (uint32_t)upl_offset)) {
1909 panic("buf_setupl failed");
1910 }
1911 #if CONFIG_IOSCHED
1912 upl_set_blkno(upl, upl_offset, io_size, blkno);
1913 #endif
1914 cbp->b_trans_next = (buf_t)NULL;
1915
1916 if ((cbp->b_iostate = (void *)iostate)) {
1917 /*
1918 * caller wants to track the state of this
1919 * io... bump the amount issued against this stream
1920 */
1921 iostate->io_issued += io_size;
1922 }
1923
1924 if (flags & CL_READ) {
1925 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
1926 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1927 } else {
1928 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
1929 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1930 }
1931
1932 if (cbp_head) {
1933 cbp_tail->b_trans_next = cbp;
1934 cbp_tail = cbp;
1935 } else {
1936 cbp_head = cbp;
1937 cbp_tail = cbp;
1938
1939 if ((cbp_head->b_real_bp = real_bp)) {
1940 real_bp = (buf_t)NULL;
1941 }
1942 }
1943 *(buf_t *)(&cbp->b_trans_head) = cbp_head;
1944
1945 trans_count++;
1946
1947 upl_offset += io_size;
1948 f_offset += io_size;
1949 size -= io_size;
1950 /*
1951 * keep track of how much of the original request
1952 * we've actually completed... non_rounded_size
1953 * may go negative due to us rounding the request
1954 * to a page size multiple (i.e. size > non_rounded_size)
1955 */
1956 non_rounded_size -= io_size;
1957
1958 if (non_rounded_size <= 0) {
1959 /*
1960 * we've transferred all of the data in the original
1961 * request, but we were unable to complete the tail
1962 * of the last page because the file didn't have
1963 * an allocation to back that portion... this is ok.
1964 */
1965 size = 0;
1966 }
1967 if (size == 0) {
1968 /*
1969 * we have no more I/O to issue, so go
1970 * finish the final transaction
1971 */
1972 need_EOT = TRUE;
1973 } else if (((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
1974 ((flags & CL_ASYNC) || trans_count > max_trans_count)) {
1975 /*
1976 * I/O directed to physically contiguous memory...
1977 * which doesn't have a requirement to make sure we 'fill' a page
1978 * or...
1979 * the current I/O we've prepared fully
1980 * completes the last page in this request
1981 * and ...
1982 * it's either an ASYNC request or
1983 * we've already accumulated more than max_trans_count I/Os into
1984 * this transaction so mark it as complete so that
1985 * it can finish asynchronously or via the cluster_complete_transaction
1986 * below if the request is synchronous
1987 */
1988 need_EOT = TRUE;
1989 }
1990 if (need_EOT == TRUE) {
1991 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
1992 }
1993
1994 if (flags & CL_THROTTLE) {
1995 (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
1996 }
1997
1998 if (!(io_flags & B_READ)) {
1999 vnode_startwrite(vp);
2000 }
2001
2002 if (flags & CL_RAW_ENCRYPTED) {
2003 /*
2004 * User requested raw encrypted bytes.
2005 * Twiddle the bit in the ba_flags for the buffer
2006 */
2007 cbp->b_attr.ba_flags |= BA_RAW_ENCRYPTED_IO;
2008 }
2009
2010 (void) VNOP_STRATEGY(cbp);
2011
2012 if (need_EOT == TRUE) {
2013 if (!(flags & CL_ASYNC)) {
2014 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
2015 }
2016
2017 need_EOT = FALSE;
2018 trans_count = 0;
2019 cbp_head = NULL;
2020 }
2021 }
2022 if (error) {
2023 int abort_size;
2024
2025 io_size = 0;
2026
2027 if (cbp_head) {
2028 /*
2029 * Wait until all of the outstanding I/O
2030 * for this partial transaction has completed
2031 */
2032 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2033
2034 /*
2035 * Rewind the upl offset to the beginning of the
2036 * transaction.
2037 */
2038 upl_offset = cbp_head->b_uploffset;
2039 }
2040
2041 if (ISSET(flags, CL_COMMIT)) {
2042 cluster_handle_associated_upl(iostate, upl,
2043 (upl_offset_t)upl_offset,
2044 (upl_size_t)(upl_end_offset - upl_offset));
2045 }
2046
2047 // Free all the IO buffers in this transaction
2048 for (cbp = cbp_head; cbp;) {
2049 buf_t cbp_next;
2050
2051 size += cbp->b_bcount;
2052 io_size += cbp->b_bcount;
2053
2054 cbp_next = cbp->b_trans_next;
2055 free_io_buf(cbp);
2056 cbp = cbp_next;
2057 }
2058
2059 if (iostate) {
2060 int need_wakeup = 0;
2061
2062 /*
2063 * update the error condition for this stream...
2064 * since we never really issued the io,
2065 * just go ahead and adjust it back
2066 */
2067 lck_mtx_lock_spin(&iostate->io_mtxp);
2068
2069 if (iostate->io_error == 0) {
2070 iostate->io_error = error;
2071 }
2072 iostate->io_issued -= io_size;
2073
2074 if (iostate->io_wanted) {
2075 /*
2076 * someone is waiting for the state of
2077 * this io stream to change
2078 */
2079 iostate->io_wanted = 0;
2080 need_wakeup = 1;
2081 }
2082 lck_mtx_unlock(&iostate->io_mtxp);
2083
2084 if (need_wakeup) {
2085 wakeup((caddr_t)&iostate->io_wanted);
2086 }
2087 }
2088
2089 if (flags & CL_COMMIT) {
2090 int upl_flags;
2091
2092 pg_offset = upl_offset & PAGE_MASK;
2093 abort_size = (int)((upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK);
2094
2095 upl_flags = cluster_ioerror(upl, (int)(upl_offset - pg_offset),
2096 abort_size, error, io_flags, vp);
2097
2098 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
2099 upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
2100 }
2101 if (retval == 0) {
2102 retval = error;
2103 }
2104 } else if (cbp_head) {
2105 panic("%s(): cbp_head is not NULL.", __FUNCTION__);
2106 }
2107
2108 if (real_bp) {
2109 /*
2110 * can get here if we either encountered an error
2111 * or we completely zero-filled the request and
2112 * no I/O was issued
2113 */
2114 if (error) {
2115 real_bp->b_flags |= B_ERROR;
2116 real_bp->b_error = error;
2117 }
2118 buf_biodone(real_bp);
2119 }
2120 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
2121
2122 return retval;
2123 }
2124
2125 #define reset_vector_run_state() \
2126 issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
2127
2128 static int
2129 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
2130 int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
2131 {
2132 vector_upl_set_pagelist(vector_upl);
2133
2134 if (io_flag & CL_READ) {
2135 if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0)) {
2136 io_flag &= ~CL_PRESERVE; /*don't zero fill*/
2137 } else {
2138 io_flag |= CL_PRESERVE; /*zero fill*/
2139 }
2140 }
2141 return cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg);
2142 }
2143
2144 static int
2145 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
2146 {
2147 int pages_in_prefetch;
2148
2149 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
2150 (int)f_offset, size, (int)filesize, 0, 0);
2151
2152 if (f_offset >= filesize) {
2153 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2154 (int)f_offset, 0, 0, 0, 0);
2155 return 0;
2156 }
2157 if ((off_t)size > (filesize - f_offset)) {
2158 size = (u_int)(filesize - f_offset);
2159 }
2160 pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
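/*
 * Illustrative clamp (values assumed, not taken from the source): with
 * filesize = 1000000, f_offset = 995328 and a requested size of 65536,
 * size is clamped to 4672 bytes and
 * pages_in_prefetch = (4672 + 4095) / 4096 = 2.
 */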
2161
2162 advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
2163
2164 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2165 (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
2166
2167 return pages_in_prefetch;
2168 }
2169
2170
2171
2172 static void
2173 cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
2174 int bflag)
2175 {
2176 daddr64_t r_addr;
2177 off_t f_offset;
2178 int size_of_prefetch;
2179 u_int max_prefetch;
2180
2181
2182 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
2183 (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
2184
2185 if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
2186 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2187 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
2188 return;
2189 }
2190 if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
2191 rap->cl_ralen = 0;
2192 rap->cl_maxra = 0;
2193
2194 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2195 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
2196
2197 return;
2198 }
2199
2200 max_prefetch = cluster_max_prefetch(vp,
2201 cluster_max_io_size(vp->v_mount, CL_READ), speculative_prefetch_max);
2202
2203 if (max_prefetch <= PAGE_SIZE) {
2204 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2205 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0);
2206 return;
2207 }
2208 if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) {
2209 if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) {
2210 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2211 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
2212 return;
2213 }
2214 }
2215 r_addr = MAX(extent->e_addr, rap->cl_maxra) + 1;
2216 f_offset = (off_t)(r_addr * PAGE_SIZE_64);
2217
2218 size_of_prefetch = 0;
2219
2220 ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
2221
2222 if (size_of_prefetch) {
2223 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2224 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
2225 return;
2226 }
2227 if (f_offset < filesize) {
2228 daddr64_t read_size;
2229
2230 rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
2231
2232 read_size = (extent->e_addr + 1) - extent->b_addr;
2233
2234 if (read_size > rap->cl_ralen) {
2235 if (read_size > max_prefetch / PAGE_SIZE) {
2236 rap->cl_ralen = max_prefetch / PAGE_SIZE;
2237 } else {
2238 rap->cl_ralen = (int)read_size;
2239 }
2240 }
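/*
 * Illustrative window growth (values assumed, not taken from the source):
 * with max_prefetch = 1MB (256 pages), successive sequential reads grow
 * cl_ralen 1 -> 2 -> 4 -> ... up to 256 pages; a single read spanning more
 * pages than the current window bumps cl_ralen straight to that read size,
 * still capped at 256 pages.
 */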
2241 size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
2242
2243 if (size_of_prefetch) {
2244 rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
2245 }
2246 }
2247 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2248 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
2249 }
2250
2251
2252 int
2253 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2254 int size, off_t filesize, int flags)
2255 {
2256 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2257 }
2258
2259
2260 int
2261 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2262 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2263 {
2264 int io_size;
2265 int rounded_size;
2266 off_t max_size;
2267 int local_flags;
2268
2269 local_flags = CL_PAGEOUT | CL_THROTTLE;
2270
2271 if ((flags & UPL_IOSYNC) == 0) {
2272 local_flags |= CL_ASYNC;
2273 }
2274 if ((flags & UPL_NOCOMMIT) == 0) {
2275 local_flags |= CL_COMMIT;
2276 }
2277 if ((flags & UPL_KEEPCACHED)) {
2278 local_flags |= CL_KEEPCACHED;
2279 }
2280 if (flags & UPL_PAGING_ENCRYPTED) {
2281 local_flags |= CL_ENCRYPTED;
2282 }
2283
2284
2285 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
2286 (int)f_offset, size, (int)filesize, local_flags, 0);
2287
2288 /*
2289 * If they didn't specify any I/O, then we are done...
2290 * we can't issue an abort because we don't know how
2291 * big the upl really is
2292 */
2293 if (size <= 0) {
2294 return EINVAL;
2295 }
2296
2297 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
2298 if (local_flags & CL_COMMIT) {
2299 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2300 }
2301 return EROFS;
2302 }
2303 /*
2304 * can't page-out from a negative offset
2305 * or if we're starting beyond the EOF
2306 * or if the file offset isn't page aligned
2307 * or the size requested isn't a multiple of PAGE_SIZE
2308 */
2309 if (f_offset < 0 || f_offset >= filesize ||
2310 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
2311 if (local_flags & CL_COMMIT) {
2312 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2313 }
2314 return EINVAL;
2315 }
2316 max_size = filesize - f_offset;
2317
2318 if (size < max_size) {
2319 io_size = size;
2320 } else {
2321 io_size = (int)max_size;
2322 }
2323
2324 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2325
2326 if (size > rounded_size) {
2327 if (local_flags & CL_COMMIT) {
2328 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
2329 UPL_ABORT_FREE_ON_EMPTY);
2330 }
2331 }
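/*
 * Illustrative sizing (values assumed): with filesize = 1000000,
 * f_offset = 995328 and size = 12288, max_size = 4672 so io_size = 4672 and
 * rounded_size = 8192; the trailing 4096 bytes of the upl beyond rounded_size
 * are aborted (when CL_COMMIT is set) and only io_size is handed to cluster_io.
 */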
2332 return cluster_io(vp, upl, upl_offset, f_offset, io_size,
2333 local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2334 }
2335
2336
2337 int
2338 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2339 int size, off_t filesize, int flags)
2340 {
2341 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2342 }
2343
2344
2345 int
2346 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2347 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2348 {
2349 u_int io_size;
2350 int rounded_size;
2351 off_t max_size;
2352 int retval;
2353 int local_flags = 0;
2354
2355 if (upl == NULL || size < 0) {
2356 panic("cluster_pagein: NULL upl passed in");
2357 }
2358
2359 if ((flags & UPL_IOSYNC) == 0) {
2360 local_flags |= CL_ASYNC;
2361 }
2362 if ((flags & UPL_NOCOMMIT) == 0) {
2363 local_flags |= CL_COMMIT;
2364 }
2365 if (flags & UPL_IOSTREAMING) {
2366 local_flags |= CL_IOSTREAMING;
2367 }
2368 if (flags & UPL_PAGING_ENCRYPTED) {
2369 local_flags |= CL_ENCRYPTED;
2370 }
2371
2372
2373 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
2374 (int)f_offset, size, (int)filesize, local_flags, 0);
2375
2376 /*
2377 * can't page-in from a negative offset
2378 * or if we're starting beyond the EOF
2379 * or if the file offset isn't page aligned
2380 * or the size requested isn't a multiple of PAGE_SIZE
2381 */
2382 if (f_offset < 0 || f_offset >= filesize ||
2383 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
2384 if (local_flags & CL_COMMIT) {
2385 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2386 }
2387
2388 if (f_offset >= filesize) {
2389 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CLUSTER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CL_PGIN_PAST_EOF), 0 /* arg */);
2390 }
2391
2392 return EINVAL;
2393 }
2394 max_size = filesize - f_offset;
2395
2396 if (size < max_size) {
2397 io_size = size;
2398 } else {
2399 io_size = (int)max_size;
2400 }
2401
2402 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2403
2404 if (size > rounded_size && (local_flags & CL_COMMIT)) {
2405 ubc_upl_abort_range(upl, upl_offset + rounded_size,
2406 size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2407 }
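/*
 * Illustrative sizing (values assumed): with filesize = 1000000,
 * f_offset = 995328 and size = 12288, io_size = 4672 and rounded_size = 8192;
 * the 4096 bytes of the upl past rounded_size lie wholly beyond the EOF and
 * are aborted with UPL_ABORT_ERROR before the page-in is issued.
 */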
2408
2409 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2410 local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2411
2412 return retval;
2413 }
2414
2415
2416 int
2417 cluster_bp(buf_t bp)
2418 {
2419 return cluster_bp_ext(bp, NULL, NULL);
2420 }
2421
2422
2423 int
2424 cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
2425 {
2426 off_t f_offset;
2427 int flags;
2428
2429 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
2430 bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
2431
2432 if (bp->b_flags & B_READ) {
2433 flags = CL_ASYNC | CL_READ;
2434 } else {
2435 flags = CL_ASYNC;
2436 }
2437 if (bp->b_flags & B_PASSIVE) {
2438 flags |= CL_PASSIVE;
2439 }
2440
2441 f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
2442
2443 return cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg);
2444 }
2445
2446
2447
2448 int
2449 cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
2450 {
2451 return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
2452 }
2453
2454
2455 int
2456 cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
2457 int xflags, int (*callback)(buf_t, void *), void *callback_arg)
2458 {
2459 user_ssize_t cur_resid;
2460 int retval = 0;
2461 int flags;
2462 int zflags;
2463 int bflag;
2464 int write_type = IO_COPY;
2465 u_int32_t write_length;
2466 uint32_t min_direct_size = MIN_DIRECT_WRITE_SIZE;
2467
2468 flags = xflags;
2469
2470 if (flags & IO_PASSIVE) {
2471 bflag = CL_PASSIVE;
2472 } else {
2473 bflag = 0;
2474 }
2475
2476 if (vp->v_flag & VNOCACHE_DATA) {
2477 flags |= IO_NOCACHE;
2478 bflag |= CL_NOCACHE;
2479 }
2480 if (uio == NULL) {
2481 /*
2482 * no user data...
2483 * this call is being made to zero-fill some range in the file
2484 */
2485 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
2486
2487 return retval;
2488 }
2489 /*
2490 * do a write through the cache if one of the following is true:
2491 * NOCACHE is not set, or NODIRECT is set
2492 * the uio request doesn't target USERSPACE
2493 * otherwise, find out whether we want the direct or contig variant for
2494 * the first vector in the uio request
2495 */
2496 if (((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
2497 if (proc_allow_nocache_write_fs_blksize(current_proc())) {
2498 uint32_t fs_bsize = vp->v_mount->mnt_vfsstat.f_bsize;
2499
2500 if (fs_bsize && (fs_bsize < PAGE_SIZE_64) &&
2501 ((fs_bsize & (fs_bsize - 1)) == 0)) {
2502 min_direct_size = fs_bsize;
2503 }
2504 }
2505 retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
2506 }
2507
2508 if ((flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT) {
2509 /*
2510 * must go through the cached variant in this case
2511 */
2512 write_type = IO_COPY;
2513 }
2514
2515 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
2516 switch (write_type) {
2517 case IO_COPY:
2518 /*
2519 * make sure the uio_resid isn't too big...
2520 * internally, we want to handle all of the I/O in
2521 * chunk sizes that fit in a 32 bit int
2522 */
2523 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
2524 /*
2525 * we're going to have to call cluster_write_copy
2526 * more than once...
2527 *
2528 * only want the last call to cluster_write_copy to
2529 * have the IO_TAILZEROFILL flag set and only the
2530 * first call should have IO_HEADZEROFILL
2531 */
2532 zflags = flags & ~IO_TAILZEROFILL;
2533 flags &= ~IO_HEADZEROFILL;
2534
2535 write_length = MAX_IO_REQUEST_SIZE;
2536 } else {
2537 /*
2538 * last call to cluster_write_copy
2539 */
2540 zflags = flags;
2541
2542 write_length = (u_int32_t)cur_resid;
2543 }
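/*
 * Illustrative chunking (scenario assumed, not taken from the source):
 * a uio whose resid exceeds MAX_IO_REQUEST_SIZE is serviced by repeated
 * cluster_write_copy calls of at most MAX_IO_REQUEST_SIZE bytes each;
 * only the first call keeps IO_HEADZEROFILL and only the final call
 * keeps IO_TAILZEROFILL.
 */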
2544 retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
2545 break;
2546
2547 case IO_CONTIG:
2548 zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
2549
2550 if (flags & IO_HEADZEROFILL) {
2551 /*
2552 * only do this once per request
2553 */
2554 flags &= ~IO_HEADZEROFILL;
2555
2556 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
2557 headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2558 if (retval) {
2559 break;
2560 }
2561 }
2562 retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
2563
2564 if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
2565 /*
2566 * we're done with the data from the user specified buffer(s)
2567 * and we've been requested to zero fill at the tail...
2568 * treat this as an IO_HEADZEROFILL which doesn't require a uio
2569 * by rearranging the args and passing in IO_HEADZEROFILL
2570 */
2571
2572 /*
2573 * Update the oldEOF to reflect the current EOF. If the UPL page
2574 * to zero-fill is not valid (when F_NOCACHE is set),
2575 * cluster_write_copy() will perform RMW on the UPL page when
2576 * the oldEOF is not aligned on a page boundary due to an unaligned
2577 * write.
2578 */
2579 if (uio->uio_offset > oldEOF) {
2580 oldEOF = uio->uio_offset;
2581 }
2582 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)oldEOF, tailOff, uio->uio_offset,
2583 (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2584 }
2585 break;
2586
2587 case IO_DIRECT:
2588 /*
2589 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
2590 */
2591 if (min_direct_size >= MIN_DIRECT_WRITE_SIZE) {
2592 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
2593 } else {
2594 retval = cluster_write_direct_small(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg, min_direct_size);
2595 }
2596
2597 break;
2598
2599 case IO_UNKNOWN:
2600 retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
2601 break;
2602 }
2603 /*
2604 * in case we end up calling cluster_write_copy (from cluster_write_direct)
2605 * multiple times to service a multi-vector request that is not aligned properly
2606 * we need to update the oldEOF so that we
2607 * don't zero-fill the head of a page if we've successfully written
2608 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2609 * page that is beyond the oldEOF if the write is unaligned... we only
2610 * want that to happen for the very first page of the cluster_write,
2611 * NOT the first page of each vector making up a multi-vector write.
2612 */
2613 if (uio->uio_offset > oldEOF) {
2614 oldEOF = uio->uio_offset;
2615 }
2616 }
2617 return retval;
2618 }
2619
2620
2621 static int
2622 cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
2623 int flags, int (*callback)(buf_t, void *), void *callback_arg)
2624 {
2625 upl_t upl = NULL;
2626 upl_page_info_t *pl;
2627 vm_offset_t upl_offset;
2628 vm_offset_t vector_upl_offset = 0;
2629 u_int32_t io_req_size;
2630 u_int32_t offset_in_file;
2631 u_int32_t offset_in_iovbase;
2632 u_int32_t io_size;
2633 int io_flag = 0;
2634 upl_size_t upl_size = 0, vector_upl_size = 0;
2635 vm_size_t upl_needed_size;
2636 mach_msg_type_number_t pages_in_pl = 0;
2637 upl_control_flags_t upl_flags;
2638 kern_return_t kret = KERN_SUCCESS;
2639 mach_msg_type_number_t i = 0;
2640 int force_data_sync;
2641 int retval = 0;
2642 int first_IO = 1;
2643 struct clios iostate;
2644 user_addr_t iov_base;
2645 u_int32_t mem_alignment_mask;
2646 u_int32_t devblocksize;
2647 u_int32_t max_io_size;
2648 u_int32_t max_upl_size;
2649 u_int32_t max_vector_size;
2650 u_int32_t bytes_outstanding_limit;
2651 boolean_t io_throttled = FALSE;
2652
2653 u_int32_t vector_upl_iosize = 0;
2654 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
2655 off_t v_upl_uio_offset = 0;
2656 int vector_upl_index = 0;
2657 upl_t vector_upl = NULL;
2658
2659
2660 /*
2661 * When we enter this routine, we know
2662 * -- the resid will not exceed iov_len
2663 */
2664 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
2665 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2666
2667 assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
2668
2669 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2670
2671 io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
2672
2673 if (flags & IO_PASSIVE) {
2674 io_flag |= CL_PASSIVE;
2675 }
2676
2677 if (flags & IO_NOCACHE) {
2678 io_flag |= CL_NOCACHE;
2679 }
2680
2681 if (flags & IO_SKIP_ENCRYPTION) {
2682 io_flag |= CL_ENCRYPTED;
2683 }
2684
2685 iostate.io_completed = 0;
2686 iostate.io_issued = 0;
2687 iostate.io_error = 0;
2688 iostate.io_wanted = 0;
2689
2690 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
2691
2692 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2693 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2694
2695 if (devblocksize == 1) {
2696 /*
2697 * the AFP client advertises a devblocksize of 1
2698 * however, its BLOCKMAP routine maps to physical
2699 * blocks that are PAGE_SIZE in size...
2700 * therefore we can't ask for I/Os that aren't page aligned
2701 * or aren't multiples of PAGE_SIZE in size...
2702 * by setting devblocksize to PAGE_SIZE, we re-instate
2703 * the old behavior we had before the mem_alignment_mask
2704 * changes went in...
2705 */
2706 devblocksize = PAGE_SIZE;
2707 }
2708
2709 next_dwrite:
2710 io_req_size = *write_length;
2711 iov_base = uio_curriovbase(uio);
2712
2713 offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
2714 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
2715
2716 if (offset_in_file || offset_in_iovbase) {
2717 /*
2718 * one of the 2 important offsets is misaligned
2719 * so fire an I/O through the cache for this entire vector
2720 */
2721 goto wait_for_dwrites;
2722 }
2723 if (iov_base & (devblocksize - 1)) {
2724 /*
2725 * the offset in memory must be on a device block boundary
2726 * so that we can guarantee that we can generate an
2727 * I/O that ends on a page boundary in cluster_io
2728 */
2729 goto wait_for_dwrites;
2730 }
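/*
 * Illustrative gating (values assumed, not taken from the source): with a
 * file offset of 0x3200, offset_in_file = 0x200, so this vector is not
 * page-aligned in the file and the request falls through to the cached
 * (cluster_write_copy) path; likewise an iov_base that is not aligned to
 * devblocksize cannot be issued directly.
 */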
2731
2732 task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp);
2733 while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {
2734 int throttle_type;
2735
2736 if ((throttle_type = cluster_is_throttled(vp))) {
2737 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
2738
2739 /*
2740 * we're in the throttle window, at the very least
2741 * we want to limit the size of the I/O we're about
2742 * to issue
2743 */
2744 if ((flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
2745 /*
2746 * we're in the throttle window and at least 1 I/O
2747 * has already been issued by a throttleable thread
2748 * in this window, so return with EAGAIN to indicate
2749 * to the FS issuing the cluster_write call that it
2750 * should now throttle after dropping any locks
2751 */
2752 throttle_info_update_by_mount(vp->v_mount);
2753
2754 io_throttled = TRUE;
2755 goto wait_for_dwrites;
2756 }
2757 max_vector_size = max_throttle_size;
2758 max_io_size = max_throttle_size;
2759 } else {
2760 max_vector_size = MAX_VECTOR_UPL_SIZE;
2761 max_io_size = max_upl_size;
2762 }
2763
2764 if (first_IO) {
2765 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
2766 first_IO = 0;
2767 }
2768 io_size = io_req_size & ~PAGE_MASK;
2769 iov_base = uio_curriovbase(uio);
2770
2771 if (io_size > max_io_size) {
2772 io_size = max_io_size;
2773 }
2774
2775 if (useVectorUPL && (iov_base & PAGE_MASK)) {
2776 /*
2777 * We have an iov_base that's not page-aligned.
2778 * Issue all I/O's that have been collected within
2779 * this Vectored UPL.
2780 */
2781 if (vector_upl_index) {
2782 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2783 reset_vector_run_state();
2784 }
2785
2786 /*
2787 * After this point, if we are using the Vector UPL path and the base is
2788 * not page-aligned then the UPL with that base will be the first in the vector UPL.
2789 */
2790 }
2791
2792 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
2793 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
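/*
 * Illustrative sizing (values assumed): with iov_base = 0x10000200 and
 * io_size = 65536, upl_offset = 0x200 and
 * upl_needed_size = (512 + 65536 + 4095) & ~4095 = 69632 bytes (17 pages),
 * because the unaligned start and end each occupy a partial page.
 */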
2794
2795 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
2796 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
2797
2798 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
2799 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
2800 pages_in_pl = 0;
2801 upl_size = (upl_size_t)upl_needed_size;
2802 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
2803 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
2804
2805 kret = vm_map_get_upl(map,
2806 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2807 &upl_size,
2808 &upl,
2809 NULL,
2810 &pages_in_pl,
2811 &upl_flags,
2812 VM_KERN_MEMORY_FILE,
2813 force_data_sync);
2814
2815 if (kret != KERN_SUCCESS) {
2816 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2817 0, 0, 0, kret, 0);
2818 /*
2819 * failed to get pagelist
2820 *
2821 * we may have already spun some portion of this request
2822 * off as async requests... we need to wait for the I/O
2823 * to complete before returning
2824 */
2825 goto wait_for_dwrites;
2826 }
2827 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2828 pages_in_pl = upl_size / PAGE_SIZE;
2829
2830 for (i = 0; i < pages_in_pl; i++) {
2831 if (!upl_valid_page(pl, i)) {
2832 break;
2833 }
2834 }
2835 if (i == pages_in_pl) {
2836 break;
2837 }
2838
2839 /*
2840 * didn't get all the pages back that we
2841 * needed... release this upl and try again
2842 */
2843 ubc_upl_abort(upl, 0);
2844 }
2845 if (force_data_sync >= 3) {
2846 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2847 i, pages_in_pl, upl_size, kret, 0);
2848 /*
2849 * for some reason, we couldn't acquire a hold on all
2850 * the pages needed in the user's address space
2851 *
2852 * we may have already spun some portion of this request
2853 * off as async requests... we need to wait for the I/O
2854 * to complete before returning
2855 */
2856 goto wait_for_dwrites;
2857 }
2858
2859 /*
2860 * Consider the possibility that upl_size wasn't satisfied.
2861 */
2862 if (upl_size < upl_needed_size) {
2863 if (upl_size && upl_offset == 0) {
2864 io_size = upl_size;
2865 } else {
2866 io_size = 0;
2867 }
2868 }
2869 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2870 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
2871
2872 if (io_size == 0) {
2873 ubc_upl_abort(upl, 0);
2874 /*
2875 * we may have already spun some portion of this request
2876 * off as async requests... we need to wait for the I/O
2877 * to complete before returning
2878 */
2879 goto wait_for_dwrites;
2880 }
2881
2882 if (useVectorUPL) {
2883 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
2884 if (end_off) {
2885 issueVectorUPL = 1;
2886 }
2887 /*
2888 * After this point, if we are using a vector UPL, then
2889 * either all the UPL elements end on a page boundary OR
2890 * this UPL is the last element because it does not end
2891 * on a page boundary.
2892 */
2893 }
2894
2895 /*
2896 * we want to push out these writes asynchronously so that we can overlap
2897 * the preparation of the next I/O...
2898 * if there are already too many outstanding writes,
2899 * wait until some complete before issuing the next
2900 */
2901 if (vp->v_mount->mnt_minsaturationbytecount) {
2902 bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
2903 } else {
2904 if (__improbable(os_mul_overflow(max_upl_size, IO_SCALE(vp, 2),
2905 &bytes_outstanding_limit) ||
2906 (bytes_outstanding_limit > overlapping_write_max))) {
2907 bytes_outstanding_limit = overlapping_write_max;
2908 }
2909 }
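/*
 * Illustrative limit (values assumed, not taken from the source): with
 * max_upl_size = 1MB and IO_SCALE(vp, 2) = 2, roughly 2MB of direct writes
 * may be outstanding before cluster_iostate_wait blocks; a filesystem can
 * override this via mnt_minsaturationbytecount, and the computed limit is
 * always clamped to overlapping_write_max.
 */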
2910
2911 cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");
2912
2913 if (iostate.io_error) {
2914 /*
2915 * one of the earlier writes we issued ran into a hard error
2916 * don't issue any more writes, clean up the UPL
2917 * that was just created but not used, then
2918 * go wait for all writes that are part of this stream
2919 * to complete before returning the error to the caller
2920 */
2921 ubc_upl_abort(upl, 0);
2922
2923 goto wait_for_dwrites;
2924 }
2925
2926 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
2927 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
2928
2929 if (!useVectorUPL) {
2930 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2931 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2932 } else {
2933 if (!vector_upl_index) {
2934 vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
2935 v_upl_uio_offset = uio->uio_offset;
2936 vector_upl_offset = upl_offset;
2937 }
2938
2939 vector_upl_set_subupl(vector_upl, upl, upl_size);
2940 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
2941 vector_upl_index++;
2942 vector_upl_iosize += io_size;
2943 vector_upl_size += upl_size;
2944
2945 if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
2946 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2947 reset_vector_run_state();
2948 }
2949 }
2950
2951 /*
2952 * update the uio structure to
2953 * reflect the I/O that we just issued
2954 */
2955 uio_update(uio, (user_size_t)io_size);
2956
2957 /*
2958 * in case we end up calling through to cluster_write_copy to finish
2959 * the tail of this request, we need to update the oldEOF so that we
2960 * don't zero-fill the head of a page if we've successfully written
2961 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2962 * page that is beyond the oldEOF if the write is unaligned... we only
2963 * want that to happen for the very first page of the cluster_write,
2964 * NOT the first page of each vector making up a multi-vector write.
2965 */
2966 if (uio->uio_offset > oldEOF) {
2967 oldEOF = uio->uio_offset;
2968 }
2969
2970 io_req_size -= io_size;
2971
2972 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
2973 (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
2974 } /* end while */
2975
2976 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
2977 retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);
2978
2979 if (retval == 0 && *write_type == IO_DIRECT) {
2980 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
2981 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2982
2983 goto next_dwrite;
2984 }
2985 }
2986
2987 wait_for_dwrites:
2988
2989 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
2990 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2991 reset_vector_run_state();
2992 }
2993 /*
2994 * make sure all async writes issued as part of this stream
2995 * have completed before we return
2996 */
2997 cluster_iostate_wait(&iostate, 0, "cluster_write_direct");
2998
2999 if (iostate.io_error) {
3000 retval = iostate.io_error;
3001 }
3002
3003 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3004
3005 if (io_throttled == TRUE && retval == 0) {
3006 retval = EAGAIN;
3007 }
3008
3009 if (io_req_size && retval == 0) {
3010 /*
3011 * we couldn't handle the tail of this request in DIRECT mode
3012 * so fire it through the copy path
3013 *
3014 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
3015 * so we can just pass 0 in for the headOff and tailOff
3016 */
3017 if (uio->uio_offset > oldEOF) {
3018 oldEOF = uio->uio_offset;
3019 }
3020
3021 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
3022
3023 *write_type = IO_UNKNOWN;
3024 }
3025 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
3026 (int)uio->uio_offset, io_req_size, retval, 4, 0);
3027
3028 return retval;
3029 }
3030
3031
3032 static int
3033 cluster_write_direct_small(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
3034 int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size)
3035 {
3036 upl_t upl = NULL;
3037 upl_page_info_t *pl;
3038 vm_offset_t upl_offset;
3039 vm_offset_t vector_upl_offset = 0;
3040 u_int32_t io_req_size;
3041 u_int32_t offset_in_file;
3042 u_int32_t offset_in_iovbase;
3043 u_int32_t io_size;
3044 int io_flag = 0;
3045 upl_size_t upl_size = 0, vector_upl_size = 0;
3046 vm_size_t upl_needed_size;
3047 mach_msg_type_number_t pages_in_pl = 0;
3048 upl_control_flags_t upl_flags;
3049 kern_return_t kret = KERN_SUCCESS;
3050 mach_msg_type_number_t i = 0;
3051 int force_data_sync;
3052 int retval = 0;
3053 int first_IO = 1;
3054 struct clios iostate;
3055 user_addr_t iov_base;
3056 u_int32_t mem_alignment_mask;
3057 u_int32_t devblocksize;
3058 u_int32_t max_io_size;
3059 u_int32_t max_upl_size;
3060 u_int32_t max_vector_size;
3061 u_int32_t bytes_outstanding_limit;
3062 boolean_t io_throttled = FALSE;
3063
3064 u_int32_t vector_upl_iosize = 0;
3065 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
3066 off_t v_upl_uio_offset = 0;
3067 int vector_upl_index = 0;
3068 upl_t vector_upl = NULL;
3069
3070 user_size_t iov_len;
3071 uint32_t io_align_mask;
3072
3073 /*
3074 * When we enter this routine, we know
3075 * -- the resid will not exceed iov_len
3076 */
3077 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
3078 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
3079
3080 assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
3081
3082 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
3083
3084 io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
3085
3086 if (flags & IO_PASSIVE) {
3087 io_flag |= CL_PASSIVE;
3088 }
3089
3090 if (flags & IO_NOCACHE) {
3091 io_flag |= CL_NOCACHE;
3092 }
3093
3094 if (flags & IO_SKIP_ENCRYPTION) {
3095 io_flag |= CL_ENCRYPTED;
3096 }
3097
3098 iostate.io_completed = 0;
3099 iostate.io_issued = 0;
3100 iostate.io_error = 0;
3101 iostate.io_wanted = 0;
3102
3103 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
3104
3105 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
3106 if (devblocksize == 1) {
3107 /*
3108 * the AFP client advertises a devblocksize of 1
3109 * however, its BLOCKMAP routine maps to physical
3110 * blocks that are PAGE_SIZE in size...
3111 * therefore we can't ask for I/Os that aren't page aligned
3112 * or aren't multiples of PAGE_SIZE in size...
3113 * by setting devblocksize to PAGE_SIZE, we re-instate
3114 * the old behavior we had before the mem_alignment_mask
3115 * changes went in...
3116 */
3117 devblocksize = PAGE_SIZE;
3118 }
3119
3120 io_align_mask = PAGE_MASK;
3121 mem_alignment_mask = PAGE_MASK;
3122 if (min_io_size < MIN_DIRECT_WRITE_SIZE) {
3123 /* The process has opted into fs blocksize direct io writes */
3124 assert((min_io_size & (min_io_size - 1)) == 0);
3125 io_align_mask = min_io_size - 1;
3126 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
3127 }
3128 if ((devblocksize - 1) > mem_alignment_mask) {
3129 /*
3130 * the offset in memory must be on a device block boundary
3131 * so that we can guarantee that we can generate an
3132 * I/O that ends on a page boundary in cluster_io
3133 */
3134 mem_alignment_mask = devblocksize - 1;
3135 }
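/*
 * Illustrative masks (values assumed, not taken from the source): for a
 * process opted into fs-blocksize direct writes with min_io_size = 2048,
 * devblocksize = 512 and an mnt_alignmentmask of 3, io_align_mask becomes
 * 0x7FF (file offsets must be 2KB aligned) and mem_alignment_mask is raised
 * to devblocksize - 1 = 0x1FF (the user buffer must be 512-byte aligned).
 */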
3136
3137 next_dwrite:
3138 io_req_size = *write_length;
3139
3140 while ((io_req_size >= PAGE_SIZE || io_req_size >= min_io_size) && uio->uio_offset < newEOF && retval == 0) {
3141 int throttle_type;
3142
3143 iov_base = uio_curriovbase(uio);
3144 iov_len = uio_curriovlen(uio);
3145 if (iov_len > io_req_size) {
3146 iov_len = io_req_size;
3147 }
3148 io_size = iov_len;
3149
3150 offset_in_file = (u_int32_t)(uio->uio_offset & io_align_mask);
3151 offset_in_iovbase = (u_int32_t)(iov_base & mem_alignment_mask);
3152
3153 if (offset_in_file || offset_in_iovbase) {
3154 /*
3155 * one of the 2 important offsets is misaligned
3156 * so fire an I/O through the cache for this entire vector
3157 */
3158 if (min_io_size < MIN_DIRECT_WRITE_SIZE) {
3159 if (iov_len < io_req_size) {
3160 io_req_size = iov_len;
3161 }
3162 }
3163 goto wait_for_dwrites;
3164 }
3165
3166 if ((throttle_type = cluster_is_throttled(vp))) {
3167 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
3168
3169 /*
3170 * we're in the throttle window, at the very least
3171 * we want to limit the size of the I/O we're about
3172 * to issue
3173 */
3174 if ((flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
3175 /*
3176 * we're in the throttle window and at least 1 I/O
3177 * has already been issued by a throttleable thread
3178 * in this window, so return with EAGAIN to indicate
3179 * to the FS issuing the cluster_write call that it
3180 * should now throttle after dropping any locks
3181 */
3182 throttle_info_update_by_mount(vp->v_mount);
3183
3184 io_throttled = TRUE;
3185 goto wait_for_dwrites;
3186 }
3187 max_vector_size = max_throttle_size;
3188 max_io_size = max_throttle_size;
3189 } else {
3190 max_vector_size = MAX_VECTOR_UPL_SIZE;
3191 max_io_size = max_upl_size;
3192 }
3193
3194 if (first_IO) {
3195 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
3196 first_IO = 0;
3197 }
3198
3199 if (io_size > max_io_size) {
3200 io_size = max_io_size;
3201 }
3202 io_size = io_size & ~io_align_mask;
3203
3204 if (useVectorUPL && (iov_base & PAGE_MASK)) {
3205 /*
3206 * We have an iov_base that's not page-aligned.
3207 * Issue all I/O's that have been collected within
3208 * this Vectored UPL.
3209 */
3210 if (vector_upl_index) {
3211 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3212 reset_vector_run_state();
3213 }
3214
3215 /*
3216 * After this point, if we are using the Vector UPL path and the base is
3217 * not page-aligned then the UPL with that base will be the first in the vector UPL.
3218 */
3219 }
3220
3221 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
3222 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3223
3224 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
3225 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
3226
3227 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
3228 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
3229 pages_in_pl = 0;
3230 upl_size = (upl_size_t)upl_needed_size;
3231 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
3232 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
3233
3234 kret = vm_map_get_upl(map,
3235 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
3236 &upl_size,
3237 &upl,
3238 NULL,
3239 &pages_in_pl,
3240 &upl_flags,
3241 VM_KERN_MEMORY_FILE,
3242 force_data_sync);
3243
3244 if (kret != KERN_SUCCESS) {
3245 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3246 0, 0, 0, kret, 0);
3247 /*
3248 * failed to get pagelist
3249 *
3250 * we may have already spun some portion of this request
3251 * off as async requests... we need to wait for the I/O
3252 * to complete before returning
3253 */
3254 goto wait_for_dwrites;
3255 }
3256 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
3257 pages_in_pl = upl_size / PAGE_SIZE;
3258
3259 for (i = 0; i < pages_in_pl; i++) {
3260 if (!upl_valid_page(pl, i)) {
3261 break;
3262 }
3263 }
3264 if (i == pages_in_pl) {
3265 break;
3266 }
3267
3268 /*
3269 * didn't get all the pages back that we
3270 * needed... release this upl and try again
3271 */
3272 ubc_upl_abort(upl, 0);
3273 }
3274 if (force_data_sync >= 3) {
3275 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3276 i, pages_in_pl, upl_size, kret, 0);
3277 /*
3278 * for some reason, we couldn't acquire a hold on all
3279 * the pages needed in the user's address space
3280 *
3281 * we may have already spun some portion of this request
3282 * off as async requests... we need to wait for the I/O
3283 * to complete before returning
3284 */
3285 if (io_req_size > upl_needed_size) {
3286 io_req_size = upl_needed_size;
3287 }
3288 goto wait_for_dwrites;
3289 }
3290
3291 /*
3292 * Consider the possibility that upl_size wasn't satisfied.
3293 */
3294 if (upl_size < upl_needed_size) {
3295 if (upl_size && upl_offset == 0) {
3296 io_size = upl_size;
3297 } else {
3298 io_size = 0;
3299 }
3300 }
3301 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
3302 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
3303
3304 if (io_size == 0) {
3305 ubc_upl_abort(upl, 0);
3306 /*
3307 * we may have already spun some portion of this request
3308 * off as async requests... we need to wait for the I/O
3309 * to complete before returning
3310 */
3311 io_req_size = upl_needed_size;
3312 goto wait_for_dwrites;
3313 }
3314
3315 if (useVectorUPL) {
3316 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
3317 if (end_off) {
3318 issueVectorUPL = 1;
3319 }
3320 /*
3321 * After this point, if we are using a vector UPL, then
3322 * either all the UPL elements end on a page boundary OR
3323 * this UPL is the last element because it does not end
3324 * on a page boundary.
3325 */
3326 }
3327
3328 /*
3329 * we want to push out these writes asynchronously so that we can overlap
3330 * the preparation of the next I/O...
3331 * if there are already too many outstanding writes,
3332 * wait until some complete before issuing the next
3333 */
3334 if (vp->v_mount->mnt_minsaturationbytecount) {
3335 bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
3336 } else {
3337 if (__improbable(os_mul_overflow(max_upl_size, IO_SCALE(vp, 2),
3338 &bytes_outstanding_limit) ||
3339 (bytes_outstanding_limit > overlapping_write_max))) {
3340 bytes_outstanding_limit = overlapping_write_max;
3341 }
3342 }
3343
3344 cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");
3345
3346 if (iostate.io_error) {
3347 /*
3348 * one of the earlier writes we issued ran into a hard error
3349 * don't issue any more writes, clean up the UPL
3350 * that was just created but not used, then
3351 * go wait for all writes that are part of this stream
3352 * to complete before returning the error to the caller
3353 */
3354 ubc_upl_abort(upl, 0);
3355
3356 goto wait_for_dwrites;
3357 }
3358
3359 task_update_logical_writes(current_task(), io_size, TASK_WRITE_IMMEDIATE, vp);
3360
3361 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
3362 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
3363
3364 if (!useVectorUPL) {
3365 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
3366 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3367 } else {
3368 if (!vector_upl_index) {
3369 vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
3370 v_upl_uio_offset = uio->uio_offset;
3371 vector_upl_offset = upl_offset;
3372 }
3373
3374 vector_upl_set_subupl(vector_upl, upl, upl_size);
3375 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
3376 vector_upl_index++;
3377 vector_upl_iosize += io_size;
3378 vector_upl_size += upl_size;
3379
3380 if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
3381 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3382 reset_vector_run_state();
3383 }
3384 }
3385
3386 /*
3387 * update the uio structure to
3388 * reflect the I/O that we just issued
3389 */
3390 uio_update(uio, (user_size_t)io_size);
3391
3392 /*
3393 * in case we end up calling through to cluster_write_copy to finish
3394 * the tail of this request, we need to update the oldEOF so that we
3395 * don't zero-fill the head of a page if we've successfully written
3396 * data to that area... 'cluster_write_copy' will zero-fill the head of a
3397 * page that is beyond the oldEOF if the write is unaligned... we only
3398 * want that to happen for the very first page of the cluster_write,
3399 * NOT the first page of each vector making up a multi-vector write.
3400 */
3401 if (uio->uio_offset > oldEOF) {
3402 oldEOF = uio->uio_offset;
3403 }
3404
3405 io_req_size -= io_size;
3406
3407 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
3408 (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
3409 } /* end while */
3410
3411 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
3412 retval = cluster_io_type(uio, write_type, write_length, min_io_size);
3413
3414 if (retval == 0 && *write_type == IO_DIRECT) {
3415 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
3416 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
3417
3418 goto next_dwrite;
3419 }
3420 }
3421
3422 wait_for_dwrites:
3423
3424 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
3425 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3426 reset_vector_run_state();
3427 }
3428 /*
3429 * make sure all async writes issued as part of this stream
3430 * have completed before we return
3431 */
3432 cluster_iostate_wait(&iostate, 0, "cluster_write_direct");
3433
3434 if (iostate.io_error) {
3435 retval = iostate.io_error;
3436 }
3437
3438 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3439
3440 if (io_throttled == TRUE && retval == 0) {
3441 retval = EAGAIN;
3442 }
3443
3444 if (io_req_size && retval == 0) {
3445 /*
3446 * we couldn't handle the tail of this request in DIRECT mode
3447 * so fire it through the copy path
3448 *
3449 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
3450 * so we can just pass 0 in for the headOff and tailOff
3451 */
3452 if (uio->uio_offset > oldEOF) {
3453 oldEOF = uio->uio_offset;
3454 }
3455
3456 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
3457
3458 *write_type = IO_UNKNOWN;
3459 }
3460 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
3461 (int)uio->uio_offset, io_req_size, retval, 4, 0);
3462
3463 return retval;
3464 }
3465
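/*
 * cluster_write_contig handles writes whose source buffer is physically
 * contiguous (e.g. device memory): the pages are wired via vm_map_get_upl,
 * any head/tail that isn't devblocksize aligned is moved with
 * cluster_align_phys_io, and the aligned middle is issued with CL_DEV_MEMORY.
 * Illustrative (hypothetical) call, assuming cluster_io_type classified the
 * current vector as IO_CONTIG:
 *
 *     error = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length,
 *                                  callback, callback_arg, bflag);
 */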
3466 static int
3467 cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
3468 int (*callback)(buf_t, void *), void *callback_arg, int bflag)
3469 {
3470 upl_page_info_t *pl;
3471 addr64_t src_paddr = 0;
3472 upl_t upl[MAX_VECTS];
3473 vm_offset_t upl_offset;
3474 u_int32_t tail_size = 0;
3475 u_int32_t io_size;
3476 u_int32_t xsize;
3477 upl_size_t upl_size;
3478 vm_size_t upl_needed_size;
3479 mach_msg_type_number_t pages_in_pl;
3480 upl_control_flags_t upl_flags;
3481 kern_return_t kret;
3482 struct clios iostate;
3483 int error = 0;
3484 int cur_upl = 0;
3485 int num_upl = 0;
3486 int n;
3487 user_addr_t iov_base;
3488 u_int32_t devblocksize;
3489 u_int32_t mem_alignment_mask;
3490
3491 /*
3492 * When we enter this routine, we know
3493 * -- the io_req_size will not exceed iov_len
3494 * -- the target address is physically contiguous
3495 */
3496 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
3497
3498 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
3499 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
3500
3501 iostate.io_completed = 0;
3502 iostate.io_issued = 0;
3503 iostate.io_error = 0;
3504 iostate.io_wanted = 0;
3505
3506 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
3507
3508 next_cwrite:
3509 io_size = *write_length;
3510
3511 iov_base = uio_curriovbase(uio);
3512
3513 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
3514 upl_needed_size = upl_offset + io_size;
3515
3516 pages_in_pl = 0;
3517 upl_size = (upl_size_t)upl_needed_size;
3518 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
3519 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
3520
3521 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
3522 kret = vm_map_get_upl(map,
3523 vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
3524 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
3525
3526 if (kret != KERN_SUCCESS) {
3527 /*
3528 * failed to get pagelist
3529 */
3530 error = EINVAL;
3531 goto wait_for_cwrites;
3532 }
3533 num_upl++;
3534
3535 /*
3536 * Consider the possibility that upl_size wasn't satisfied.
3537 */
3538 if (upl_size < upl_needed_size) {
3539 /*
3540 * This is a failure in the physical memory case.
3541 */
3542 error = EINVAL;
3543 goto wait_for_cwrites;
3544 }
3545 pl = ubc_upl_pageinfo(upl[cur_upl]);
3546
3547 src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
3548
3549 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
3550 u_int32_t head_size;
3551
3552 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
3553
3554 if (head_size > io_size) {
3555 head_size = io_size;
3556 }
3557
3558 error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
3559
3560 if (error) {
3561 goto wait_for_cwrites;
3562 }
3563
3564 upl_offset += head_size;
3565 src_paddr += head_size;
3566 io_size -= head_size;
3567
3568 iov_base += head_size;
3569 }
3570 if ((u_int32_t)iov_base & mem_alignment_mask) {
3571 /*
3572 * the request isn't aligned to a memory boundary
3573 * that the underlying DMA engine can handle...
3574 * return an error instead of going through
3575 * the slow copy path since the intent of this
3576 * path is direct I/O from device memory
3577 */
3578 error = EINVAL;
3579 goto wait_for_cwrites;
3580 }
3581
3582 tail_size = io_size & (devblocksize - 1);
3583 io_size -= tail_size;
3584
3585 while (io_size && error == 0) {
3586 if (io_size > MAX_IO_CONTIG_SIZE) {
3587 xsize = MAX_IO_CONTIG_SIZE;
3588 } else {
3589 xsize = io_size;
3590 }
3591 /*
3592 * request asynchronously so that we can overlap
3593 * the preparation of the next I/O... we'll do
3594 * the commit after all the I/O has completed
3595 * since it's all issued against the same UPL
3596 * if there are already too many outstanding writes
3597 * wait until some have completed before issuing the next
3598 */
3599 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");
3600
3601 if (iostate.io_error) {
3602 /*
3603 * one of the earlier writes we issued ran into a hard error
3604 * don't issue any more writes...
3605 * go wait for all writes that are part of this stream
3606 * to complete before returning the error to the caller
3607 */
3608 goto wait_for_cwrites;
3609 }
3610 /*
3611 * issue an asynchronous write to cluster_io
3612 */
3613 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
3614 xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
3615
3616 if (error == 0) {
3617 /*
3618 * The cluster_io write completed successfully,
3619 * update the uio structure
3620 */
3621 uio_update(uio, (user_size_t)xsize);
3622
3623 upl_offset += xsize;
3624 src_paddr += xsize;
3625 io_size -= xsize;
3626 }
3627 }
3628 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
3629 error = cluster_io_type(uio, write_type, write_length, 0);
3630
3631 if (error == 0 && *write_type == IO_CONTIG) {
3632 cur_upl++;
3633 goto next_cwrite;
3634 }
3635 } else {
3636 *write_type = IO_UNKNOWN;
3637 }
3638
3639 wait_for_cwrites:
3640 /*
3641 * make sure all async writes that are part of this stream
3642 * have completed before we proceed
3643 */
3644 cluster_iostate_wait(&iostate, 0, "cluster_write_contig");
3645
3646 if (iostate.io_error) {
3647 error = iostate.io_error;
3648 }
3649
3650 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
3651
3652 if (error == 0 && tail_size) {
3653 error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
3654 }
3655
3656 for (n = 0; n < num_upl; n++) {
3657 /*
3658 * just release our hold on each physically contiguous
3659 * region without changing any state
3660 */
3661 ubc_upl_abort(upl[n], 0);
3662 }
3663
3664 return error;
3665 }
3666
3667
3668 /*
3669 * need to avoid a race between an msync of a range of pages dirtied via mmap
3670 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
3671 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
3672 *
3673 * we should never force-zero-fill pages that are already valid in the cache...
3674 * the entire page contains valid data (either from disk, zero-filled or dirtied
3675 * via an mmap) so we can only do damage by trying to zero-fill
3676 *
3677 */
3678 static int
3679 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
3680 {
3681 int zero_pg_index;
3682 boolean_t need_cluster_zero = TRUE;
3683
3684 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
3685 bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
3686 zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
3687
3688 if (upl_valid_page(pl, zero_pg_index)) {
3689 /*
3690 * never force zero valid pages - dirty or clean
3691 * we'll leave these in the UPL for cluster_write_copy to deal with
3692 */
3693 need_cluster_zero = FALSE;
3694 }
3695 }
3696 if (need_cluster_zero == TRUE) {
3697 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
3698 }
3699
3700 return bytes_to_zero;
3701 }
3702
3703
3704 void
3705 cluster_update_state(vnode_t vp, vm_object_offset_t s_offset, vm_object_offset_t e_offset, boolean_t vm_initiated)
3706 {
3707 struct cl_extent cl;
3708 boolean_t first_pass = TRUE;
3709
3710 assert(s_offset < e_offset);
3711 assert((s_offset & PAGE_MASK_64) == 0);
3712 assert((e_offset & PAGE_MASK_64) == 0);
3713
3714 cl.b_addr = (daddr64_t)(s_offset / PAGE_SIZE_64);
3715 cl.e_addr = (daddr64_t)(e_offset / PAGE_SIZE_64);
3716
3717 cluster_update_state_internal(vp, &cl, 0, TRUE, &first_pass, s_offset, (int)(e_offset - s_offset),
3718 vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated);
3719 }
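/*
 * Illustrative (hypothetical) caller sketch: for a page-aligned dirty range,
 * cluster_update_state simply converts byte offsets to page indices and
 * defers the range into the vnode's write-behind state, e.g.
 *
 *     cluster_update_state(vp, trunc_page_64(offset), round_page_64(offset + size), TRUE);
 *
 * with vm_initiated set when the VM, rather than a file-system write path,
 * is reporting the dirtied range.
 */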
3720
3721
3722 static void
3723 cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes,
3724 boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF,
3725 int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
3726 {
3727 struct cl_writebehind *wbp;
3728 int cl_index;
3729 int ret_cluster_try_push;
3730 u_int max_cluster_pgcount;
3731
3732
3733 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
3734
3735 /*
3736 * take the lock to protect our accesses
3737 * of the writebehind and sparse cluster state
3738 */
3739 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
3740
3741 if (wbp->cl_scmap) {
3742 if (!(flags & IO_NOCACHE)) {
3743 /*
3744 * we've fallen into the sparse
3745 * cluster method of delaying dirty pages
3746 */
3747 sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);
3748
3749 lck_mtx_unlock(&wbp->cl_lockw);
3750 return;
3751 }
3752 /*
3753 * must have done cached writes that fell into
3754 * the sparse cluster mechanism... we've switched
3755 * to uncached writes on the file, so go ahead
3756 * and push whatever's in the sparse map
3757 * and switch back to normal clustering
3758 */
3759 wbp->cl_number = 0;
3760
3761 sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg, vm_initiated);
3762 /*
3763 * no clusters of either type present at this point
3764 * so just go directly to start_new_cluster since
3765 * we know we need to delay this I/O since we've
3766 * already released the pages back into the cache
3767 * to avoid the deadlock with sparse_cluster_push
3768 */
3769 goto start_new_cluster;
3770 }
3771 if (*first_pass == TRUE) {
3772 if (write_off == wbp->cl_last_write) {
3773 wbp->cl_seq_written += write_cnt;
3774 } else {
3775 wbp->cl_seq_written = write_cnt;
3776 }
3777
3778 wbp->cl_last_write = write_off + write_cnt;
3779
3780 *first_pass = FALSE;
3781 }
3782 if (wbp->cl_number == 0) {
3783 /*
3784 * no clusters currently present
3785 */
3786 goto start_new_cluster;
3787 }
3788
3789 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
3790 /*
3791 * check each cluster that we currently hold
3792 * try to merge some or all of this write into
3793 * one or more of the existing clusters... if
3794 * any portion of the write remains, start a
3795 * new cluster
3796 */
3797 if (cl->b_addr >= wbp->cl_clusters[cl_index].b_addr) {
3798 /*
3799 * the current write starts at or after the current cluster
3800 */
3801 if (cl->e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3802 /*
3803 * we have a write that fits entirely
3804 * within the existing cluster limits
3805 */
3806 if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
3807 /*
3808 * update our idea of where the cluster ends
3809 */
3810 wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
3811 }
3812 break;
3813 }
3814 if (cl->b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3815 /*
3816 * we have a write that starts in the middle of the current cluster
3817 * but extends beyond the cluster's limit... we know this because
3818 * of the previous checks
3819 * we'll extend the current cluster to the max
3820 * and update the b_addr for the current write to reflect that
3821 * the head of it was absorbed into this cluster...
3822 * note that we'll always have a leftover tail in this case since
3823 * full absorption would have occurred in the clause above
3824 */
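/*
 * worked example (hypothetical numbers): with max_cluster_pgcount = 32,
 * an existing cluster covering pages [100, 110) and a write spanning
 * [105, 140), the cluster is stretched to [100, 132) and the write's
 * b_addr is bumped to 132, leaving a tail [132, 140) for the checks below
 */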
3825 wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
3826
3827 cl->b_addr = wbp->cl_clusters[cl_index].e_addr;
3828 }
3829 /*
3830 * we come here for the case where the current write starts
3831 * beyond the limit of the existing cluster or we have a leftover
3832 * tail after a partial absorption
3833 *
3834 * in either case, we'll check the remaining clusters before
3835 * starting a new one
3836 */
3837 } else {
3838 /*
3839 * the current write starts in front of the cluster we're currently considering
3840 */
3841 if ((wbp->cl_clusters[cl_index].e_addr - cl->b_addr) <= max_cluster_pgcount) {
3842 /*
3843 * we can just merge the new request into
3844 * this cluster and leave it in the cache
3845 * since the resulting cluster is still
3846 * less than the maximum allowable size
3847 */
3848 wbp->cl_clusters[cl_index].b_addr = cl->b_addr;
3849
3850 if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
3851 /*
3852 * the current write completely
3853 * envelops the existing cluster and since
3854 * each write is limited to at most max_cluster_pgcount pages
3855 * we can just use the start and last blocknos of the write
3856 * to generate the cluster limits
3857 */
3858 wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
3859 }
3860 break;
3861 }
3862 /*
3863 * if we were to combine this write with the current cluster
3864 * we would exceed the cluster size limit.... so,
3865 * let's see if there's any overlap of the new I/O with
3866 * the cluster we're currently considering... in fact, we'll
3867 * stretch the cluster out to its full limit and see if we
3868 * get an intersection with the current write
3869 *
3870 */
3871 if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
3872 /*
3873 * the current write extends into the proposed cluster
3874 * clip the length of the current write after first combining its
3875 * tail with the newly shaped cluster
3876 */
3877 wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
3878
3879 cl->e_addr = wbp->cl_clusters[cl_index].b_addr;
3880 }
3881 /*
3882 * if we get here, there was no way to merge
3883 * any portion of this write with this cluster
3884 * or we could only merge part of it which
3885 * will leave a tail...
3886 * we'll check the remaining clusters before starting a new one
3887 */
3888 }
3889 }
3890 if (cl_index < wbp->cl_number) {
3891 /*
3892 * we found an existing cluster(s) that we
3893 * could entirely merge this I/O into
3894 */
3895 goto delay_io;
3896 }
3897
3898 if (defer_writes == FALSE &&
3899 wbp->cl_number == MAX_CLUSTERS &&
3900 wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) {
3901 uint32_t n;
3902
3903 if (vp->v_mount->mnt_minsaturationbytecount) {
3904 n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp);
3905
3906 if (n > MAX_CLUSTERS) {
3907 n = MAX_CLUSTERS;
3908 }
3909 } else {
3910 n = 0;
3911 }
3912
3913 if (n == 0) {
3914 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
3915 n = WRITE_BEHIND_SSD;
3916 } else {
3917 n = WRITE_BEHIND;
3918 }
3919 }
3920 while (n--) {
3921 cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated);
3922 }
3923 }
3924 if (wbp->cl_number < MAX_CLUSTERS) {
3925 /*
3926 * we didn't find an existing cluster to
3927 * merge into, but there's room to start
3928 * a new one
3929 */
3930 goto start_new_cluster;
3931 }
3932 /*
3933 * no existing cluster to merge with and no
3934 * room to start a new one... we'll try
3935 * pushing one of the existing ones... if none of
3936 * them are able to be pushed, we'll switch
3937 * to the sparse cluster mechanism
3938 * cluster_try_push updates cl_number to the
3939 * number of remaining clusters... and
3940 * returns the number of currently unused clusters
3941 */
3942 ret_cluster_try_push = 0;
3943
3944 /*
3945 * if writes are not deferred, call cluster push immediately
3946 */
3947 if (defer_writes == FALSE) {
3948 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated);
3949 }
3950 /*
3951 * execute following regardless of writes being deferred or not
3952 */
3953 if (ret_cluster_try_push == 0) {
3954 /*
3955 * no more room in the normal cluster mechanism
3956 * so let's switch to the more expansive but expensive
3957 * sparse mechanism....
3958 */
3959 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated);
3960 sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);
3961
3962 lck_mtx_unlock(&wbp->cl_lockw);
3963 return;
3964 }
3965 start_new_cluster:
3966 wbp->cl_clusters[wbp->cl_number].b_addr = cl->b_addr;
3967 wbp->cl_clusters[wbp->cl_number].e_addr = cl->e_addr;
3968
3969 wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3970
3971 if (flags & IO_NOCACHE) {
3972 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3973 }
3974
3975 if (flags & IO_PASSIVE) {
3976 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3977 }
3978
3979 wbp->cl_number++;
3980 delay_io:
3981 lck_mtx_unlock(&wbp->cl_lockw);
3982 return;
3983 }
3984
3985
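/*
 * cluster_write_copy is the buffered-write path: it maps the affected file
 * range with a UPL, pre-reads any partially-valid edge pages that fall below
 * the old EOF, copies the user data (zero-filling the head/tail when
 * IO_HEADZEROFILL/IO_TAILZEROFILL are requested), then either pushes the
 * range immediately for IO_SYNC or records it in the write-behind clusters
 * via cluster_update_state_internal.
 */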
3986 static int
3987 cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
3988 off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3989 {
3990 upl_page_info_t *pl;
3991 upl_t upl;
3992 vm_offset_t upl_offset = 0;
3993 vm_size_t upl_size;
3994 off_t upl_f_offset;
3995 int pages_in_upl;
3996 int start_offset;
3997 int xfer_resid;
3998 int io_size;
3999 int io_offset;
4000 int bytes_to_zero;
4001 int bytes_to_move;
4002 kern_return_t kret;
4003 int retval = 0;
4004 int io_resid;
4005 long long total_size;
4006 long long zero_cnt;
4007 off_t zero_off;
4008 long long zero_cnt1;
4009 off_t zero_off1;
4010 off_t write_off = 0;
4011 int write_cnt = 0;
4012 boolean_t first_pass = FALSE;
4013 struct cl_extent cl;
4014 int bflag;
4015 u_int max_io_size;
4016
4017 if (uio) {
4018 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
4019 (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
4020
4021 io_resid = io_req_size;
4022 } else {
4023 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
4024 0, 0, (int)oldEOF, (int)newEOF, 0);
4025
4026 io_resid = 0;
4027 }
4028 if (flags & IO_PASSIVE) {
4029 bflag = CL_PASSIVE;
4030 } else {
4031 bflag = 0;
4032 }
4033 if (flags & IO_NOCACHE) {
4034 bflag |= CL_NOCACHE;
4035 }
4036
4037 if (flags & IO_SKIP_ENCRYPTION) {
4038 bflag |= CL_ENCRYPTED;
4039 }
4040
4041 zero_cnt = 0;
4042 zero_cnt1 = 0;
4043 zero_off = 0;
4044 zero_off1 = 0;
4045
4046 max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
4047
4048 if (flags & IO_HEADZEROFILL) {
4049 /*
4050 * some filesystems (HFS is one) don't support unallocated holes within a file...
4051 * so we zero fill the intervening space between the old EOF and the offset
4052 * where the next chunk of real data begins.... ftruncate will also use this
4053 * routine to zero fill to the new EOF when growing a file... in this case, the
4054 * uio structure will not be provided
4055 */
4056 if (uio) {
4057 if (headOff < uio->uio_offset) {
4058 zero_cnt = uio->uio_offset - headOff;
4059 zero_off = headOff;
4060 }
4061 } else if (headOff < newEOF) {
4062 zero_cnt = newEOF - headOff;
4063 zero_off = headOff;
4064 }
4065 } else {
4066 if (uio && uio->uio_offset > oldEOF) {
4067 zero_off = uio->uio_offset & ~PAGE_MASK_64;
4068
4069 if (zero_off >= oldEOF) {
4070 zero_cnt = uio->uio_offset - zero_off;
4071
4072 flags |= IO_HEADZEROFILL;
4073 }
4074 }
4075 }
4076 if (flags & IO_TAILZEROFILL) {
4077 if (uio) {
4078 zero_off1 = uio->uio_offset + io_req_size;
4079
4080 if (zero_off1 < tailOff) {
4081 zero_cnt1 = tailOff - zero_off1;
4082 }
4083 }
4084 } else {
4085 if (uio && newEOF > oldEOF) {
4086 zero_off1 = uio->uio_offset + io_req_size;
4087
4088 if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
4089 zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
4090
4091 flags |= IO_TAILZEROFILL;
4092 }
4093 }
4094 }
4095 if (zero_cnt == 0 && uio == (struct uio *) 0) {
4096 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
4097 retval, 0, 0, 0, 0);
4098 return 0;
4099 }
4100 if (uio) {
4101 write_off = uio->uio_offset;
4102 write_cnt = (int)uio_resid(uio);
4103 /*
4104 * delay updating the sequential write info
4105 * in the control block until we've obtained
4106 * the lock for it
4107 */
4108 first_pass = TRUE;
4109 }
4110 while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
4111 /*
4112 * for this iteration of the loop, figure out where our starting point is
4113 */
4114 if (zero_cnt) {
4115 start_offset = (int)(zero_off & PAGE_MASK_64);
4116 upl_f_offset = zero_off - start_offset;
4117 } else if (io_resid) {
4118 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4119 upl_f_offset = uio->uio_offset - start_offset;
4120 } else {
4121 start_offset = (int)(zero_off1 & PAGE_MASK_64);
4122 upl_f_offset = zero_off1 - start_offset;
4123 }
4124 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
4125 (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
4126
4127 if (total_size > max_io_size) {
4128 total_size = max_io_size;
4129 }
4130
4131 cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
4132
4133 if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
4134 /*
4135 * assumption... total_size <= io_resid
4136 * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
4137 */
4138 if ((start_offset + total_size) > max_io_size) {
4139 total_size = max_io_size - start_offset;
4140 }
4141 xfer_resid = (int)total_size;
4142
4143 retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
4144
4145 if (retval) {
4146 break;
4147 }
4148
4149 io_resid -= (total_size - xfer_resid);
4150 total_size = xfer_resid;
4151 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4152 upl_f_offset = uio->uio_offset - start_offset;
4153
4154 if (total_size == 0) {
4155 if (start_offset) {
4156 /*
4157 * the write did not finish on a page boundary
4158 * which will leave upl_f_offset pointing to the
4159 * beginning of the last page written instead of
4160 * the page beyond it... bump it in this case
4161 * so that the cluster code records the last page
4162 * written as dirty
4163 */
4164 upl_f_offset += PAGE_SIZE_64;
4165 }
4166 upl_size = 0;
4167
4168 goto check_cluster;
4169 }
4170 }
4171 /*
4172 * compute the size of the upl needed to encompass
4173 * the requested write... limit each call to cluster_io
4174 * to the maximum UPL size... cluster_io will clip if
4175 * this exceeds the maximum io_size for the device,
4176 * make sure to account for
4177 * a starting offset that's not page aligned
4178 */
4179 upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4180
4181 if (upl_size > max_io_size) {
4182 upl_size = max_io_size;
4183 }
4184
4185 pages_in_upl = (int)(upl_size / PAGE_SIZE);
4186 io_size = (int)(upl_size - start_offset);
4187
4188 if ((long long)io_size > total_size) {
4189 io_size = (int)total_size;
4190 }
4191
4192 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
4193
4194
4195 /*
4196 * Gather the pages from the buffer cache.
4197 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
4198 * that we intend to modify these pages.
4199 */
4200 kret = ubc_create_upl_kernel(vp,
4201 upl_f_offset,
4202 (int)upl_size,
4203 &upl,
4204 &pl,
4205 UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY),
4206 VM_KERN_MEMORY_FILE);
4207 if (kret != KERN_SUCCESS) {
4208 panic("cluster_write_copy: failed to get pagelist");
4209 }
4210
4211 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
4212 upl, (int)upl_f_offset, start_offset, 0, 0);
4213
4214 if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
4215 int read_size;
4216
4217 /*
4218 * we're starting in the middle of the first page of the upl
4219 * and the page isn't currently valid, so we're going to have
4220 * to read it in first... this is a synchronous operation
4221 */
4222 read_size = PAGE_SIZE;
4223
4224 if ((upl_f_offset + read_size) > oldEOF) {
4225 read_size = (int)(oldEOF - upl_f_offset);
4226 }
4227
4228 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
4229 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4230 if (retval) {
4231 /*
4232 * we had an error during the read which causes us to abort
4233 * the current cluster_write request... before we do, we need
4234 * to release the rest of the pages in the upl without modifying
4235 * their state and mark the failed page in error
4236 */
4237 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4238
4239 if (upl_size > PAGE_SIZE) {
4240 ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size,
4241 UPL_ABORT_FREE_ON_EMPTY);
4242 }
4243
4244 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4245 upl, 0, 0, retval, 0);
4246 break;
4247 }
4248 }
4249 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
4250 /*
4251 * the last offset we're writing to in this upl does not end on a page
4252 * boundary... if it's not beyond the old EOF, then we'll also need to
4253 * pre-read this page in if it isn't already valid
4254 */
4255 upl_offset = upl_size - PAGE_SIZE;
4256
4257 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
4258 !upl_valid_page(pl, (int)(upl_offset / PAGE_SIZE))) {
4259 int read_size;
4260
4261 read_size = PAGE_SIZE;
4262
4263 if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) {
4264 read_size = (int)(oldEOF - (upl_f_offset + upl_offset));
4265 }
4266
4267 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
4268 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4269 if (retval) {
4270 /*
4271 * we had an error during the read which causes us to abort
4272 * the current cluster_write request... before we do, we
4273 * need to release the rest of the pages in the upl without
4274 * modifying their state and mark the failed page in error
4275 */
4276 ubc_upl_abort_range(upl, (upl_offset_t)upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4277
4278 if (upl_size > PAGE_SIZE) {
4279 ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
4280 }
4281
4282 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4283 upl, 0, 0, retval, 0);
4284 break;
4285 }
4286 }
4287 }
4288 xfer_resid = io_size;
4289 io_offset = start_offset;
4290
4291 while (zero_cnt && xfer_resid) {
4292 if (zero_cnt < (long long)xfer_resid) {
4293 bytes_to_zero = (int)zero_cnt;
4294 } else {
4295 bytes_to_zero = xfer_resid;
4296 }
4297
4298 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
4299
4300 xfer_resid -= bytes_to_zero;
4301 zero_cnt -= bytes_to_zero;
4302 zero_off += bytes_to_zero;
4303 io_offset += bytes_to_zero;
4304 }
4305 if (xfer_resid && io_resid) {
4306 u_int32_t io_requested;
4307
4308 bytes_to_move = min(io_resid, xfer_resid);
4309 io_requested = bytes_to_move;
4310
4311 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
4312
4313 if (retval) {
4314 ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
4315
4316 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
4317 upl, 0, 0, retval, 0);
4318 } else {
4319 io_resid -= bytes_to_move;
4320 xfer_resid -= bytes_to_move;
4321 io_offset += bytes_to_move;
4322 }
4323 }
4324 while (xfer_resid && zero_cnt1 && retval == 0) {
4325 if (zero_cnt1 < (long long)xfer_resid) {
4326 bytes_to_zero = (int)zero_cnt1;
4327 } else {
4328 bytes_to_zero = xfer_resid;
4329 }
4330
4331 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
4332
4333 xfer_resid -= bytes_to_zero;
4334 zero_cnt1 -= bytes_to_zero;
4335 zero_off1 += bytes_to_zero;
4336 io_offset += bytes_to_zero;
4337 }
4338 if (retval == 0) {
4339 int do_zeroing = 1;
4340
4341 io_size += start_offset;
4342
4343 /* Force more restrictive zeroing behavior only on APFS */
4344 if ((vnode_tag(vp) == VT_APFS) && (newEOF < oldEOF)) {
4345 do_zeroing = 0;
4346 }
4347
4348 if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
4349 /*
4350 * if we're extending the file with this write
4351 * we'll zero fill the rest of the page so that
4352 * if the file gets extended again in such a way as to leave a
4353 * hole starting at this EOF, we'll have zeros in the correct spot
4354 */
4355 cluster_zero(upl, io_size, (int)(upl_size - io_size), NULL);
4356 }
4357 /*
4358 * release the upl now if we hold one since...
4359 * 1) pages in it may be present in the sparse cluster map
4360 * and may span 2 separate buckets there... if they do and
4361 * we happen to have to flush a bucket to make room and it intersects
4362 * this upl, a deadlock may result on page BUSY
4363 * 2) we're delaying the I/O... from this point forward we're just updating
4364 * the cluster state... no need to hold the pages, so commit them
4365 * 3) IO_SYNC is set...
4366 * because we had to ask for a UPL that provides currently non-present pages, the
4367 * UPL has been automatically set to clear the dirty flags (both software and hardware)
4368 * upon committing it... this is not the behavior we want since it's possible for
4369 * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
4370 * we'll pick these pages back up later with the correct behavior specified.
4371 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
4372 * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
4373 * we hold since the flushing context is holding the cluster lock.
4374 */
4375 ubc_upl_commit_range(upl, 0, (upl_size_t)upl_size,
4376 UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
4377 check_cluster:
4378 /*
4379 * calculate the last logical block number
4380 * that this delayed I/O encompassed
4381 */
4382 cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
4383
4384 if (flags & IO_SYNC) {
4385 /*
4386 * if the IO_SYNC flag is set then we need to bypass
4387 * any clustering and immediately issue the I/O
4388 *
4389 * we don't hold the lock at this point
4390 *
4391 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
4392 * so that we correctly deal with a change in state of the hardware modify bit...
4393 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
4394 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
4395 * responsible for generating the correct sized I/O(s)
4396 */
4397 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE);
4398 } else {
4399 boolean_t defer_writes = FALSE;
4400
4401 if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) {
4402 defer_writes = TRUE;
4403 }
4404
4405 cluster_update_state_internal(vp, &cl, flags, defer_writes, &first_pass,
4406 write_off, write_cnt, newEOF, callback, callback_arg, FALSE);
4407 }
4408 }
4409 }
4410 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
4411
4412 return retval;
4413 }
4414
4415
4416
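/*
 * cluster_read/cluster_read_ext are the exported read entry points: each
 * uio vector is classified by cluster_io_type and dispatched to the cached
 * (IO_COPY), direct (IO_DIRECT) or physically contiguous (IO_CONTIG) path.
 */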
4417 int
4418 cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
4419 {
4420 return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
4421 }
4422
4423
4424 int
4425 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
4426 {
4427 int retval = 0;
4428 int flags;
4429 user_ssize_t cur_resid;
4430 u_int32_t io_size;
4431 u_int32_t read_length = 0;
4432 int read_type = IO_COPY;
4433
4434 flags = xflags;
4435
4436 if (vp->v_flag & VNOCACHE_DATA) {
4437 flags |= IO_NOCACHE;
4438 }
4439 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled) {
4440 flags |= IO_RAOFF;
4441 }
4442
4443 if (flags & IO_SKIP_ENCRYPTION) {
4444 flags |= IO_ENCRYPTED;
4445 }
4446
4447 /*
4448 * do a read through the cache if one of the following is true....
4449 * NOCACHE is not true
4450 * the uio request doesn't target USERSPACE
4451 * Alternatively, if IO_ENCRYPTED is set, then we want to bypass the cache as well.
4452 * Reading encrypted data from a CP filesystem should never result in the data touching
4453 * the UBC.
4454 *
4455 * otherwise, find out if we want the direct or contig variant for
4456 * the first vector in the uio request
4457 */
4458 if (((flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) || (flags & IO_ENCRYPTED)) {
4459 retval = cluster_io_type(uio, &read_type, &read_length, 0);
4460 }
4461
4462 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
4463 switch (read_type) {
4464 case IO_COPY:
4465 /*
4466 * make sure the uio_resid isn't too big...
4467 * internally, we want to handle all of the I/O in
4468 * chunk sizes that fit in a 32 bit int
4469 */
4470 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
4471 io_size = MAX_IO_REQUEST_SIZE;
4472 } else {
4473 io_size = (u_int32_t)cur_resid;
4474 }
4475
4476 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
4477 break;
4478
4479 case IO_DIRECT:
4480 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
4481 break;
4482
4483 case IO_CONTIG:
4484 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
4485 break;
4486
4487 case IO_UNKNOWN:
4488 retval = cluster_io_type(uio, &read_type, &read_length, 0);
4489 break;
4490 }
4491 }
4492 return retval;
4493 }
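/*
 * Illustrative caller sketch (hypothetical, not part of this file): a file
 * system's VNOP_READ typically just hands the uio to the cluster layer along
 * with the current EOF, e.g.
 *
 *     error = cluster_read(vp, uio, (off_t)filesize, ioflag);
 *
 * where 'filesize' bounds the read and 'ioflag' may carry hints such as
 * IO_NOCACHE.
 */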
4494
4495
4496
4497 static void
4498 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
4499 {
4500 int range;
4501 int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
4502
4503 if ((range = last_pg - start_pg)) {
4504 if (take_reference) {
4505 abort_flags |= UPL_ABORT_REFERENCE;
4506 }
4507
4508 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
4509 }
4510 }
4511
4512
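/*
 * cluster_read_copy is the buffered-read path: data already resident in the
 * UBC is copied directly to the caller, missing ranges are brought in with
 * UPL-based asynchronous cluster_io reads, and sequential access patterns
 * drive read-ahead/prefetch unless throttling or IO_RAOFF/IO_NOCACHE
 * disable it.
 */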
4513 static int
4514 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
4515 {
4516 upl_page_info_t *pl;
4517 upl_t upl = NULL;
4518 vm_offset_t upl_offset;
4519 u_int32_t upl_size;
4520 off_t upl_f_offset;
4521 int start_offset;
4522 int start_pg;
4523 int last_pg;
4524 int uio_last = 0;
4525 int pages_in_upl;
4526 off_t max_size;
4527 off_t last_ioread_offset;
4528 off_t last_request_offset;
4529 kern_return_t kret;
4530 int error = 0;
4531 int retval = 0;
4532 u_int32_t size_of_prefetch;
4533 u_int32_t xsize;
4534 u_int32_t io_size;
4535 u_int32_t max_rd_size;
4536 u_int32_t max_io_size;
4537 u_int32_t max_prefetch;
4538 u_int rd_ahead_enabled = 1;
4539 u_int prefetch_enabled = 1;
4540 struct cl_readahead * rap;
4541 struct clios iostate;
4542 struct cl_extent extent;
4543 int bflag;
4544 int take_reference = 1;
4545 int policy = IOPOL_DEFAULT;
4546 boolean_t iolock_inited = FALSE;
4547
4548 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
4549 (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
4550
4551 if (flags & IO_ENCRYPTED) {
4552 panic("encrypted blocks will hit UBC!");
4553 }
4554
4555 policy = throttle_get_io_policy(NULL);
4556
4557 if (policy == THROTTLE_LEVEL_TIER3 || policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE)) {
4558 take_reference = 0;
4559 }
4560
4561 if (flags & IO_PASSIVE) {
4562 bflag = CL_PASSIVE;
4563 } else {
4564 bflag = 0;
4565 }
4566
4567 if (flags & IO_NOCACHE) {
4568 bflag |= CL_NOCACHE;
4569 }
4570
4571 if (flags & IO_SKIP_ENCRYPTION) {
4572 bflag |= CL_ENCRYPTED;
4573 }
4574
4575 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
4576 max_prefetch = cluster_max_prefetch(vp, max_io_size, prefetch_max);
4577 max_rd_size = max_prefetch;
4578
4579 last_request_offset = uio->uio_offset + io_req_size;
4580
4581 if (last_request_offset > filesize) {
4582 last_request_offset = filesize;
4583 }
4584
4585 if ((flags & (IO_RAOFF | IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
4586 rd_ahead_enabled = 0;
4587 rap = NULL;
4588 } else {
4589 if (cluster_is_throttled(vp)) {
4590 /*
4591 * we're in the throttle window, at the very least
4592 * we want to limit the size of the I/O we're about
4593 * to issue
4594 */
4595 rd_ahead_enabled = 0;
4596 prefetch_enabled = 0;
4597
4598 max_rd_size = calculate_max_throttle_size(vp);
4599 }
4600 if ((rap = cluster_get_rap(vp)) == NULL) {
4601 rd_ahead_enabled = 0;
4602 } else {
4603 extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
4604 extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
4605 }
4606 }
4607 if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
4608 /*
4609 * determine if we already have a read-ahead in the pipe courtesy of the
4610 * last read system call that was issued...
4611 * if so, pick up its extent to determine where we should start
4612 * with respect to any read-ahead that might be necessary to
4613 * garner all the data needed to complete this read system call
4614 */
4615 last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
4616
4617 if (last_ioread_offset < uio->uio_offset) {
4618 last_ioread_offset = (off_t)0;
4619 } else if (last_ioread_offset > last_request_offset) {
4620 last_ioread_offset = last_request_offset;
4621 }
4622 } else {
4623 last_ioread_offset = (off_t)0;
4624 }
4625
4626 while (io_req_size && uio->uio_offset < filesize && retval == 0) {
4627 max_size = filesize - uio->uio_offset;
4628 bool leftover_upl_aborted = false;
4629
4630 if ((off_t)(io_req_size) < max_size) {
4631 io_size = io_req_size;
4632 } else {
4633 io_size = (u_int32_t)max_size;
4634 }
4635
4636 if (!(flags & IO_NOCACHE)) {
4637 while (io_size) {
4638 u_int32_t io_resid;
4639 u_int32_t io_requested;
4640
4641 /*
4642 * if we keep finding the pages we need already in the cache, then
4643 * don't bother to call cluster_read_prefetch since it costs CPU cycles
4644 * to determine that we have all the pages we need... once we miss in
4645 * the cache and have issued an I/O, then we'll assume that we're likely
4646 * to continue to miss in the cache and it's to our advantage to try and prefetch
4647 */
4648 if (last_request_offset && last_ioread_offset && (size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset))) {
4649 if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
4650 /*
4651 * we've already issued I/O for this request and
4652 * there's still work to do and
4653 * our prefetch stream is running dry, so issue a
4654 * pre-fetch I/O... the I/O latency will overlap
4655 * with the copying of the data
4656 */
4657 if (size_of_prefetch > max_rd_size) {
4658 size_of_prefetch = max_rd_size;
4659 }
4660
4661 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4662
4663 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4664
4665 if (last_ioread_offset > last_request_offset) {
4666 last_ioread_offset = last_request_offset;
4667 }
4668 }
4669 }
4670 /*
4671 * limit the size of the copy we're about to do so that
4672 * we can notice that our I/O pipe is running dry and
4673 * get the next I/O issued before it does go dry
4674 */
4675 if (last_ioread_offset && io_size > (max_io_size / 4)) {
4676 io_resid = (max_io_size / 4);
4677 } else {
4678 io_resid = io_size;
4679 }
4680
4681 io_requested = io_resid;
4682
4683 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);
4684
4685 xsize = io_requested - io_resid;
4686
4687 io_size -= xsize;
4688 io_req_size -= xsize;
4689
4690 if (retval || io_resid) {
4691 /*
4692 * if we run into a real error or
4693 * a page that is not in the cache
4694 * we need to leave streaming mode
4695 */
4696 break;
4697 }
4698
4699 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
4700 /*
4701 * we're already finished the I/O for this read request
4702 * let's see if we should do a read-ahead
4703 */
4704 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4705 }
4706 }
4707 if (retval) {
4708 break;
4709 }
4710 if (io_size == 0) {
4711 if (rap != NULL) {
4712 if (extent.e_addr < rap->cl_lastr) {
4713 rap->cl_maxra = 0;
4714 }
4715 rap->cl_lastr = extent.e_addr;
4716 }
4717 break;
4718 }
4719 /*
4720 * recompute max_size since cluster_copy_ubc_data_internal
4721 * may have advanced uio->uio_offset
4722 */
4723 max_size = filesize - uio->uio_offset;
4724 }
4725
4726 iostate.io_completed = 0;
4727 iostate.io_issued = 0;
4728 iostate.io_error = 0;
4729 iostate.io_wanted = 0;
4730
4731 if ((flags & IO_RETURN_ON_THROTTLE)) {
4732 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
4733 if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
4734 /*
4735 * we're in the throttle window and at least 1 I/O
4736 * has already been issued by a throttleable thread
4737 * in this window, so return with EAGAIN to indicate
4738 * to the FS issuing the cluster_read call that it
4739 * should now throttle after dropping any locks
4740 */
4741 throttle_info_update_by_mount(vp->v_mount);
4742
4743 retval = EAGAIN;
4744 break;
4745 }
4746 }
4747 }
4748
4749 /*
4750 * compute the size of the upl needed to encompass
4751 * the requested read... limit each call to cluster_io
4752 * to the maximum UPL size... cluster_io will clip if
4753 * this exceeds the maximum io_size for the device,
4754 * make sure to account for
4755 * a starting offset that's not page aligned
4756 */
4757 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
4758 upl_f_offset = uio->uio_offset - (off_t)start_offset;
4759
4760 if (io_size > max_rd_size) {
4761 io_size = max_rd_size;
4762 }
4763
4764 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4765
4766 if (flags & IO_NOCACHE) {
4767 if (upl_size > max_io_size) {
4768 upl_size = max_io_size;
4769 }
4770 } else {
4771 if (upl_size > max_io_size / 4) {
4772 upl_size = max_io_size / 4;
4773 upl_size &= ~PAGE_MASK;
4774
4775 if (upl_size == 0) {
4776 upl_size = PAGE_SIZE;
4777 }
4778 }
4779 }
4780 pages_in_upl = upl_size / PAGE_SIZE;
4781
4782 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
4783 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4784
4785 kret = ubc_create_upl_kernel(vp,
4786 upl_f_offset,
4787 upl_size,
4788 &upl,
4789 &pl,
4790 UPL_FILE_IO | UPL_SET_LITE,
4791 VM_KERN_MEMORY_FILE);
4792 if (kret != KERN_SUCCESS) {
4793 panic("cluster_read_copy: failed to get pagelist");
4794 }
4795
4796 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
4797 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4798
4799 /*
4800 * scan from the beginning of the upl looking for the first
4801 * non-valid page.... this will become the first page in
4802 * the request we're going to make to 'cluster_io'... if all
4803 * of the pages are valid, we won't call through to 'cluster_io'
4804 */
4805 for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
4806 if (!upl_valid_page(pl, start_pg)) {
4807 break;
4808 }
4809 }
4810
4811 /*
4812 * scan from the starting invalid page looking for a valid
4813 * page before the end of the upl is reached, if we
4814 * find one, then it will be the last page of the request to
4815 * 'cluster_io'
4816 */
4817 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
4818 if (upl_valid_page(pl, last_pg)) {
4819 break;
4820 }
4821 }
4822
4823 if (start_pg < last_pg) {
4824 /*
4825 * we found a range of 'invalid' pages that must be filled
4826 * if the last page in this range is the last page of the file
4827 * we may have to clip the size of it to keep from reading past
4828 * the end of the last physical block associated with the file
4829 */
4830 if (iolock_inited == FALSE) {
4831 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
4832
4833 iolock_inited = TRUE;
4834 }
4835 upl_offset = start_pg * PAGE_SIZE;
4836 io_size = (last_pg - start_pg) * PAGE_SIZE;
4837
4838 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
4839 io_size = (u_int32_t)(filesize - (upl_f_offset + upl_offset));
4840 }
4841
4842 /*
4843 * Find out if this needs verification, we'll have to manage the UPL
4844 * differently if so. Note that this call only lets us know if
4845 * verification is enabled on this mount point, the actual verification
4846 * is performed in the File system.
4847 */
4848 size_t verify_block_size = 0;
4849 if ((VNOP_VERIFY(vp, start_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL) == 0) /* && verify_block_size */) {
4850 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4851 if (!upl_valid_page(pl, uio_last)) {
4852 break;
4853 }
4854 }
4855 if (uio_last < pages_in_upl) {
4856 /*
4857 * there were some invalid pages beyond the valid pages
4858 * that we didn't issue an I/O for, just release them
4859 * unchanged now, so that any prefetch/readahead can
4860 * include them
4861 */
4862 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4863 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4864 leftover_upl_aborted = true;
4865 }
4866 }
4867
4868 /*
4869 * issue an asynchronous read to cluster_io
4870 */
4871
4872 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
4873 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
4874
4875 if (rap) {
4876 if (extent.e_addr < rap->cl_maxra) {
4877 /*
4878 * we've just issued a read for a block that should have been
4879 * in the cache courtesy of the read-ahead engine... something
4880 * has gone wrong with the pipeline, so reset the read-ahead
4881 * logic which will cause us to restart from scratch
4882 */
4883 rap->cl_maxra = 0;
4884 }
4885 }
4886 }
4887 if (error == 0) {
4888 /*
4889 * if the read completed successfully, or there was no I/O request
4890 * issued, then copy the data into user land via 'cluster_copy_upl_data'
4891 * we'll first add on any 'valid'
4892 * pages that were present in the upl when we acquired it.
4893 */
4894 u_int val_size;
4895
4896 if (!leftover_upl_aborted) {
4897 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4898 if (!upl_valid_page(pl, uio_last)) {
4899 break;
4900 }
4901 }
4902 if (uio_last < pages_in_upl) {
4903 /*
4904 * there were some invalid pages beyond the valid pages
4905 * that we didn't issue an I/O for, just release them
4906 * unchanged now, so that any prefetch/readahead can
4907 * include them
4908 */
4909 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4910 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4911 }
4912 }
4913
4914 /*
4915 * compute size to transfer this round, if io_req_size is
4916 * still non-zero after this attempt, we'll loop around and
4917 * set up for another I/O.
4918 */
4919 val_size = (uio_last * PAGE_SIZE) - start_offset;
4920
4921 if (val_size > max_size) {
4922 val_size = (u_int)max_size;
4923 }
4924
4925 if (val_size > io_req_size) {
4926 val_size = io_req_size;
4927 }
4928
4929 if ((uio->uio_offset + val_size) > last_ioread_offset) {
4930 last_ioread_offset = uio->uio_offset + val_size;
4931 }
4932
4933 if ((size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset)) && prefetch_enabled) {
4934 if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
4935 /*
4936 * if there's still I/O left to do for this request, and...
4937 * we're not in hard throttle mode, and...
4938 * we're close to using up the previous prefetch, then issue a
4939 * new pre-fetch I/O... the I/O latency will overlap
4940 * with the copying of the data
4941 */
4942 if (size_of_prefetch > max_rd_size) {
4943 size_of_prefetch = max_rd_size;
4944 }
4945
4946 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4947
4948 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4949
4950 if (last_ioread_offset > last_request_offset) {
4951 last_ioread_offset = last_request_offset;
4952 }
4953 }
4954 } else if ((uio->uio_offset + val_size) == last_request_offset) {
4955 /*
4956 * this transfer will finish this request, so...
4957 * let's try to read ahead if we're in
4958 * a sequential access pattern and we haven't
4959 * explicitly disabled it
4960 */
4961 if (rd_ahead_enabled) {
4962 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4963 }
4964
4965 if (rap != NULL) {
4966 if (extent.e_addr < rap->cl_lastr) {
4967 rap->cl_maxra = 0;
4968 }
4969 rap->cl_lastr = extent.e_addr;
4970 }
4971 }
4972 if (iolock_inited == TRUE) {
4973 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4974 }
4975
4976 if (iostate.io_error) {
4977 error = iostate.io_error;
4978 } else {
4979 u_int32_t io_requested;
4980
4981 io_requested = val_size;
4982
4983 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
4984
4985 io_req_size -= (val_size - io_requested);
4986 }
4987 } else {
4988 if (iolock_inited == TRUE) {
4989 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4990 }
4991 }
4992 if (start_pg < last_pg) {
4993 /*
4994 * compute the range of pages that we actually issued an I/O for
4995 * and either commit them as valid if the I/O succeeded
4996 * or abort them if the I/O failed or we're not supposed to
4997 * keep them in the cache
4998 */
4999 io_size = (last_pg - start_pg) * PAGE_SIZE;
5000
5001 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
5002
5003 if (error || (flags & IO_NOCACHE)) {
5004 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
5005 UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
5006 } else {
5007 int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;
5008
5009 if (take_reference) {
5010 commit_flags |= UPL_COMMIT_INACTIVATE;
5011 } else {
5012 commit_flags |= UPL_COMMIT_SPECULATE;
5013 }
5014
5015 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
5016 }
5017 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
5018 }
5019 if ((last_pg - start_pg) < pages_in_upl) {
5020 /*
5021 * the set of pages that we issued an I/O for did not encompass
5022 * the entire upl... so just release these without modifying
5023 * their state
5024 */
5025 if (error) {
5026 if (leftover_upl_aborted) {
5027 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, (uio_last - start_pg) * PAGE_SIZE,
5028 UPL_ABORT_FREE_ON_EMPTY);
5029 } else {
5030 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
5031 }
5032 } else {
5033 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
5034 upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
5035
5036 /*
5037 * handle any valid pages at the beginning of
5038 * the upl... release these appropriately
5039 */
5040 cluster_read_upl_release(upl, 0, start_pg, take_reference);
5041
5042 /*
5043 * handle any valid pages immediately after the
5044 * pages we issued I/O for... ... release these appropriately
5045 */
5046 cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
5047
5048 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
5049 }
5050 }
5051 if (retval == 0) {
5052 retval = error;
5053 }
5054
5055 if (io_req_size) {
5056 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
5057
5058 if (cluster_is_throttled(vp)) {
5059 /*
5060 * we're in the throttle window, at the very least
5061 * we want to limit the size of the I/O we're about
5062 * to issue
5063 */
5064 rd_ahead_enabled = 0;
5065 prefetch_enabled = 0;
5066 max_rd_size = max_throttle_size;
5067 } else {
5068 if (max_rd_size == max_throttle_size) {
5069 /*
5070 * coming out of throttled state
5071 */
5072 if (policy != THROTTLE_LEVEL_TIER3 && policy != THROTTLE_LEVEL_TIER2) {
5073 if (rap != NULL) {
5074 rd_ahead_enabled = 1;
5075 }
5076 prefetch_enabled = 1;
5077 }
5078 max_rd_size = max_prefetch;
5079 last_ioread_offset = 0;
5080 }
5081 }
5082 }
5083 }
5084 if (iolock_inited == TRUE) {
5085 /*
5086 * cluster_io returned an error after it
5087 * had already issued some I/O. we need
5088 * to wait for that I/O to complete before
5089 * we can destroy the iostate mutex...
5090 * 'retval' already contains the early error
5091 * so no need to pick it up from iostate.io_error
5092 */
5093 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
5094
5095 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5096 }
5097 if (rap != NULL) {
5098 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
5099 (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
5100
5101 lck_mtx_unlock(&rap->cl_lockr);
5102 } else {
5103 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
5104 (int)uio->uio_offset, io_req_size, 0, retval, 0);
5105 }
5106
5107 return retval;
5108 }
5109
5110 /*
5111 * We don't want another read/write lock for every vnode in the system
5112 * so we keep a hash of them here. There should never be very many of
5113 * these around at any point in time.
5114 */
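/*
 * Typical usage (illustrative sketch):
 *
 *     cl_direct_read_lock_t *lck = cluster_lock_direct_read(vp, LCK_RW_TYPE_SHARED);
 *     ... issue the direct I/O against vp ...
 *     cluster_unlock_direct_read(lck);
 */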
5115 cl_direct_read_lock_t *
5116 cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type)
5117 {
5118 struct cl_direct_read_locks *head
5119 = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp)
5120 % CL_DIRECT_READ_LOCK_BUCKETS];
5121
5122 struct cl_direct_read_lock *lck, *new_lck = NULL;
5123
5124 for (;;) {
5125 lck_spin_lock(&cl_direct_read_spin_lock);
5126
5127 LIST_FOREACH(lck, head, chain) {
5128 if (lck->vp == vp) {
5129 ++lck->ref_count;
5130 lck_spin_unlock(&cl_direct_read_spin_lock);
5131 if (new_lck) {
5132 // Someone beat us to it, ditch the allocation
5133 lck_rw_destroy(&new_lck->rw_lock, &cl_mtx_grp);
5134 kfree_type(cl_direct_read_lock_t, new_lck);
5135 }
5136 lck_rw_lock(&lck->rw_lock, type);
5137 return lck;
5138 }
5139 }
5140
5141 if (new_lck) {
5142 // Use the lock we allocated
5143 LIST_INSERT_HEAD(head, new_lck, chain);
5144 lck_spin_unlock(&cl_direct_read_spin_lock);
5145 lck_rw_lock(&new_lck->rw_lock, type);
5146 return new_lck;
5147 }
5148
5149 lck_spin_unlock(&cl_direct_read_spin_lock);
5150
5151 // Allocate a new lock
5152 new_lck = kalloc_type(cl_direct_read_lock_t, Z_WAITOK);
5153 lck_rw_init(&new_lck->rw_lock, &cl_mtx_grp, LCK_ATTR_NULL);
5154 new_lck->vp = vp;
5155 new_lck->ref_count = 1;
5156
5157 // Got to go round again
5158 }
5159 }
5160
5161 void
5162 cluster_unlock_direct_read(cl_direct_read_lock_t *lck)
5163 {
5164 lck_rw_done(&lck->rw_lock);
5165
5166 lck_spin_lock(&cl_direct_read_spin_lock);
5167 if (lck->ref_count == 1) {
5168 LIST_REMOVE(lck, chain);
5169 lck_spin_unlock(&cl_direct_read_spin_lock);
5170 lck_rw_destroy(&lck->rw_lock, &cl_mtx_grp);
5171 kfree_type(cl_direct_read_lock_t, lck);
5172 } else {
5173 --lck->ref_count;
5174 lck_spin_unlock(&cl_direct_read_spin_lock);
5175 }
5176 }
5177
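/*
 * cluster_read_direct is the uncached read path: the user buffer is wired
 * with vm_map_get_upl and the I/O is issued straight into it (CL_DIRECT_IO),
 * coalescing multiple iovecs into a vector UPL when possible; requests that
 * aren't devblocksize/alignment friendly typically fall back to the copy path.
 */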
5178 static int
5179 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5180 int flags, int (*callback)(buf_t, void *), void *callback_arg)
5181 {
5182 upl_t upl = NULL;
5183 upl_page_info_t *pl;
5184 off_t max_io_size;
5185 vm_offset_t upl_offset, vector_upl_offset = 0;
5186 upl_size_t upl_size = 0, vector_upl_size = 0;
5187 vm_size_t upl_needed_size;
5188 unsigned int pages_in_pl;
5189 upl_control_flags_t upl_flags;
5190 kern_return_t kret = KERN_SUCCESS;
5191 unsigned int i;
5192 int force_data_sync;
5193 int retval = 0;
5194 int no_zero_fill = 0;
5195 int io_flag = 0;
5196 int misaligned = 0;
5197 struct clios iostate;
5198 user_addr_t iov_base;
5199 u_int32_t io_req_size;
5200 u_int32_t offset_in_file;
5201 u_int32_t offset_in_iovbase;
5202 u_int32_t io_size;
5203 u_int32_t io_min;
5204 u_int32_t xsize;
5205 u_int32_t devblocksize;
5206 u_int32_t mem_alignment_mask;
5207 u_int32_t max_upl_size;
5208 u_int32_t max_rd_size;
5209 u_int32_t max_rd_ahead;
5210 u_int32_t max_vector_size;
5211 boolean_t io_throttled = FALSE;
5212
5213 u_int32_t vector_upl_iosize = 0;
5214 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
5215 off_t v_upl_uio_offset = 0;
5216 int vector_upl_index = 0;
5217 upl_t vector_upl = NULL;
5218 cl_direct_read_lock_t *lock = NULL;
5219
5220 assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
5221
5222 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
5223 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5224
5225 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
5226
5227 max_rd_size = max_upl_size;
5228
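/*
* scale the max read-ahead window off of max_rd_size, but clamp it
* to overlapping_read_max... the multiply goes through
* os_mul_overflow so that an overflowing product also falls back
* to the clamp value
*/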
5229 if (__improbable(os_mul_overflow(max_rd_size, IO_SCALE(vp, 2),
5230 &max_rd_ahead) || (max_rd_ahead > overlapping_read_max))) {
5231 max_rd_ahead = overlapping_read_max;
5232 }
5233
5234 io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
5235
5236 if (flags & IO_PASSIVE) {
5237 io_flag |= CL_PASSIVE;
5238 }
5239
5240 if (flags & IO_ENCRYPTED) {
5241 io_flag |= CL_RAW_ENCRYPTED;
5242 }
5243
5244 if (flags & IO_NOCACHE) {
5245 io_flag |= CL_NOCACHE;
5246 }
5247
5248 if (flags & IO_SKIP_ENCRYPTION) {
5249 io_flag |= CL_ENCRYPTED;
5250 }
5251
5252 iostate.io_completed = 0;
5253 iostate.io_issued = 0;
5254 iostate.io_error = 0;
5255 iostate.io_wanted = 0;
5256
5257 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5258
5259 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5260 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5261
5262 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5263 (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
5264
5265 if (devblocksize == 1) {
5266 /*
5267 * the AFP client advertises a devblocksize of 1
5268 * however, its BLOCKMAP routine maps to physical
5269 * blocks that are PAGE_SIZE in size...
5270 * therefore we can't ask for I/Os that aren't page aligned
5271 * or aren't multiples of PAGE_SIZE in size
5272 * by setting devblocksize to PAGE_SIZE, we re-instate
5273 * the old behavior we had before the mem_alignment_mask
5274 * changes went in...
5275 */
5276 devblocksize = PAGE_SIZE;
5277 }
5278
5279 /*
5280 * We are going to need this uio for the prefaulting later,
5281 * especially for the cases where multiple non-contiguous
5282 * iovs are passed into this routine.
5283 */
5284 uio_t uio_acct = uio_duplicate(uio);
5285
5286 next_dread:
5287 io_req_size = *read_length;
5288 iov_base = uio_curriovbase(uio);
5289
5290 offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
5291 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
5292
5293 if (vm_map_page_mask(current_map()) < PAGE_MASK) {
5294 /*
5295 * XXX TODO4K
5296 * Direct I/O might not work as expected from a 16k kernel space
5297 * to a 4k user space because each 4k chunk might point to
5298 * a different 16k physical page...
5299 * Let's go the "misaligned" way.
5300 */
5301 if (!misaligned) {
5302 DEBUG4K_VFS("forcing misaligned\n");
5303 }
5304 misaligned = 1;
5305 }
5306
5307 if (offset_in_file || offset_in_iovbase) {
5308 /*
5309 * one of the 2 important offsets is misaligned
5310 * so fire an I/O through the cache for this entire vector
5311 */
5312 misaligned = 1;
5313 }
5314 if (iov_base & (devblocksize - 1)) {
5315 /*
5316 * the offset in memory must be on a device block boundary
5317 * so that we can guarantee that we can generate an
5318 * I/O that ends on a page boundary in cluster_io
5319 */
5320 misaligned = 1;
5321 }
5322
5323 max_io_size = filesize - uio->uio_offset;
5324
5325 /*
5326 * The user must request IO in aligned chunks. If the
5327 * offset into the file is bad, or the userland pointer
5328 * is non-aligned, then we cannot service the encrypted IO request.
5329 */
5330 if (flags & IO_ENCRYPTED) {
5331 if (misaligned || (io_req_size & (devblocksize - 1))) {
5332 retval = EINVAL;
5333 }
5334
5335 max_io_size = roundup(max_io_size, devblocksize);
5336 }
5337
5338 if ((off_t)io_req_size > max_io_size) {
5339 io_req_size = (u_int32_t)max_io_size;
5340 }
5341
5342 /*
5343 * When we get to this point, we know...
5344 * -- the offset into the file is on a devblocksize boundary
5345 */
5346
5347 while (io_req_size && retval == 0) {
5348 u_int32_t io_start;
5349
5350 if (cluster_is_throttled(vp)) {
5351 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
5352
5353 /*
5354 * we're in the throttle window, at the very least
5355 * we want to limit the size of the I/O we're about
5356 * to issue
5357 */
5358 max_rd_size = max_throttle_size;
5359 max_rd_ahead = max_throttle_size - 1;
5360 max_vector_size = max_throttle_size;
5361 } else {
5362 max_rd_size = max_upl_size;
5363 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
5364 max_vector_size = MAX_VECTOR_UPL_SIZE;
5365 }
5366 io_start = io_size = io_req_size;
5367
5368 /*
5369 * First look for pages already in the cache
5370 * and move them to user space. But only do this
5371 * check if we are not retrieving encrypted data directly
5372 * from the filesystem; those blocks should never
5373 * be in the UBC.
5374 *
5375 * cluster_copy_ubc_data returns the resid
5376 * in io_size
5377 */
5378 if ((flags & IO_ENCRYPTED) == 0) {
5379 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
5380 }
5381 /*
5382 * calculate the number of bytes actually copied
5383 * starting size - residual
5384 */
5385 xsize = io_start - io_size;
5386
5387 io_req_size -= xsize;
5388
5389 if (useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
5390 /*
5391 * We found something in the cache or we have an iov_base that's not
5392 * page-aligned.
5393 *
5394 * Issue all I/O's that have been collected within this Vectored UPL.
5395 */
5396 if (vector_upl_index) {
5397 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5398 reset_vector_run_state();
5399 }
5400
5401 if (xsize) {
5402 useVectorUPL = 0;
5403 }
5404
5405 /*
5406 * After this point, if we are using the Vector UPL path and the base is
5407 * not page-aligned then the UPL with that base will be the first in the vector UPL.
5408 */
5409 }
5410
5411 /*
5412 * check to see if we are finished with this request.
5413 *
5414 * If we satisfied this IO already, then io_req_size will be 0.
5415 * Otherwise, see if the IO was mis-aligned and needs to go through
5416 * the UBC to deal with the 'tail'.
5417 *
5418 */
5419 if (io_req_size == 0 || (misaligned)) {
5420 /*
5421 * see if there's another uio vector to
5422 * process that's of type IO_DIRECT
5423 *
5424 * break out of while loop to get there
5425 */
5426 break;
5427 }
5428 /*
5429 * assume the request ends on a device block boundary
5430 */
5431 io_min = devblocksize;
5432
5433 /*
5434 * we can handle I/O's in multiples of the device block size
5435 * however, if io_size isn't a multiple of devblocksize we
5436 * want to clip it back to the nearest page boundary since
5437 * we are going to have to go through cluster_read_copy to
5438 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
5439 * multiple, we avoid asking the drive for the same physical
5440 * blocks twice.. once for the partial page at the end of the
5441 * request and a 2nd time for the page we read into the cache
5442 * (which overlaps the end of the direct read) in order to
5443 * get at the overhang bytes
5444 */
5445 if (io_size & (devblocksize - 1)) {
5446 assert(!(flags & IO_ENCRYPTED));
5447 /*
5448 * Clip the request to the previous page size boundary
5449 * since request does NOT end on a device block boundary
5450 */
5451 io_size &= ~PAGE_MASK;
5452 io_min = PAGE_SIZE;
5453 }
5454 if (retval || io_size < io_min) {
5455 /*
5456 * either an error or we only have the tail left to
5457 * complete via the copy path...
5458 * we may have already spun some portion of this request
5459 * off as async requests... we need to wait for the I/O
5460 * to complete before returning
5461 */
5462 goto wait_for_dreads;
5463 }
5464
5465 /*
5466 * Don't re-check the UBC data if we are looking for uncached IO
5467 * or asking for encrypted blocks.
5468 */
5469 if ((flags & IO_ENCRYPTED) == 0) {
5470 if ((xsize = io_size) > max_rd_size) {
5471 xsize = max_rd_size;
5472 }
5473
5474 io_size = 0;
5475
5476 if (!lock) {
5477 /*
5478 * We hold a lock here between the time we check the
5479 * cache and the time we issue I/O. This saves us
5480 * from having to lock the pages in the cache. Not
5481 * all clients will care about this lock but some
5482 * clients may want to guarantee stability between
5483 * here and when the I/O is issued in which case they
5484 * will take the lock exclusively.
5485 */
5486 lock = cluster_lock_direct_read(vp, LCK_RW_TYPE_SHARED);
5487 }
5488
5489 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
5490
5491 if (io_size == 0) {
5492 /*
5493 * a page must have just come into the cache
5494 * since the first page in this range is no
5495 * longer absent, go back and re-evaluate
5496 */
5497 continue;
5498 }
5499 }
5500 if ((flags & IO_RETURN_ON_THROTTLE)) {
5501 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
5502 if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
5503 /*
5504 * we're in the throttle window and at least 1 I/O
5505 * has already been issued by a throttleable thread
5506 * in this window, so return with EAGAIN to indicate
5507 * to the FS issuing the cluster_read call that it
5508 * should now throttle after dropping any locks
5509 */
5510 throttle_info_update_by_mount(vp->v_mount);
5511
5512 io_throttled = TRUE;
5513 goto wait_for_dreads;
5514 }
5515 }
5516 }
5517 if (io_size > max_rd_size) {
5518 io_size = max_rd_size;
5519 }
5520
5521 iov_base = uio_curriovbase(uio);
5522
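/*
* compute the offset of this I/O within its first page and the
* UPL size (rounded up to whole pages) needed to cover
* upl_offset + io_size
*/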
5523 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5524 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5525
5526 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
5527 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
5528
5529 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
5530 no_zero_fill = 1;
5531 } else {
5532 no_zero_fill = 0;
5533 }
5534
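/*
* make up to 3 attempts to create a UPL that maps the user buffer
* with all of its pages present... if an attempt comes back with
* absent pages, abort that UPL and retry, adding UPL_FORCE_DATA_SYNC
* on the later attempts; if we still can't get a fully populated
* UPL, bail to wait_for_dreads and let the tail be handled there
*/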
5535 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5536 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
5537 pages_in_pl = 0;
5538 upl_size = (upl_size_t)upl_needed_size;
5539 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5540 if (no_zero_fill) {
5541 upl_flags |= UPL_NOZEROFILL;
5542 }
5543 if (force_data_sync) {
5544 upl_flags |= UPL_FORCE_DATA_SYNC;
5545 }
5546
5547 kret = vm_map_create_upl(map,
5548 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
5549 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE);
5550
5551 if (kret != KERN_SUCCESS) {
5552 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5553 (int)upl_offset, upl_size, io_size, kret, 0);
5554 /*
5555 * failed to get pagelist
5556 *
5557 * we may have already spun some portion of this request
5558 * off as async requests... we need to wait for the I/O
5559 * to complete before returning
5560 */
5561 goto wait_for_dreads;
5562 }
5563 pages_in_pl = upl_size / PAGE_SIZE;
5564 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
5565
5566 for (i = 0; i < pages_in_pl; i++) {
5567 if (!upl_page_present(pl, i)) {
5568 break;
5569 }
5570 }
5571 if (i == pages_in_pl) {
5572 break;
5573 }
5574
5575 ubc_upl_abort(upl, 0);
5576 }
5577 if (force_data_sync >= 3) {
5578 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5579 (int)upl_offset, upl_size, io_size, kret, 0);
5580
5581 goto wait_for_dreads;
5582 }
5583 /*
5584 * Consider the possibility that upl_size wasn't satisfied... a short UPL is only usable if it maps the start of the user buffer (upl_offset == 0); otherwise give up on this pass and let the copy path pick up the tail
5585 */
5586 if (upl_size < upl_needed_size) {
5587 if (upl_size && upl_offset == 0) {
5588 io_size = upl_size;
5589 } else {
5590 io_size = 0;
5591 }
5592 }
5593 if (io_size == 0) {
5594 ubc_upl_abort(upl, 0);
5595 goto wait_for_dreads;
5596 }
5597 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5598 (int)upl_offset, upl_size, io_size, kret, 0);
5599
5600 if (useVectorUPL) {
5601 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
5602 if (end_off) {
5603 issueVectorUPL = 1;
5604 }
5605 /*
5606 * After this point, if we are using a vector UPL, then
5607 * either all the UPL elements end on a page boundary OR
5608 * this UPL is the last element because it does not end
5609 * on a page boundary.
5610 */
5611 }
5612
5613 /*
5614 * request asynchronously so that we can overlap
5615 * the preparation of the next I/O
5616 * if there are already too many outstanding reads
5617 * wait until some have completed before issuing the next read
5618 */
5619 cluster_iostate_wait(&iostate, max_rd_ahead, "cluster_read_direct");
5620
5621 if (iostate.io_error) {
5622 /*
5623 * one of the earlier reads we issued ran into a hard error
5624 * don't issue any more reads, cleanup the UPL
5625 * that was just created but not used, then
5626 * go wait for any other reads to complete before
5627 * returning the error to the caller
5628 */
5629 ubc_upl_abort(upl, 0);
5630
5631 goto wait_for_dreads;
5632 }
5633 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
5634 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
5635
5636 if (!useVectorUPL) {
5637 if (no_zero_fill) {
5638 io_flag &= ~CL_PRESERVE;
5639 } else {
5640 io_flag |= CL_PRESERVE;
5641 }
5642
5643 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5644 } else {
5645 if (!vector_upl_index) {
5646 vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
5647 v_upl_uio_offset = uio->uio_offset;
5648 vector_upl_offset = upl_offset;
5649 }
5650
5651 vector_upl_set_subupl(vector_upl, upl, upl_size);
5652 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
5653 vector_upl_index++;
5654 vector_upl_size += upl_size;
5655 vector_upl_iosize += io_size;
5656
5657 if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
5658 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5659 reset_vector_run_state();
5660 }
5661 }
5662
5663 if (lock) {
5664 // We don't need to wait for the I/O to complete
5665 cluster_unlock_direct_read(lock);
5666 lock = NULL;
5667 }
5668
5669 /*
5670 * update the uio structure
5671 */
5672 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
5673 uio_update(uio, (user_size_t)max_io_size);
5674 } else {
5675 uio_update(uio, (user_size_t)io_size);
5676 }
5677
5678 io_req_size -= io_size;
5679
5680 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
5681 upl, (int)uio->uio_offset, io_req_size, retval, 0);
5682 } /* end while */
5683
5684 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
5685 retval = cluster_io_type(uio, read_type, read_length, 0);
5686
5687 if (retval == 0 && *read_type == IO_DIRECT) {
5688 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5689 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5690
5691 goto next_dread;
5692 }
5693 }
5694
5695 wait_for_dreads:
5696
5697 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
5698 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5699 reset_vector_run_state();
5700 }
5701
5702 // We don't need to wait for the I/O to complete
5703 if (lock) {
5704 cluster_unlock_direct_read(lock);
5705 }
5706
5707 /*
5708 * make sure all async reads that are part of this stream
5709 * have completed before we return
5710 */
5711 cluster_iostate_wait(&iostate, 0, "cluster_read_direct");
5712
5713 if (iostate.io_error) {
5714 retval = iostate.io_error;
5715 }
5716
5717 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5718
5719 if (io_throttled == TRUE && retval == 0) {
5720 retval = EAGAIN;
5721 }
5722
5723 vm_map_offset_t current_page_size, current_page_mask;
5724 current_page_size = vm_map_page_size(current_map());
5725 current_page_mask = vm_map_page_mask(current_map());
5726 if (uio_acct) {
5727 off_t bytes_to_prefault = 0, bytes_prefaulted = 0;
5728 user_addr_t curr_iov_base = 0;
5729 user_addr_t curr_iov_end = 0;
5730 user_size_t curr_iov_len = 0;
5731
5732 bytes_to_prefault = uio_offset(uio) - uio_offset(uio_acct);
5733
5734 for (; bytes_prefaulted < bytes_to_prefault;) {
5735 curr_iov_base = uio_curriovbase(uio_acct);
5736 curr_iov_len = MIN(uio_curriovlen(uio_acct), bytes_to_prefault - bytes_prefaulted);
5737 curr_iov_end = curr_iov_base + curr_iov_len;
5738
5739 for (; curr_iov_base < curr_iov_end;) {
5740 /*
5741 * This is specifically done for pmap accounting purposes.
5742 * vm_pre_fault() will call vm_fault() to enter the page into
5743 * the pmap if there isn't _a_ physical page for that VA already.
5744 */
5745 vm_pre_fault(vm_map_trunc_page(curr_iov_base, current_page_mask), VM_PROT_READ);
5746 curr_iov_base += current_page_size;
5747 bytes_prefaulted += current_page_size;
5748 }
5749 /*
5750 * Use update instead of advance so we can see how many iovs we processed.
5751 */
5752 uio_update(uio_acct, curr_iov_len);
5753 }
5754 uio_free(uio_acct);
5755 uio_acct = NULL;
5756 }
5757
5758 if (io_req_size && retval == 0) {
5759 /*
5760 * we couldn't handle the tail of this request in DIRECT mode
5761 * so fire it through the copy path
5762 */
5763 if (flags & IO_ENCRYPTED) {
5764 /*
5765 * We cannot fall back to the copy path for encrypted I/O. If this
5766 * happens, there is something wrong with the user buffer passed
5767 * down.
5768 */
5769 retval = EFAULT;
5770 } else {
5771 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
5772 }
5773
5774 *read_type = IO_UNKNOWN;
5775 }
5776 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
5777 (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
5778
5779 return retval;
5780 }
5781
5782
5783 static int
5784 cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5785 int (*callback)(buf_t, void *), void *callback_arg, int flags)
5786 {
5787 upl_page_info_t *pl;
5788 upl_t upl[MAX_VECTS];
5789 vm_offset_t upl_offset;
5790 addr64_t dst_paddr = 0;
5791 user_addr_t iov_base;
5792 off_t max_size;
5793 upl_size_t upl_size;
5794 vm_size_t upl_needed_size;
5795 mach_msg_type_number_t pages_in_pl;
5796 upl_control_flags_t upl_flags;
5797 kern_return_t kret;
5798 struct clios iostate;
5799 int error = 0;
5800 int cur_upl = 0;
5801 int num_upl = 0;
5802 int n;
5803 u_int32_t xsize;
5804 u_int32_t io_size;
5805 u_int32_t devblocksize;
5806 u_int32_t mem_alignment_mask;
5807 u_int32_t tail_size = 0;
5808 int bflag;
5809
5810 if (flags & IO_PASSIVE) {
5811 bflag = CL_PASSIVE;
5812 } else {
5813 bflag = 0;
5814 }
5815
5816 if (flags & IO_NOCACHE) {
5817 bflag |= CL_NOCACHE;
5818 }
5819
5820 /*
5821 * When we enter this routine, we know
5822 * -- the read_length will not exceed the current iov_len
5823 * -- the target address is physically contiguous for read_length
5824 */
5825 cluster_syncup(vp, filesize, callback, callback_arg, PUSH_SYNC);
5826
5827 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5828 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5829
5830 iostate.io_completed = 0;
5831 iostate.io_issued = 0;
5832 iostate.io_error = 0;
5833 iostate.io_wanted = 0;
5834
5835 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5836
5837 next_cread:
5838 io_size = *read_length;
5839
5840 max_size = filesize - uio->uio_offset;
5841
5842 if (io_size > max_size) {
5843 io_size = (u_int32_t)max_size;
5844 }
5845
5846 iov_base = uio_curriovbase(uio);
5847
5848 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5849 upl_needed_size = upl_offset + io_size;
5850
5851 pages_in_pl = 0;
5852 upl_size = (upl_size_t)upl_needed_size;
5853 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5854
5855
5856 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
5857 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
5858
5859 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5860 kret = vm_map_get_upl(map,
5861 vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
5862 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
5863
5864 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
5865 (int)upl_offset, upl_size, io_size, kret, 0);
5866
5867 if (kret != KERN_SUCCESS) {
5868 /*
5869 * failed to get pagelist
5870 */
5871 error = EINVAL;
5872 goto wait_for_creads;
5873 }
5874 num_upl++;
5875
5876 if (upl_size < upl_needed_size) {
5877 /*
5878 * The upl_size wasn't satisfied.
5879 */
5880 error = EINVAL;
5881 goto wait_for_creads;
5882 }
5883 pl = ubc_upl_pageinfo(upl[cur_upl]);
5884
5885 dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
5886
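/*
* if the file offset doesn't start on a device block boundary, or
* what's left is smaller than a device block, peel off the
* unaligned 'head' one partial device block at a time via
* cluster_align_phys_io, advancing the physical destination
* address as we go... the aligned middle is issued below and any
* unaligned 'tail' is finished off after the main loop
*/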
5887 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
5888 u_int32_t head_size;
5889
5890 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
5891
5892 if (head_size > io_size) {
5893 head_size = io_size;
5894 }
5895
5896 error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
5897
5898 if (error) {
5899 goto wait_for_creads;
5900 }
5901
5902 upl_offset += head_size;
5903 dst_paddr += head_size;
5904 io_size -= head_size;
5905
5906 iov_base += head_size;
5907 }
5908 if ((u_int32_t)iov_base & mem_alignment_mask) {
5909 /*
5910 * the request isn't aligned to a memory boundary
5911 * that the underlying DMA engine can handle...
5912 * return an error instead of going through
5913 * the slow copy path since the intent of this
5914 * path is direct I/O to device memory
5915 */
5916 error = EINVAL;
5917 goto wait_for_creads;
5918 }
5919
5920 tail_size = io_size & (devblocksize - 1);
5921
5922 io_size -= tail_size;
5923
5924 while (io_size && error == 0) {
5925 if (io_size > MAX_IO_CONTIG_SIZE) {
5926 xsize = MAX_IO_CONTIG_SIZE;
5927 } else {
5928 xsize = io_size;
5929 }
5930 /*
5931 * request asynchronously so that we can overlap
5932 * the preparation of the next I/O... we'll do
5933 * the commit after all the I/O has completed
5934 * since it's all issued against the same UPL
5935 * if there are already too many outstanding reads
5936 * wait until some have completed before issuing the next
5937 */
5938 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig");
5939
5940 if (iostate.io_error) {
5941 /*
5942 * one of the earlier reads we issued ran into a hard error
5943 * don't issue any more reads...
5944 * go wait for any other reads to complete before
5945 * returning the error to the caller
5946 */
5947 goto wait_for_creads;
5948 }
5949 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
5950 CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
5951 (buf_t)NULL, &iostate, callback, callback_arg);
5952 /*
5953 * The cluster_io read was issued successfully,
5954 * update the uio structure
5955 */
5956 if (error == 0) {
5957 uio_update(uio, (user_size_t)xsize);
5958
5959 dst_paddr += xsize;
5960 upl_offset += xsize;
5961 io_size -= xsize;
5962 }
5963 }
5964 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
5965 error = cluster_io_type(uio, read_type, read_length, 0);
5966
5967 if (error == 0 && *read_type == IO_CONTIG) {
5968 cur_upl++;
5969 goto next_cread;
5970 }
5971 } else {
5972 *read_type = IO_UNKNOWN;
5973 }
5974
5975 wait_for_creads:
5976 /*
5977 * make sure all async reads that are part of this stream
5978 * have completed before we proceed
5979 */
5980 cluster_iostate_wait(&iostate, 0, "cluster_read_contig");
5981
5982 if (iostate.io_error) {
5983 error = iostate.io_error;
5984 }
5985
5986 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5987
5988 if (error == 0 && tail_size) {
5989 error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
5990 }
5991
5992 for (n = 0; n < num_upl; n++) {
5993 /*
5994 * just release our hold on each physically contiguous
5995 * region without changing any state
5996 */
5997 ubc_upl_abort(upl[n], 0);
5998 }
5999
6000 return error;
6001 }
6002
6003
6004 static int
6005 cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
6006 {
6007 user_size_t iov_len;
6008 user_addr_t iov_base = 0;
6009 upl_t upl;
6010 upl_size_t upl_size;
6011 upl_control_flags_t upl_flags;
6012 int retval = 0;
6013
6014 /*
6015 * skip over any empty vectors
6016 */
6017 uio_update(uio, (user_size_t)0);
6018
6019 iov_len = uio_curriovlen(uio);
6020
6021 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);
6022
6023 if (iov_len) {
6024 iov_base = uio_curriovbase(uio);
6025 /*
6026 * make sure the size of the vector isn't too big...
6027 * internally, we want to handle all of the I/O in
6028 * chunk sizes that fit in a 32 bit int
6029 */
6030 if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE) {
6031 upl_size = MAX_IO_REQUEST_SIZE;
6032 } else {
6033 upl_size = (u_int32_t)iov_len;
6034 }
6035
6036 upl_flags = UPL_QUERY_OBJECT_TYPE;
6037
6038 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
6039 if ((vm_map_get_upl(map,
6040 vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
6041 &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) {
6042 /*
6043 * the user app must have passed in an invalid address
6044 */
6045 retval = EFAULT;
6046 }
6047 if (upl_size == 0) {
6048 retval = EFAULT;
6049 }
6050
6051 *io_length = upl_size;
6052
6053 if (upl_flags & UPL_PHYS_CONTIG) {
6054 *io_type = IO_CONTIG;
6055 } else if (iov_len >= min_length) {
6056 *io_type = IO_DIRECT;
6057 } else {
6058 *io_type = IO_COPY;
6059 }
6060 } else {
6061 /*
6062 * nothing left to do for this uio
6063 */
6064 *io_length = 0;
6065 *io_type = IO_UNKNOWN;
6066 }
6067 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);
6068
6069 if (*io_type == IO_DIRECT &&
6070 vm_map_page_shift(current_map()) < PAGE_SHIFT) {
6071 /* no direct I/O for sub-page-size address spaces */
6072 DEBUG4K_VFS("io_type IO_DIRECT -> IO_COPY\n");
6073 *io_type = IO_COPY;
6074 }
6075
6076 return retval;
6077 }
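/*
* Sketch (for illustration only) of how a caller consumes the
* classification returned above... the actual dispatch lives in the
* cluster read/write entry points (not shown here):
*
*	while (retval == 0 && uio_resid(uio)) {
*		switch (read_type) {
*		case IO_COPY:    retval = cluster_read_copy(...);    break;
*		case IO_DIRECT:  retval = cluster_read_direct(...);  break;
*		case IO_CONTIG:  retval = cluster_read_contig(...);  break;
*		case IO_UNKNOWN: retval = cluster_io_type(uio, &read_type, &read_length, 0); break;
*		}
*	}
*
* the direct and contig engines re-classify the next iov themselves
* and hand back IO_UNKNOWN (or a new type) when they can no longer
* make progress.
*/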
6078
6079
6080 /*
6081 * generate advisory I/O's in the largest chunks possible
6082 * the completed pages will be released into the VM cache
6083 */
6084 int
6085 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
6086 {
6087 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
6088 }
6089
6090 int
6091 advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
6092 {
6093 upl_page_info_t *pl;
6094 upl_t upl = NULL;
6095 vm_offset_t upl_offset;
6096 int upl_size;
6097 off_t upl_f_offset;
6098 int start_offset;
6099 int start_pg;
6100 int last_pg;
6101 int pages_in_upl;
6102 off_t max_size;
6103 int io_size;
6104 kern_return_t kret;
6105 int retval = 0;
6106 int issued_io;
6107 int skip_range;
6108 uint32_t max_io_size;
6109
6110
6111 if (!UBCINFOEXISTS(vp)) {
6112 return EINVAL;
6113 }
6114
6115 if (f_offset < 0 || resid < 0) {
6116 return EINVAL;
6117 }
6118
6119 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
6120
6121 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
6122 if (max_io_size > speculative_prefetch_max_iosize) {
6123 max_io_size = speculative_prefetch_max_iosize;
6124 }
6125 }
6126
6127 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
6128 (int)f_offset, resid, (int)filesize, 0, 0);
6129
6130 while (resid && f_offset < filesize && retval == 0) {
6131 /*
6132 * compute the size of the upl needed to encompass
6133 * the requested read... limit each call to cluster_io
6134 * to the maximum UPL size... cluster_io will clip if
6135 * this exceeds the maximum io_size for the device...
6136 * make sure to account for
6137 * a starting offset that's not page aligned
6138 */
6139 start_offset = (int)(f_offset & PAGE_MASK_64);
6140 upl_f_offset = f_offset - (off_t)start_offset;
6141 max_size = filesize - f_offset;
6142
6143 if (resid < max_size) {
6144 io_size = resid;
6145 } else {
6146 io_size = (int)max_size;
6147 }
6148
6149 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
6150 if ((uint32_t)upl_size > max_io_size) {
6151 upl_size = max_io_size;
6152 }
6153
6154 skip_range = 0;
6155 /*
6156 * return the number of contiguously present pages in the cache
6157 * starting at upl_f_offset within the file
6158 */
6159 ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
6160
6161 if (skip_range) {
6162 /*
6163 * skip over pages already present in the cache
6164 */
6165 io_size = skip_range - start_offset;
6166
6167 f_offset += io_size;
6168 resid -= io_size;
6169
6170 if (skip_range == upl_size) {
6171 continue;
6172 }
6173 /*
6174 * have to issue some real I/O
6175 * at this point, we know it's starting on a page boundary
6176 * because we've skipped over at least the first page in the request
6177 */
6178 start_offset = 0;
6179 upl_f_offset += skip_range;
6180 upl_size -= skip_range;
6181 }
6182 pages_in_upl = upl_size / PAGE_SIZE;
6183
6184 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
6185 upl, (int)upl_f_offset, upl_size, start_offset, 0);
6186
6187 kret = ubc_create_upl_kernel(vp,
6188 upl_f_offset,
6189 upl_size,
6190 &upl,
6191 &pl,
6192 UPL_RET_ONLY_ABSENT | UPL_SET_LITE,
6193 VM_KERN_MEMORY_FILE);
6194 if (kret != KERN_SUCCESS) {
6195 return retval;
6196 }
6197 issued_io = 0;
6198
6199 /*
6200 * before we start marching forward, we must make sure we end on
6201 * a present page, otherwise we will be working with a freed
6202 * upl
6203 */
6204 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
6205 if (upl_page_present(pl, last_pg)) {
6206 break;
6207 }
6208 }
6209 pages_in_upl = last_pg + 1;
6210
6211
6212 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
6213 upl, (int)upl_f_offset, upl_size, start_offset, 0);
6214
6215
6216 for (last_pg = 0; last_pg < pages_in_upl;) {
6217 /*
6218 * scan from the beginning of the upl looking for the first
6219 * page that is present.... this will become the first page in
6220 * the request we're going to make to 'cluster_io'... if all
6221 * of the pages are absent, we won't call through to 'cluster_io'
6222 */
6223 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
6224 if (upl_page_present(pl, start_pg)) {
6225 break;
6226 }
6227 }
6228
6229 /*
6230 * scan from the starting present page looking for an absent
6231 * page before the end of the upl is reached, if we
6232 * find one, then it will terminate the range of pages being
6233 * presented to 'cluster_io'
6234 */
6235 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
6236 if (!upl_page_present(pl, last_pg)) {
6237 break;
6238 }
6239 }
6240
6241 if (last_pg > start_pg) {
6242 /*
6243 * we found a range of pages that must be filled
6244 * if the last page in this range is the last page of the file
6245 * we may have to clip the size of it to keep from reading past
6246 * the end of the last physical block associated with the file
6247 */
6248 upl_offset = start_pg * PAGE_SIZE;
6249 io_size = (last_pg - start_pg) * PAGE_SIZE;
6250
6251 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
6252 io_size = (int)(filesize - (upl_f_offset + upl_offset));
6253 }
6254
6255 /*
6256 * issue an asynchronous read to cluster_io
6257 */
6258 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
6259 CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6260
6261 issued_io = 1;
6262 }
6263 }
6264 if (issued_io == 0) {
6265 ubc_upl_abort(upl, 0);
6266 }
6267
6268 io_size = upl_size - start_offset;
6269
6270 if (io_size > resid) {
6271 io_size = resid;
6272 }
6273 f_offset += io_size;
6274 resid -= io_size;
6275 }
6276
6277 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
6278 (int)f_offset, resid, retval, 0, 0);
6279
6280 return retval;
6281 }
6282
6283
6284 int
6285 cluster_push(vnode_t vp, int flags)
6286 {
6287 return cluster_push_ext(vp, flags, NULL, NULL);
6288 }
6289
6290
6291 int
6292 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
6293 {
6294 return cluster_push_err(vp, flags, callback, callback_arg, NULL);
6295 }
6296
6297 /* write errors via err, but return the number of clusters written */
6298 extern uint32_t system_inshutdown;
6299 uint32_t cl_sparse_push_error = 0;
6300 int
6301 cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg, int *err)
6302 {
6303 int retval;
6304 int my_sparse_wait = 0;
6305 struct cl_writebehind *wbp;
6306 int local_err = 0;
6307
6308 if (err) {
6309 *err = 0;
6310 }
6311
6312 if (!UBCINFOEXISTS(vp)) {
6313 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0);
6314 return 0;
6315 }
6316 /* return if deferred write is set */
6317 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
6318 return 0;
6319 }
6320 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
6321 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0);
6322 return 0;
6323 }
6324 if (!ISSET(flags, IO_SYNC) && wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
6325 lck_mtx_unlock(&wbp->cl_lockw);
6326
6327 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0);
6328 return 0;
6329 }
6330 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
6331 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
6332
6333 /*
6334 * if we have an fsync in progress, we don't want to allow any additional
6335 * sync/fsync/close(s) to occur until it finishes.
6336 * note that it's possible for writes to continue to occur to this file
6337 * while we're waiting and also once the fsync starts to clean if we're
6338 * in the sparse map case
6339 */
6340 while (wbp->cl_sparse_wait) {
6341 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
6342
6343 msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
6344
6345 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
6346 }
6347 if (flags & IO_SYNC) {
6348 my_sparse_wait = 1;
6349 wbp->cl_sparse_wait = 1;
6350
6351 /*
6352 * this is an fsync (or equivalent)... we must wait for any existing async
6353 * cleaning operations to complete before we evaluate the current state
6354 * and finish cleaning... this ensures that all writes issued before this
6355 * fsync actually get cleaned to the disk before this fsync returns
6356 */
6357 while (wbp->cl_sparse_pushes) {
6358 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
6359
6360 msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
6361
6362 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
6363 }
6364 }
6365 if (wbp->cl_scmap) {
6366 void *scmap;
6367
6368 if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
6369 scmap = wbp->cl_scmap;
6370 wbp->cl_scmap = NULL;
6371
6372 wbp->cl_sparse_pushes++;
6373
6374 lck_mtx_unlock(&wbp->cl_lockw);
6375
6376 retval = sparse_cluster_push(wbp, &scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
6377
6378 lck_mtx_lock(&wbp->cl_lockw);
6379
6380 wbp->cl_sparse_pushes--;
6381
6382 if (retval) {
6383 if (wbp->cl_scmap != NULL) {
6384 /*
6385 * panic("cluster_push_err: Expected NULL cl_scmap\n");
6386 *
6387 * This can happen if we get an error from the underlying FS
6388 * e.g. ENOSPC, EPERM or EIO etc. We hope that these errors
6389 * are transient and the I/Os will succeed at a later point.
6390 *
6391 * The tricky part here is that a new sparse cluster has been
6392 * allocated and tracking a different set of dirty pages. So these
6393 * pages are not going to be pushed out with the next sparse_cluster_push.
6394 * An explicit msync or file close will, however, push the pages out.
6395 *
6396 * What if those calls still don't work? And so, during shutdown we keep
6397 * trying till we succeed...
6398 */
6399
6400 if (system_inshutdown) {
6401 if ((retval == ENOSPC) && (vp->v_mount->mnt_flag & (MNT_LOCAL | MNT_REMOVABLE)) == MNT_LOCAL) {
6402 os_atomic_inc(&cl_sparse_push_error, relaxed);
6403 }
6404 } else {
6405 vfs_drt_control(&scmap, 0); /* emit stats and free this memory. Dirty pages stay intact. */
6406 scmap = NULL;
6407 }
6408 } else {
6409 wbp->cl_scmap = scmap;
6410 }
6411 }
6412
6413 if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0) {
6414 wakeup((caddr_t)&wbp->cl_sparse_pushes);
6415 }
6416 } else {
6417 retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
6418 }
6419
6420 local_err = retval;
6421
6422 if (err) {
6423 *err = retval;
6424 }
6425 retval = 1;
6426 } else {
6427 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, &local_err, FALSE);
6428 if (err) {
6429 *err = local_err;
6430 }
6431 }
6432 lck_mtx_unlock(&wbp->cl_lockw);
6433
6434 if (flags & IO_SYNC) {
6435 (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
6436 }
6437
6438 if (my_sparse_wait) {
6439 /*
6440 * I'm the owner of the serialization token
6441 * clear it and wakeup anyone that is waiting
6442 * for me to finish
6443 */
6444 lck_mtx_lock(&wbp->cl_lockw);
6445
6446 wbp->cl_sparse_wait = 0;
6447 wakeup((caddr_t)&wbp->cl_sparse_wait);
6448
6449 lck_mtx_unlock(&wbp->cl_lockw);
6450 }
6451 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
6452 wbp->cl_scmap, wbp->cl_number, retval, local_err, 0);
6453
6454 return retval;
6455 }
6456
6457
6458 __private_extern__ void
6459 cluster_release(struct ubc_info *ubc)
6460 {
6461 struct cl_writebehind *wbp;
6462 struct cl_readahead *rap;
6463
6464 if ((wbp = ubc->cl_wbehind)) {
6465 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
6466
6467 if (wbp->cl_scmap) {
6468 vfs_drt_control(&(wbp->cl_scmap), 0);
6469 }
6470 lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
6471 zfree(cl_wr_zone, wbp);
6472 ubc->cl_wbehind = NULL;
6473 } else {
6474 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
6475 }
6476
6477 if ((rap = ubc->cl_rahead)) {
6478 lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
6479 zfree(cl_rd_zone, rap);
6480 ubc->cl_rahead = NULL;
6481 }
6482
6483 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
6484 }
6485
6486
6487 static int
6488 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err, boolean_t vm_initiated)
6489 {
6490 int cl_index;
6491 int cl_index1;
6492 int min_index;
6493 int cl_len;
6494 int cl_pushed = 0;
6495 struct cl_wextent l_clusters[MAX_CLUSTERS];
6496 u_int max_cluster_pgcount;
6497 int error = 0;
6498
6499 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
6500 /*
6501 * the write behind context exists and has
6502 * already been locked...
6503 */
6504 if (wbp->cl_number == 0) {
6505 /*
6506 * no clusters to push
6507 * return number of empty slots
6508 */
6509 return MAX_CLUSTERS;
6510 }
6511
6512 /*
6513 * make a local 'sorted' copy of the clusters
6514 * and clear wbp->cl_number so that new clusters can
6515 * be developed
6516 */
6517 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
6518 for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
6519 if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr) {
6520 continue;
6521 }
6522 if (min_index == -1) {
6523 min_index = cl_index1;
6524 } else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr) {
6525 min_index = cl_index1;
6526 }
6527 }
6528 if (min_index == -1) {
6529 break;
6530 }
6531
6532 l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
6533 l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
6534 l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
6535
6536 wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
6537 }
6538 wbp->cl_number = 0;
6539
6540 cl_len = cl_index;
6541
6542 /* skip switching to the sparse cluster mechanism if on diskimage */
6543 if (((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS) &&
6544 !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)) {
6545 int i;
6546
6547 /*
6548 * determine if we appear to be writing the file sequentially
6549 * if not, by returning without having pushed any clusters
6550 * we will cause this vnode to be pushed into the sparse cluster mechanism
6551 * used for managing more random I/O patterns
6552 *
6553 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
6554 * that's why we're in try_push with PUSH_DELAY...
6555 *
6556 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
6557 * is adjacent to the next (i.e. we're looking for sequential writes)... they were sorted above
6558 * so we can just make a simple pass through, up to, but not including the last one...
6559 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
6560 * are sequential
6561 *
6562 * we let the last one be partial as long as it was adjacent to the previous one...
6563 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
6564 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
6565 */
6566 for (i = 0; i < MAX_CLUSTERS - 1; i++) {
6567 if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount) {
6568 goto dont_try;
6569 }
6570 if (l_clusters[i].e_addr != l_clusters[i + 1].b_addr) {
6571 goto dont_try;
6572 }
6573 }
6574 }
6575 if (vm_initiated == TRUE) {
6576 lck_mtx_unlock(&wbp->cl_lockw);
6577 }
6578
6579 for (cl_index = 0; cl_index < cl_len; cl_index++) {
6580 int flags;
6581 struct cl_extent cl;
6582 int retval;
6583
6584 flags = io_flags & (IO_PASSIVE | IO_CLOSE);
6585
6586 /*
6587 * try to push each cluster in turn...
6588 */
6589 if (l_clusters[cl_index].io_flags & CLW_IONOCACHE) {
6590 flags |= IO_NOCACHE;
6591 }
6592
6593 if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE) {
6594 flags |= IO_PASSIVE;
6595 }
6596
6597 if (push_flag & PUSH_SYNC) {
6598 flags |= IO_SYNC;
6599 }
6600
6601 cl.b_addr = l_clusters[cl_index].b_addr;
6602 cl.e_addr = l_clusters[cl_index].e_addr;
6603
6604 retval = cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg, vm_initiated);
6605
6606 if (retval == 0) {
6607 cl_pushed++;
6608
6609 l_clusters[cl_index].b_addr = 0;
6610 l_clusters[cl_index].e_addr = 0;
6611 } else if (error == 0) {
6612 error = retval;
6613 }
6614
6615 if (!(push_flag & PUSH_ALL)) {
6616 break;
6617 }
6618 }
6619 if (vm_initiated == TRUE) {
6620 lck_mtx_lock(&wbp->cl_lockw);
6621 }
6622
6623 if (err) {
6624 *err = error;
6625 }
6626
6627 dont_try:
6628 if (cl_len > cl_pushed) {
6629 /*
6630 * we didn't push all of the clusters, so
6631 * let's try to merge them back into the vnode
6632 */
6633 if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
6634 /*
6635 * we picked up some new clusters while we were trying to
6636 * push the old ones... this can happen because I've dropped
6637 * the vnode lock... the sum of the
6638 * leftovers plus the new cluster count exceeds our ability
6639 * to represent them, so switch to the sparse cluster mechanism
6640 *
6641 * collect the active public clusters...
6642 */
6643 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
6644
6645 for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
6646 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
6647 continue;
6648 }
6649 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
6650 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
6651 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
6652
6653 cl_index1++;
6654 }
6655 /*
6656 * update the cluster count
6657 */
6658 wbp->cl_number = cl_index1;
6659
6660 /*
6661 * and collect the original clusters that were moved into the
6662 * local storage for sorting purposes
6663 */
6664 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
6665 } else {
6666 /*
6667 * we've got room to merge the leftovers back in
6668 * just append them starting at the next 'hole'
6669 * represented by wbp->cl_number
6670 */
6671 for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
6672 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
6673 continue;
6674 }
6675
6676 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
6677 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
6678 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
6679
6680 cl_index1++;
6681 }
6682 /*
6683 * update the cluster count
6684 */
6685 wbp->cl_number = cl_index1;
6686 }
6687 }
6688 return MAX_CLUSTERS - wbp->cl_number;
6689 }
6690
6691
6692
6693 static int
6694 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags,
6695 int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6696 {
6697 upl_page_info_t *pl;
6698 upl_t upl;
6699 vm_offset_t upl_offset;
6700 int upl_size;
6701 off_t upl_f_offset;
6702 int pages_in_upl;
6703 int start_pg;
6704 int last_pg;
6705 int io_size;
6706 int io_flags;
6707 int upl_flags;
6708 int bflag;
6709 int size;
6710 int error = 0;
6711 int retval;
6712 kern_return_t kret;
6713
6714 if (flags & IO_PASSIVE) {
6715 bflag = CL_PASSIVE;
6716 } else {
6717 bflag = 0;
6718 }
6719
6720 if (flags & IO_SKIP_ENCRYPTION) {
6721 bflag |= CL_ENCRYPTED;
6722 }
6723
6724 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
6725 (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
6726
6727 if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
6728 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
6729
6730 return 0;
6731 }
6732 upl_size = pages_in_upl * PAGE_SIZE;
6733 upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
6734
6735 if (upl_f_offset + upl_size >= EOF) {
6736 if (upl_f_offset >= EOF) {
6737 /*
6738 * must have truncated the file and missed
6739 * clearing a dangling cluster (i.e. it's completely
6740 * beyond the new EOF)
6741 */
6742 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
6743
6744 return 0;
6745 }
6746 size = (int)(EOF - upl_f_offset);
6747
6748 upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
6749 pages_in_upl = upl_size / PAGE_SIZE;
6750 } else {
6751 size = upl_size;
6752 }
6753
6754
6755 if (vm_initiated) {
6756 vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size,
6757 UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error);
6758
6759 return error;
6760 }
6761 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
6762
6763 /*
6764 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
6765 *
6766 * - only pages that are currently dirty are returned... these are the ones we need to clean
6767 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
6768 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
6769 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
6770 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
6771 *
6772 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
6773 */
6774
6775 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE)) {
6776 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
6777 } else {
6778 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
6779 }
6780
6781 kret = ubc_create_upl_kernel(vp,
6782 upl_f_offset,
6783 upl_size,
6784 &upl,
6785 &pl,
6786 upl_flags,
6787 VM_KERN_MEMORY_FILE);
6788 if (kret != KERN_SUCCESS) {
6789 panic("cluster_push: failed to get pagelist");
6790 }
6791
6792 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
6793
6794 /*
6795 * since we only asked for the dirty pages back
6796 * it's possible that we may only get a few or even none, so...
6797 * before we start marching forward, we must make sure we know
6798 * where the last present page is in the UPL, otherwise we could
6799 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
6800 * employed by commit_range and abort_range.
6801 */
6802 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
6803 if (upl_page_present(pl, last_pg)) {
6804 break;
6805 }
6806 }
6807 pages_in_upl = last_pg + 1;
6808
6809 if (pages_in_upl == 0) {
6810 ubc_upl_abort(upl, 0);
6811
6812 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
6813 return 0;
6814 }
6815
6816 for (last_pg = 0; last_pg < pages_in_upl;) {
6817 /*
6818 * find the next dirty page in the UPL
6819 * this will become the first page in the
6820 * next I/O to generate
6821 */
6822 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
6823 if (upl_dirty_page(pl, start_pg)) {
6824 break;
6825 }
6826 if (upl_page_present(pl, start_pg)) {
6827 /*
6828 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
6829 * just release these unchanged since we're not going
6830 * to steal them or change their state
6831 */
6832 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
6833 }
6834 }
6835 if (start_pg >= pages_in_upl) {
6836 /*
6837 * done... no more dirty pages to push
6838 */
6839 break;
6840 }
6841 if (start_pg > last_pg) {
6842 /*
6843 * skipped over some non-dirty pages
6844 */
6845 size -= ((start_pg - last_pg) * PAGE_SIZE);
6846 }
6847
6848 /*
6849 * find a range of dirty pages to write
6850 */
6851 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
6852 if (!upl_dirty_page(pl, last_pg)) {
6853 break;
6854 }
6855 }
6856 upl_offset = start_pg * PAGE_SIZE;
6857
6858 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
6859
6860 io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
6861
6862 if (!(flags & IO_SYNC)) {
6863 io_flags |= CL_ASYNC;
6864 }
6865
6866 if (flags & IO_CLOSE) {
6867 io_flags |= CL_CLOSE;
6868 }
6869
6870 if (flags & IO_NOCACHE) {
6871 io_flags |= CL_NOCACHE;
6872 }
6873
6874 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
6875 io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
6876
6877 if (error == 0 && retval) {
6878 error = retval;
6879 }
6880
6881 size -= io_size;
6882 }
6883 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, error, 0, 0);
6884
6885 return error;
6886 }
6887
6888
6889 /*
6890 * sparse_cluster_switch is called with the write behind lock held
6891 */
6892 static int
6893 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6894 {
6895 int cl_index;
6896 int error = 0;
6897
6898 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, 0, 0);
6899
6900 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
6901 int flags;
6902 struct cl_extent cl;
6903
6904 for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
6905 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
6906 if (flags & UPL_POP_DIRTY) {
6907 cl.e_addr = cl.b_addr + 1;
6908
6909 error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated);
6910
6911 if (error) {
6912 break;
6913 }
6914 }
6915 }
6916 }
6917 }
6918 wbp->cl_number -= cl_index;
6919
6920 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, error, 0);
6921
6922 return error;
6923 }
6924
6925
6926 /*
6927 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
6928 * still associated with the write-behind context... however, if the scmap has been disassociated
6929 * from the write-behind context (the cluster_push case), the wb lock is not held
6930 */
6931 static int
6932 sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t EOF, int push_flag,
6933 int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
6934 {
6935 struct cl_extent cl;
6936 off_t offset;
6937 u_int length;
6938 void *l_scmap;
6939 int error = 0;
6940
6941 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, kdebug_vnode(vp), (*scmap), 0, push_flag, 0);
6942
6943 if (push_flag & PUSH_ALL) {
6944 vfs_drt_control(scmap, 1);
6945 }
6946
6947 l_scmap = *scmap;
6948
6949 for (;;) {
6950 int retval;
6951
6952 if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS) {
6953 /*
6954 * Not finding anything to push will return KERN_FAILURE.
6955 * Confusing since it isn't really a failure. But that's the
6956 * reason we don't set 'error' here like we do below.
6957 */
6958 break;
6959 }
6960
6961 if (vm_initiated == TRUE) {
6962 lck_mtx_unlock(&wbp->cl_lockw);
6963 }
6964
6965 cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
6966 cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
6967
6968 retval = cluster_push_now(vp, &cl, EOF, io_flags, callback, callback_arg, vm_initiated);
6969 if (error == 0 && retval) {
6970 error = retval;
6971 }
6972
6973 if (vm_initiated == TRUE) {
6974 lck_mtx_lock(&wbp->cl_lockw);
6975
6976 if (*scmap != l_scmap) {
6977 break;
6978 }
6979 }
6980
6981 if (error) {
6982 if (vfs_drt_mark_pages(scmap, offset, length, NULL) != KERN_SUCCESS) {
6983 panic("Failed to restore dirty state on failure");
6984 }
6985
6986 break;
6987 }
6988
6989 if (!(push_flag & PUSH_ALL)) {
6990 break;
6991 }
6992 }
6993 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);
6994
6995 return error;
6996 }
6997
6998
6999 /*
7000 * sparse_cluster_add is called with the write behind lock held
7001 */
7002 static int
7003 sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF,
7004 int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
7005 {
7006 u_int new_dirty;
7007 u_int length;
7008 off_t offset;
7009 int error = 0;
7010 int push_flag = 0; /* Is this a valid value? */
7011
7012 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);
7013
7014 offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
7015 length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
7016
7017 while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
7018 /*
7019 * no room left in the map
7020 * only a partial update was done
7021 * push out some pages and try again
7022 */
7023
7024 if (vfs_get_scmap_push_behavior_internal(scmap, &push_flag)) {
7025 push_flag = 0;
7026 }
7027
7028 error = sparse_cluster_push(wbp, scmap, vp, EOF, push_flag, 0, callback, callback_arg, vm_initiated);
7029
7030 if (error) {
7031 break;
7032 }
7033
7034 offset += (new_dirty * PAGE_SIZE_64);
7035 length -= (new_dirty * PAGE_SIZE);
7036 }
7037 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);
7038
7039 return error;
7040 }
7041
7042
7043 static int
7044 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
7045 {
7046 upl_page_info_t *pl;
7047 upl_t upl;
7048 addr64_t ubc_paddr;
7049 kern_return_t kret;
7050 int error = 0;
7051 int did_read = 0;
7052 int abort_flags;
7053 int upl_flags;
7054 int bflag;
7055
7056 if (flags & IO_PASSIVE) {
7057 bflag = CL_PASSIVE;
7058 } else {
7059 bflag = 0;
7060 }
7061
7062 if (flags & IO_NOCACHE) {
7063 bflag |= CL_NOCACHE;
7064 }
7065
7066 upl_flags = UPL_SET_LITE;
7067
7068 if (!(flags & CL_READ)) {
7069 /*
7070 * "write" operation: let the UPL subsystem know
7071 * that we intend to modify the buffer cache pages
7072 * we're gathering.
7073 */
7074 upl_flags |= UPL_WILL_MODIFY;
7075 } else {
7076 /*
7077 * indicate that there is no need to pull the
7078 * mapping for this page... we're only going
7079 * to read from it, not modify it.
7080 */
7081 upl_flags |= UPL_FILE_IO;
7082 }
7083 kret = ubc_create_upl_kernel(vp,
7084 uio->uio_offset & ~PAGE_MASK_64,
7085 PAGE_SIZE,
7086 &upl,
7087 &pl,
7088 upl_flags,
7089 VM_KERN_MEMORY_FILE);
7090
7091 if (kret != KERN_SUCCESS) {
7092 return EINVAL;
7093 }
7094
7095 if (!upl_valid_page(pl, 0)) {
7096 /*
7097 * issue a synchronous read to cluster_io
7098 */
7099 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
7100 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
7101 if (error) {
7102 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
7103
7104 return error;
7105 }
7106 did_read = 1;
7107 }
7108 ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
7109
7110 /*
7111 * NOTE: There is no prototype for the following in BSD. It, and the definitions
7112 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
7113 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
7114 * way to do so without exporting them to kexts as well.
7115 */
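/*
 * Reading the literal masks below against the commented-out symbolic forms
 * suggests cppvPsnk == 1, cppvPsrc == 2, cppvFsnk == 4 and cppvFsrc == 8;
 * treat those values as informational only, since the authoritative
 * definitions live in osfmk/ppc/mappings.h as noted above.
 */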
7116 if (flags & CL_READ) {
7117 // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
7118 copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
7119 } else {
7120 // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
7121 copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */
7122 }
7123 if (!(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
7124 /*
7125 * issue a synchronous write to cluster_io
7126 */
7127 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
7128 bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
7129 }
7130 if (error == 0) {
7131 uio_update(uio, (user_size_t)xsize);
7132 }
7133
7134 if (did_read) {
7135 abort_flags = UPL_ABORT_FREE_ON_EMPTY;
7136 } else {
7137 abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
7138 }
7139
7140 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
7141
7142 return error;
7143 }
7144
7145 int
7146 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
7147 {
7148 int pg_offset;
7149 int pg_index;
7150 int csize;
7151 int segflg;
7152 int retval = 0;
7153 int xsize;
7154 upl_page_info_t *pl;
7155 int dirty_count;
7156
7157 xsize = *io_resid;
7158
7159 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
7160 (int)uio->uio_offset, upl_offset, xsize, 0, 0);
7161
7162 segflg = uio->uio_segflg;
7163
7164 switch (segflg) {
7165 case UIO_USERSPACE32:
7166 case UIO_USERISPACE32:
7167 uio->uio_segflg = UIO_PHYS_USERSPACE32;
7168 break;
7169
7170 case UIO_USERSPACE:
7171 case UIO_USERISPACE:
7172 uio->uio_segflg = UIO_PHYS_USERSPACE;
7173 break;
7174
7175 case UIO_USERSPACE64:
7176 case UIO_USERISPACE64:
7177 uio->uio_segflg = UIO_PHYS_USERSPACE64;
7178 break;
7179
7180 case UIO_SYSSPACE:
7181 uio->uio_segflg = UIO_PHYS_SYSSPACE;
7182 break;
7183 }
7184 pl = ubc_upl_pageinfo(upl);
7185
7186 pg_index = upl_offset / PAGE_SIZE;
7187 pg_offset = upl_offset & PAGE_MASK;
7188 csize = min(PAGE_SIZE - pg_offset, xsize);
7189
7190 dirty_count = 0;
7191 while (xsize && retval == 0) {
7192 addr64_t paddr;
7193
7194 paddr = ((addr64_t)upl_phys_page(pl, pg_index) << PAGE_SHIFT) + pg_offset;
7195 if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE)) {
7196 dirty_count++;
7197 }
7198
7199 retval = uiomove64(paddr, csize, uio);
7200
7201 pg_index += 1;
7202 pg_offset = 0;
7203 xsize -= csize;
7204 csize = min(PAGE_SIZE, xsize);
7205 }
7206 *io_resid = xsize;
7207
7208 uio->uio_segflg = segflg;
7209
7210 task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, upl_lookup_vnode(upl));
7211 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7212 (int)uio->uio_offset, xsize, retval, segflg, 0);
7213
7214 return retval;
7215 }
7216
7217
7218 int
7219 cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
7220 {
7221 return cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1);
7222 }
7223
7224
7225 static int
7226 cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
7227 {
7228 int segflg;
7229 int io_size;
7230 int xsize;
7231 int start_offset;
7232 int retval = 0;
7233 memory_object_control_t control;
7234
7235 io_size = *io_resid;
7236
7237 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
7238 (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);
7239
7240 control = ubc_getobject(vp, UBC_FLAGS_NONE);
7241
7242 if (control == MEMORY_OBJECT_CONTROL_NULL) {
7243 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7244 (int)uio->uio_offset, io_size, retval, 3, 0);
7245
7246 return 0;
7247 }
7248 segflg = uio->uio_segflg;
7249
7250 switch (segflg) {
7251 case UIO_USERSPACE32:
7252 case UIO_USERISPACE32:
7253 uio->uio_segflg = UIO_PHYS_USERSPACE32;
7254 break;
7255
7256 case UIO_USERSPACE64:
7257 case UIO_USERISPACE64:
7258 uio->uio_segflg = UIO_PHYS_USERSPACE64;
7259 break;
7260
7261 case UIO_USERSPACE:
7262 case UIO_USERISPACE:
7263 uio->uio_segflg = UIO_PHYS_USERSPACE;
7264 break;
7265
7266 case UIO_SYSSPACE:
7267 uio->uio_segflg = UIO_PHYS_SYSSPACE;
7268 break;
7269 }
7270
7271 if ((io_size = *io_resid)) {
7272 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
7273 xsize = (int)uio_resid(uio);
7274
7275 retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
7276 start_offset, io_size, mark_dirty, take_reference);
7277 xsize -= uio_resid(uio);
7278 io_size -= xsize;
7279 }
7280 uio->uio_segflg = segflg;
7281 *io_resid = io_size;
7282
7283 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
7284 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
7285
7286 return retval;
7287 }
7288
7289
7290 int
7291 is_file_clean(vnode_t vp, off_t filesize)
7292 {
7293 off_t f_offset;
7294 int flags;
7295 int total_dirty = 0;
7296
7297 for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
7298 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
7299 if (flags & UPL_POP_DIRTY) {
7300 total_dirty++;
7301 }
7302 }
7303 }
7304 if (total_dirty) {
7305 return EINVAL;
7306 }
7307
7308 return 0;
7309 }
7310
7311
7312
7313 /*
7314 * Dirty region tracking/clustering mechanism.
7315 *
7316 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
7317 * dirty regions within a larger space (file). It is primarily intended to
7318 * support clustering in large files with many dirty areas.
7319 *
7320 * The implementation assumes that the dirty regions are pages.
7321 *
7322 * To represent dirty pages within the file, we store bit vectors in a
7323 * variable-size circular hash.
7324 */
7325
7326 /*
7327 * Bitvector size. This determines the number of pages we group in a
7328 * single hashtable entry. Each hashtable entry is aligned to this
7329 * size within the file.
7330 */
7331 #define DRT_BITVECTOR_PAGES ((1024 * 256) / PAGE_SIZE)
7332
7333 /*
7334 * File offset handling.
7335 *
7336 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
7337 * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7338 */
7339 #define DRT_ADDRESS_MASK (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7340 #define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
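/*
 * Worked example, assuming a 4 KiB PAGE_SIZE (so DRT_BITVECTOR_PAGES == 64):
 *
 * DRT_BITVECTOR_PAGES * PAGE_SIZE == 256 KiB == 0x40000
 * DRT_ADDRESS_MASK == ~0x3ffff
 * DRT_ALIGN_ADDRESS(0x51234) == 0x40000
 *
 * i.e. each hashtable entry describes one 256 KiB-aligned window of the file.
 */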
7341
7342 /*
7343 * Hashtable address field handling.
7344 *
7345 * The low-order bits of the hashtable address are used to conserve
7346 * space.
7347 *
7348 * DRT_HASH_COUNT_MASK must be large enough to store the range
7349 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
7350 * to indicate that the bucket is actually unoccupied.
7351 */
7352 #define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
7353 #define DRT_HASH_SET_ADDRESS(scm, i, a) \
7354 do { \
7355 (scm)->scm_hashtable[(i)].dhe_control = \
7356 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
7357 } while (0)
7358 #define DRT_HASH_COUNT_MASK 0x1ff
7359 #define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
7360 #define DRT_HASH_SET_COUNT(scm, i, c) \
7361 do { \
7362 (scm)->scm_hashtable[(i)].dhe_control = \
7363 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
7364 } while (0)
7365 #define DRT_HASH_CLEAR(scm, i) \
7366 do { \
7367 (scm)->scm_hashtable[(i)].dhe_control = 0; \
7368 } while (0)
7369 #define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
7370 #define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
7371 #define DRT_HASH_COPY(oscm, oi, scm, i) \
7372 do { \
7373 (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
7374 DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
7375 } while (0)
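/*
 * Illustrative packing of dhe_control (same 4 KiB-page assumption as above):
 * the low 9 bits (DRT_HASH_COUNT_MASK == 0x1ff) hold the count of dirty pages
 * in the entry, or 0x1ff to mark the bucket vacant, while the high bits hold
 * the 256 KiB-aligned file offset. An entry covering offset 0x80000 with
 * 3 dirty pages therefore carries dhe_control == 0x80000 | 3 == 0x80003.
 */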
7376
7377
7378 #if !defined(XNU_TARGET_OS_OSX)
7379 /*
7380 * Hash table moduli.
7381 *
7382 * Since the hashtable entry's size is dependent on the size of
7383 * the bitvector, and since the hashtable size is constrained to
7384 * both being prime and fitting within the desired allocation
7385 * size, these values need to be manually determined.
7386 *
7387 * With a 64-bit bitvector (MAX_DRT_BITVECTOR_PAGES = 64), the entry size is 16 bytes.
7388 *
7389 * The small hashtable allocation is 4096 bytes, so the modulus is 251.
7390 * The large hashtable allocation is 32768 bytes, so the modulus is 2039.
7391 * The xlarge hashtable allocation is 131072 bytes, so the modulus is 8179.
7392 */
7393
7394 #define DRT_HASH_SMALL_MODULUS 251
7395 #define DRT_HASH_LARGE_MODULUS 2039
7396 #define DRT_HASH_XLARGE_MODULUS 8179
7397
7398 /*
7399 * Physical memory required before the large hash modulus is permitted.
7400 *
7401 * On small memory systems, the large hash modulus can lead to physical
7402 * memory starvation, so we avoid using it there.
7403 */
7404 #define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
7405 #define DRT_HASH_XLARGE_MEMORY_REQUIRED (8 * 1024LL * 1024LL * 1024LL) /* 8GiB */
7406
7407 #define DRT_SMALL_ALLOCATION 4096 /* 80 bytes spare */
7408 #define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */
7409 #define DRT_XLARGE_ALLOCATION 131072 /* 208 bytes spare */
7410
7411 #else /* XNU_TARGET_OS_OSX */
7412 /*
7413 * Hash table moduli.
7414 *
7415 * Since the hashtable entry's size is dependent on the size of
7416 * the bitvector, and since the hashtable size is constrained to
7417 * both being prime and fitting within the desired allocation
7418 * size, these values need to be manually determined.
7419 *
7420 * With a 64-bit bitvector (MAX_DRT_BITVECTOR_PAGES = 64), the entry size is 16 bytes.
7421 *
7422 * The small hashtable allocation is 16384 bytes, so the modulus is 1019.
7423 * The large hashtable allocation is 131072 bytes, so the modulus is 8179.
7424 * The xlarge hashtable allocation is 524288 bytes, so the modulus is 32749.
7425 */
7426
7427 #define DRT_HASH_SMALL_MODULUS 1019
7428 #define DRT_HASH_LARGE_MODULUS 8179
7429 #define DRT_HASH_XLARGE_MODULUS 32749
7430
7431 /*
7432 * Physical memory required before the large hash modulus is permitted.
7433 *
7434 * On small memory systems, the large hash modulus can lead to physical
7435 * memory starvation, so we avoid using it there.
7436 */
7437 #define DRT_HASH_LARGE_MEMORY_REQUIRED (4 * 1024LL * 1024LL * 1024LL) /* 4GiB */
7438 #define DRT_HASH_XLARGE_MEMORY_REQUIRED (32 * 1024LL * 1024LL * 1024LL) /* 32GiB */
7439
7440 #define DRT_SMALL_ALLOCATION 16384 /* 80 bytes spare */
7441 #define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */
7442 #define DRT_XLARGE_ALLOCATION 524288 /* 304 bytes spare */
7443
7444 #endif /* ! XNU_TARGET_OS_OSX */
7445
7446 /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
7447
7448 /*
7449 * Hashtable entry.
7450 */
7451 struct vfs_drt_hashentry {
7452 u_int64_t dhe_control;
7453 /*
7454 * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
7455 * DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE)
7456 * Since PAGE_SIZE is only known at boot time,
7457 * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k)
7458 * -declare dhe_bitvector array for largest possible length
7459 */
7460 #define MAX_DRT_BITVECTOR_PAGES ((1024 * 256) / (4 * 1024))
7461 u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES / 32];
7462 };
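/*
 * Sizing sketch, assuming a 4 KiB PAGE_SIZE: MAX_DRT_BITVECTOR_PAGES is 64,
 * so dhe_bitvector occupies 64 / 32 == 2 u_int32_t words (8 bytes), which
 * together with the 8-byte dhe_control gives the 16-byte entry size quoted
 * in the modulus comments above. With 16 KiB pages only the first
 * DRT_BITVECTOR_PAGES == 16 bits of the vector are actually used.
 */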
7463
7464 /*
7465 * Hashtable bitvector handling.
7466 *
7467 * Bitvector fields are 32 bits long.
7468 */
7469
7470 #define DRT_HASH_SET_BIT(scm, i, bit) \
7471 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
7472
7473 #define DRT_HASH_CLEAR_BIT(scm, i, bit) \
7474 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
7475
7476 #define DRT_HASH_TEST_BIT(scm, i, bit) \
7477 ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
7478
7479 #define DRT_BITVECTOR_CLEAR(scm, i) \
7480 bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
7481
7482 #define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
7483 bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
7484 &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
7485 (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
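/*
 * For example, page index 37 within an entry lands in dhe_bitvector[1]
 * (37 / 32) under mask (1 << 5) (37 % 32); DRT_HASH_SET_BIT,
 * DRT_HASH_CLEAR_BIT and DRT_HASH_TEST_BIT all share this word/bit split.
 */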
7486
7487 /*
7488 * Dirty Region Tracking structure.
7489 *
7490 * The hashtable is allocated entirely inside the DRT structure.
7491 *
7492 * The hash is a simple circular prime modulus arrangement, the structure
7493 * is resized from small to large if it overflows.
7494 */
7495
7496 struct vfs_drt_clustermap {
7497 u_int32_t scm_magic; /* sanity/detection */
7498 #define DRT_SCM_MAGIC 0x12020003
7499 u_int32_t scm_modulus; /* current ring size */
7500 u_int32_t scm_buckets; /* number of occupied buckets */
7501 u_int32_t scm_lastclean; /* last entry we cleaned */
7502 u_int32_t scm_iskips; /* number of slot skips */
7503
7504 struct vfs_drt_hashentry scm_hashtable[0];
7505 };
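/*
 * Allocation sketch: each map is a single allocation holding the fixed
 * fields above followed by scm_modulus hashtable entries. The "spare"
 * figures quoted with the DRT_*_ALLOCATION defines are simply
 * allocation size - modulus * sizeof(struct vfs_drt_hashentry); for the
 * macOS small map that is 16384 - 1019 * 16 == 80 bytes, which is what
 * leaves room for the header fields of struct vfs_drt_clustermap.
 */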
7506
7507
7508 #define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
7509 #define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
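/*
 * The hash is just the aligned byte offset modulo the ring size, with
 * linear probing via DRT_HASH_NEXT on collisions. For instance, with the
 * macOS small modulus of 1019 an aligned offset of 0x40000 (262144) hashes
 * to slot 262144 % 1019 == 261, and DRT_HASH_NEXT(scm, 1018) wraps back to
 * slot 0.
 */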
7510
7511 /*
7512 * Debugging codes and arguments.
7513 */
7514 #define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
7515 #define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
7516 #define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
7517 #define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
7518 #define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
7519 * dirty */
7520 /* 0, setcount */
7521 /* 1 (clean, no map) */
7522 /* 2 (map alloc fail) */
7523 /* 3, resid (partial) */
7524 #define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
7525 #define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
7526 * lastclean, iskips */
7527
7528
7529 static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
7530 static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
7531 static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
7532 u_int64_t offset, int *indexp);
7533 static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
7534 u_int64_t offset,
7535 int *indexp,
7536 int recursed);
7537 static kern_return_t vfs_drt_do_mark_pages(
7538 void **cmapp,
7539 u_int64_t offset,
7540 u_int length,
7541 u_int *setcountp,
7542 int dirty);
7543 static void vfs_drt_trace(
7544 struct vfs_drt_clustermap *cmap,
7545 int code,
7546 int arg1,
7547 int arg2,
7548 int arg3,
7549 int arg4);
7550
7551
7552 /*
7553 * Allocate and initialise a sparse cluster map.
7554 *
7555 * Will allocate a new map, resize or compact an existing map.
7556 *
7557 * XXX we should probably have at least one intermediate map size,
7558 * as the 1:16 ratio seems a bit drastic.
7559 */
7560 static kern_return_t
7561 vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
7562 {
7563 struct vfs_drt_clustermap *cmap = NULL, *ocmap = NULL;
7564 kern_return_t kret = KERN_SUCCESS;
7565 u_int64_t offset = 0;
7566 u_int32_t i = 0;
7567 int modulus_size = 0, map_size = 0, active_buckets = 0, index = 0, copycount = 0;
7568
7569 ocmap = NULL;
7570 if (cmapp != NULL) {
7571 ocmap = *cmapp;
7572 }
7573
7574 /*
7575 * Decide on the size of the new map.
7576 */
7577 if (ocmap == NULL) {
7578 modulus_size = DRT_HASH_SMALL_MODULUS;
7579 map_size = DRT_SMALL_ALLOCATION;
7580 } else {
7581 /* count the number of active buckets in the old map */
7582 active_buckets = 0;
7583 for (i = 0; i < ocmap->scm_modulus; i++) {
7584 if (!DRT_HASH_VACANT(ocmap, i) &&
7585 (DRT_HASH_GET_COUNT(ocmap, i) != 0)) {
7586 active_buckets++;
7587 }
7588 }
7589 /*
7590 * If we're currently using the small allocation, check to
7591 * see whether we should grow to the large one.
7592 */
7593 if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7594 /*
7595 * If the ring is nearly full and we are allowed to
7596 * use the large modulus, upgrade.
7597 */
7598 if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
7599 (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
7600 modulus_size = DRT_HASH_LARGE_MODULUS;
7601 map_size = DRT_LARGE_ALLOCATION;
7602 } else {
7603 modulus_size = DRT_HASH_SMALL_MODULUS;
7604 map_size = DRT_SMALL_ALLOCATION;
7605 }
7606 } else if (ocmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7607 if ((active_buckets > (DRT_HASH_LARGE_MODULUS - 5)) &&
7608 (max_mem >= DRT_HASH_XLARGE_MEMORY_REQUIRED)) {
7609 modulus_size = DRT_HASH_XLARGE_MODULUS;
7610 map_size = DRT_XLARGE_ALLOCATION;
7611 } else {
7612 /*
7613 * If the ring is completely full and we can't
7614 * expand, there's nothing useful for us to do.
7615 * Behave as though we had compacted into the new
7616 * array and return.
7617 */
7618 return KERN_SUCCESS;
7619 }
7620 } else {
7621 /* already using the xlarge modulus */
7622 modulus_size = DRT_HASH_XLARGE_MODULUS;
7623 map_size = DRT_XLARGE_ALLOCATION;
7624
7625 /*
7626 * If the ring is completely full, there's
7627 * nothing useful for us to do. Behave as
7628 * though we had compacted into the new
7629 * array and return.
7630 */
7631 if (active_buckets >= DRT_HASH_XLARGE_MODULUS) {
7632 return KERN_SUCCESS;
7633 }
7634 }
7635 }
7636
7637 /*
7638 * Allocate and initialise the new map.
7639 */
7640
7641 kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, map_size,
7642 KMA_DATA, VM_KERN_MEMORY_FILE);
7643 if (kret != KERN_SUCCESS) {
7644 return kret;
7645 }
7646 cmap->scm_magic = DRT_SCM_MAGIC;
7647 cmap->scm_modulus = modulus_size;
7648 cmap->scm_buckets = 0;
7649 cmap->scm_lastclean = 0;
7650 cmap->scm_iskips = 0;
7651 for (i = 0; i < cmap->scm_modulus; i++) {
7652 DRT_HASH_CLEAR(cmap, i);
7653 DRT_HASH_VACATE(cmap, i);
7654 DRT_BITVECTOR_CLEAR(cmap, i);
7655 }
7656
7657 /*
7658 * If there's an old map, re-hash entries from it into the new map.
7659 */
7660 copycount = 0;
7661 if (ocmap != NULL) {
7662 for (i = 0; i < ocmap->scm_modulus; i++) {
7663 /* skip empty buckets */
7664 if (DRT_HASH_VACANT(ocmap, i) ||
7665 (DRT_HASH_GET_COUNT(ocmap, i) == 0)) {
7666 continue;
7667 }
7668 /* get new index */
7669 offset = DRT_HASH_GET_ADDRESS(ocmap, i);
7670 kret = vfs_drt_get_index(&cmap, offset, &index, 1);
7671 if (kret != KERN_SUCCESS) {
7672 /* XXX need to bail out gracefully here */
7673 panic("vfs_drt: new cluster map mysteriously too small");
7674 index = 0;
7675 }
7676 /* copy */
7677 DRT_HASH_COPY(ocmap, i, cmap, index);
7678 copycount++;
7679 }
7680 }
7681
7682 /* log what we've done */
7683 vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
7684
7685 /*
7686 * It's important to ensure that *cmapp always points to
7687 * a valid map, so we must overwrite it before freeing
7688 * the old map.
7689 */
7690 *cmapp = cmap;
7691 if (ocmap != NULL) {
7692 /* emit stats into trace buffer */
7693 vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
7694 ocmap->scm_modulus,
7695 ocmap->scm_buckets,
7696 ocmap->scm_lastclean,
7697 ocmap->scm_iskips);
7698
7699 vfs_drt_free_map(ocmap);
7700 }
7701 return KERN_SUCCESS;
7702 }
7703
7704
7705 /*
7706 * Free a sparse cluster map.
7707 */
7708 static kern_return_t
7709 vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
7710 {
7711 vm_size_t map_size = 0;
7712
7713 if (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7714 map_size = DRT_SMALL_ALLOCATION;
7715 } else if (cmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7716 map_size = DRT_LARGE_ALLOCATION;
7717 } else if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
7718 map_size = DRT_XLARGE_ALLOCATION;
7719 } else {
7720 panic("vfs_drt_free_map: Invalid modulus %d", cmap->scm_modulus);
7721 }
7722
7723 kmem_free(kernel_map, (vm_offset_t)cmap, map_size);
7724 return KERN_SUCCESS;
7725 }
7726
7727
7728 /*
7729 * Find the hashtable slot currently occupied by an entry for the supplied offset.
7730 */
7731 static kern_return_t
7732 vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
7733 {
7734 int index;
7735 u_int32_t i;
7736
7737 offset = DRT_ALIGN_ADDRESS(offset);
7738 index = DRT_HASH(cmap, offset);
7739
7740 /* traverse the hashtable */
7741 for (i = 0; i < cmap->scm_modulus; i++) {
7742 /*
7743 * If the slot is vacant, we can stop.
7744 */
7745 if (DRT_HASH_VACANT(cmap, index)) {
7746 break;
7747 }
7748
7749 /*
7750 * If the address matches our offset, we have success.
7751 */
7752 if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
7753 *indexp = index;
7754 return KERN_SUCCESS;
7755 }
7756
7757 /*
7758 * Move to the next slot, try again.
7759 */
7760 index = DRT_HASH_NEXT(cmap, index);
7761 }
7762 /*
7763 * It's not there.
7764 */
7765 return KERN_FAILURE;
7766 }
7767
7768 /*
7769 * Find the hashtable slot for the supplied offset. If we haven't allocated
7770 * one yet, allocate one and populate the address field. Note that it will
7771 * not have a nonzero page count and thus will still technically be free, so
7772 * in the case where we are called to clean pages, the slot will remain free.
7773 */
7774 static kern_return_t
7775 vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
7776 {
7777 struct vfs_drt_clustermap *cmap;
7778 kern_return_t kret;
7779 u_int32_t index;
7780 u_int32_t i;
7781
7782 cmap = *cmapp;
7783
7784 /* look for an existing entry */
7785 kret = vfs_drt_search_index(cmap, offset, indexp);
7786 if (kret == KERN_SUCCESS) {
7787 return kret;
7788 }
7789
7790 /* need to allocate an entry */
7791 offset = DRT_ALIGN_ADDRESS(offset);
7792 index = DRT_HASH(cmap, offset);
7793
7794 /* scan from the index forwards looking for a vacant slot */
7795 for (i = 0; i < cmap->scm_modulus; i++) {
7796 /* slot vacant? */
7797 if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap, index) == 0) {
7798 cmap->scm_buckets++;
7799 if (index < cmap->scm_lastclean) {
7800 cmap->scm_lastclean = index;
7801 }
7802 DRT_HASH_SET_ADDRESS(cmap, index, offset);
7803 DRT_HASH_SET_COUNT(cmap, index, 0);
7804 DRT_BITVECTOR_CLEAR(cmap, index);
7805 *indexp = index;
7806 vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
7807 return KERN_SUCCESS;
7808 }
7809 cmap->scm_iskips += i;
7810 index = DRT_HASH_NEXT(cmap, index);
7811 }
7812
7813 /*
7814 * We haven't found a vacant slot, so the map is full. If we're not
7815 * already recursed, try reallocating/compacting it.
7816 */
7817 if (recursed) {
7818 return KERN_FAILURE;
7819 }
7820 kret = vfs_drt_alloc_map(cmapp);
7821 if (kret == KERN_SUCCESS) {
7822 /* now try to insert again */
7823 kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
7824 }
7825 return kret;
7826 }
7827
7828 /*
7829 * Implementation of set dirty/clean.
7830 *
7831 * In the 'clean' case, not finding a map is OK.
7832 */
7833 static kern_return_t
7834 vfs_drt_do_mark_pages(
7835 void **private,
7836 u_int64_t offset,
7837 u_int length,
7838 u_int *setcountp,
7839 int dirty)
7840 {
7841 struct vfs_drt_clustermap *cmap, **cmapp;
7842 kern_return_t kret;
7843 int i, index, pgoff, pgcount, setcount, ecount;
7844
7845 cmapp = (struct vfs_drt_clustermap **)private;
7846 cmap = *cmapp;
7847
7848 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
7849
7850 if (setcountp != NULL) {
7851 *setcountp = 0;
7852 }
7853
7854 /* allocate a cluster map if we don't already have one */
7855 if (cmap == NULL) {
7856 /* no cluster map, nothing to clean */
7857 if (!dirty) {
7858 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
7859 return KERN_SUCCESS;
7860 }
7861 kret = vfs_drt_alloc_map(cmapp);
7862 if (kret != KERN_SUCCESS) {
7863 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
7864 return kret;
7865 }
7866 }
7867 setcount = 0;
7868
7869 /*
7870 * Iterate over the length of the region.
7871 */
7872 while (length > 0) {
7873 /*
7874 * Get the hashtable index for this offset.
7875 *
7876 * XXX this will add blank entries if we are clearing a range
7877 * that hasn't been dirtied.
7878 */
7879 kret = vfs_drt_get_index(cmapp, offset, &index, 0);
7880 cmap = *cmapp; /* may have changed! */
7881 /* this may be a partial-success return */
7882 if (kret != KERN_SUCCESS) {
7883 if (setcountp != NULL) {
7884 *setcountp = setcount;
7885 }
7886 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
7887
7888 return kret;
7889 }
7890
7891 /*
7892 * Work out how many pages we're modifying in this
7893 * hashtable entry.
7894 */
7895 pgoff = (int)((offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE);
7896 pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
7897
7898 /*
7899 * Iterate over pages, dirty/clearing as we go.
7900 */
7901 ecount = DRT_HASH_GET_COUNT(cmap, index);
7902 for (i = 0; i < pgcount; i++) {
7903 if (dirty) {
7904 if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
7905 if (ecount >= DRT_BITVECTOR_PAGES) {
7906 panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
7907 }
7908 DRT_HASH_SET_BIT(cmap, index, pgoff + i);
7909 ecount++;
7910 setcount++;
7911 }
7912 } else {
7913 if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
7914 if (ecount <= 0) {
7915 panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
7916 }
7917 assert(ecount > 0);
7918 DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
7919 ecount--;
7920 setcount++;
7921 }
7922 }
7923 }
7924 DRT_HASH_SET_COUNT(cmap, index, ecount);
7925
7926 offset += pgcount * PAGE_SIZE;
7927 length -= pgcount * PAGE_SIZE;
7928 }
7929 if (setcountp != NULL) {
7930 *setcountp = setcount;
7931 }
7932
7933 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
7934
7935 return KERN_SUCCESS;
7936 }
7937
7938 /*
7939 * Mark a set of pages as dirty/clean.
7940 *
7941 * This is a public interface.
7942 *
7943 * cmapp
7944 * Pointer to storage suitable for holding a pointer. Note that
7945 * this must either be NULL or a value set by this function.
7946 *
7947 * size
7948 * Current file size in bytes.
7949 *
7950 * offset
7951 * Offset of the first page to be marked as dirty, in bytes. Must be
7952 * page-aligned.
7953 *
7954 * length
7955 * Length of dirty region, in bytes. Must be a multiple of PAGE_SIZE.
7956 *
7957 * setcountp
7958 * Number of pages newly marked dirty by this call (optional).
7959 *
7960 * Returns KERN_SUCCESS if all the pages were successfully marked.
7961 */
7962 static kern_return_t
7963 vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
7964 {
7965 /* XXX size unused, drop from interface */
7966 return vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1);
7967 }
7968
7969 #if 0
7970 static kern_return_t
7971 vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
7972 {
7973 return vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
7974 }
7975 #endif
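/*
 * Not compiled: an illustrative sketch of how the vfs_drt_* interfaces fit
 * together, using a hypothetical caller rather than anything in this file's
 * real call graph -- mark a dirty range, then drain clusters until the map
 * reports nothing left, much as sparse_cluster_add/sparse_cluster_push do.
 */
#if 0
static void
vfs_drt_usage_sketch(void)
{
void *scmap = NULL; /* map is allocated lazily on the first mark */
u_int new_dirty;
off_t offset;
u_int length;

/* mark 8 pages dirty starting at file offset 0x40000 */
if (vfs_drt_mark_pages(&scmap, 0x40000, 8 * PAGE_SIZE, &new_dirty) != KERN_SUCCESS) {
return;
}
/* each call returns one contiguous run of dirty pages */
while (vfs_drt_get_cluster(&scmap, &offset, &length) == KERN_SUCCESS) {
/* push pages [offset, offset + length) here */
}
/*
 * KERN_FAILURE above means the map is now empty; vfs_drt_get_cluster has
 * already freed it and set scmap to NULL, so no vfs_drt_control(&scmap, 0)
 * teardown is needed on this path.
 */
}
#endif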
7976
7977 /*
7978 * Get a cluster of dirty pages.
7979 *
7980 * This is a public interface.
7981 *
7982 * cmapp
7983 * Pointer to storage managed by drt_mark_pages. Note that this must
7984 * be NULL or a value set by drt_mark_pages.
7985 *
7986 * offsetp
7987 * Returns the byte offset into the file of the first page in the cluster.
7988 *
7989 * lengthp
7990 * Returns the length in bytes of the cluster of dirty pages.
7991 *
7992 * Returns success if a cluster was found. If KERN_FAILURE is returned, there
7993 * are no dirty pages meeting the minimum size criteria. Private storage will
7994 * be released if there are no more dirty pages left in the map.
7995 *
7996 */
7997 static kern_return_t
7998 vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
7999 {
8000 struct vfs_drt_clustermap *cmap;
8001 u_int64_t offset;
8002 u_int length;
8003 u_int32_t j;
8004 int index, i, fs, ls;
8005
8006 /* sanity */
8007 if ((cmapp == NULL) || (*cmapp == NULL)) {
8008 return KERN_FAILURE;
8009 }
8010 cmap = *cmapp;
8011
8012 /* walk the hashtable */
8013 for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
8014 index = DRT_HASH(cmap, offset);
8015
8016 if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) {
8017 continue;
8018 }
8019
8020 /* scan the bitfield for a string of bits */
8021 fs = -1;
8022
8023 for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
8024 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
8025 fs = i;
8026 break;
8027 }
8028 }
8029 if (fs == -1) {
8030 /* didn't find any bits set */
8031 panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld",
8032 cmap, index, DRT_HASH_GET_COUNT(cmap, index));
8033 }
8034 for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
8035 if (!DRT_HASH_TEST_BIT(cmap, index, i)) {
8036 break;
8037 }
8038 }
8039
8040 /* compute offset and length, mark pages clean */
8041 offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
8042 length = ls * PAGE_SIZE;
8043 vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
8044 cmap->scm_lastclean = index;
8045
8046 /* return successful */
8047 *offsetp = (off_t)offset;
8048 *lengthp = length;
8049
8050 vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
8051 return KERN_SUCCESS;
8052 }
8053 /*
8054 * We didn't find anything... hashtable is empty
8055 * emit stats into trace buffer and
8056 * then free it
8057 */
8058 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
8059 cmap->scm_modulus,
8060 cmap->scm_buckets,
8061 cmap->scm_lastclean,
8062 cmap->scm_iskips);
8063
8064 vfs_drt_free_map(cmap);
8065 *cmapp = NULL;
8066
8067 return KERN_FAILURE;
8068 }
8069
8070
8071 static kern_return_t
8072 vfs_drt_control(void **cmapp, int op_type)
8073 {
8074 struct vfs_drt_clustermap *cmap;
8075
8076 /* sanity */
8077 if ((cmapp == NULL) || (*cmapp == NULL)) {
8078 return KERN_FAILURE;
8079 }
8080 cmap = *cmapp;
8081
8082 switch (op_type) {
8083 case 0:
8084 /* emit stats into trace buffer */
8085 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
8086 cmap->scm_modulus,
8087 cmap->scm_buckets,
8088 cmap->scm_lastclean,
8089 cmap->scm_iskips);
8090
8091 vfs_drt_free_map(cmap);
8092 *cmapp = NULL;
8093 break;
8094
8095 case 1:
8096 cmap->scm_lastclean = 0;
8097 break;
8098 }
8099 return KERN_SUCCESS;
8100 }
8101
8102
8103
8104 /*
8105 * Emit a summary of the state of the clustermap into the trace buffer
8106 * along with some caller-provided data.
8107 */
8108 #if KDEBUG
8109 static void
8110 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
8111 {
8112 KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
8113 }
8114 #else
8115 static void
8116 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
8117 __unused int arg1, __unused int arg2, __unused int arg3,
8118 __unused int arg4)
8119 {
8120 }
8121 #endif
8122
8123 #if 0
8124 /*
8125 * Perform basic sanity check on the hash entry summary count
8126 * vs. the actual bits set in the entry.
8127 */
8128 static void
8129 vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
8130 {
8131 int index, i;
8132 int bits_on;
8133
8134 for (index = 0; index < cmap->scm_modulus; index++) {
8135 if (DRT_HASH_VACANT(cmap, index)) {
8136 continue;
8137 }
8138
8139 for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
8140 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
8141 bits_on++;
8142 }
8143 }
8144 if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) {
8145 panic("bits_on = %d, index = %d", bits_on, index);
8146 }
8147 }
8148 }
8149 #endif
8150
8151 /*
8152 * Internal interface only.
8153 */
8154 static kern_return_t
8155 vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag)
8156 {
8157 struct vfs_drt_clustermap *cmap;
8158
8159 /* sanity */
8160 if ((cmapp == NULL) || (*cmapp == NULL) || (push_flag == NULL)) {
8161 return KERN_FAILURE;
8162 }
8163 cmap = *cmapp;
8164
8165 if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
8166 /*
8167 * If we have a full xlarge sparse cluster,
8168 * we push it out all at once so the cluster
8169 * map can be available to absorb more I/Os.
8170 * This is done on large memory configs so
8171 * the small I/Os don't interfere with the
8172 * pro workloads.
8173 */
8174 *push_flag = PUSH_ALL;
8175 }
8176 return KERN_SUCCESS;
8177 }
8178