1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)vfs_cluster.c 8.10 (Berkeley) 3/28/95
62 */
63
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/buf_internal.h>
67 #include <sys/mount_internal.h>
68 #include <sys/vnode_internal.h>
69 #include <sys/trace.h>
70 #include <kern/kalloc.h>
71 #include <sys/time.h>
72 #include <sys/kernel.h>
73 #include <sys/resourcevar.h>
74 #include <miscfs/specfs/specdev.h>
75 #include <sys/uio_internal.h>
76 #include <libkern/libkern.h>
77 #include <machine/machine_routines.h>
78 #include <machine/smp.h>
79
80 #include <sys/ubc_internal.h>
81 #include <vm/vnode_pager.h>
82 #include <vm/vm_upl.h>
83
84 #include <mach/mach_types.h>
85 #include <mach/memory_object_types.h>
86 #include <mach/vm_map.h>
87 #include <mach/upl.h>
88 #include <mach/thread_info.h>
89 #include <kern/task.h>
90 #include <kern/policy_internal.h>
91 #include <kern/thread.h>
92
93 #include <vm/vm_kern_xnu.h>
94 #include <vm/vm_map_xnu.h>
95 #include <vm/vm_pageout_xnu.h>
96 #include <vm/vm_fault.h>
97 #include <vm/vm_ubc.h>
98
99 #include <sys/kdebug.h>
100 #include <sys/kdebug_triage.h>
101 #include <libkern/OSAtomic.h>
102
103 #include <sys/sdt.h>
104
105 #include <stdbool.h>
106
107 #include <vfs/vfs_disk_conditioner.h>
108
109 #if 0
110 #undef KERNEL_DEBUG
111 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
112 #endif
113
114
115 #define CL_READ 0x01
116 #define CL_WRITE 0x02
117 #define CL_ASYNC 0x04
118 #define CL_COMMIT 0x08
119 #define CL_PAGEOUT 0x10
120 #define CL_AGE 0x20
121 #define CL_NOZERO 0x40
122 #define CL_PAGEIN 0x80
123 #define CL_DEV_MEMORY 0x100
124 #define CL_PRESERVE 0x200
125 #define CL_THROTTLE 0x400
126 #define CL_KEEPCACHED 0x800
127 #define CL_DIRECT_IO 0x1000
128 #define CL_PASSIVE 0x2000
129 #define CL_IOSTREAMING 0x4000
130 #define CL_CLOSE 0x8000
131 #define CL_ENCRYPTED 0x10000
132 #define CL_RAW_ENCRYPTED 0x20000
133 #define CL_NOCACHE 0x40000
134 #define CL_DIRECT_IO_FSBLKSZ 0x80000
135
136 #define MAX_VECTOR_UPL_SIZE (2 * MAX_UPL_SIZE_BYTES)
137
138 #define CLUSTER_IO_WAITING ((buf_t)1)
139
140 extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, upl_size_t);
141
142 struct clios {
143 lck_mtx_t io_mtxp;
144 u_int io_completed; /* amount of io that has currently completed */
145 u_int io_issued; /* amount of io that was successfully issued */
146 int io_error; /* error code of first error encountered */
147 int io_wanted; /* someone is sleeping waiting for a change in state */
148 };
149
150 struct cl_direct_read_lock {
151 LIST_ENTRY(cl_direct_read_lock) chain;
152 int32_t ref_count;
153 vnode_t vp;
154 lck_rw_t rw_lock;
155 };
156
157 #define CL_DIRECT_READ_LOCK_BUCKETS 61
158
159 static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock)
160 cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS];
161
162 static LCK_GRP_DECLARE(cl_mtx_grp, "cluster I/O");
163 static LCK_MTX_DECLARE(cl_transaction_mtxp, &cl_mtx_grp);
164 static LCK_SPIN_DECLARE(cl_direct_read_spin_lock, &cl_mtx_grp);
165
166 static ZONE_DEFINE(cl_rd_zone, "cluster_read",
167 sizeof(struct cl_readahead), ZC_ZFREE_CLEARMEM);
168
169 static ZONE_DEFINE(cl_wr_zone, "cluster_write",
170 sizeof(struct cl_writebehind), ZC_ZFREE_CLEARMEM);
171
172 #define IO_UNKNOWN 0
173 #define IO_DIRECT 1
174 #define IO_CONTIG 2
175 #define IO_COPY 3
176
177 #define PUSH_DELAY 0x01
178 #define PUSH_ALL 0x02
179 #define PUSH_SYNC 0x04
180
181
182 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size);
183 static void cluster_wait_IO(buf_t cbp_head, int async);
184 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
185
186 static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
187
188 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
189 int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
190 static void cluster_iodone_verify_continue(void);
191 static int cluster_iodone(buf_t bp, void *callback_arg);
192 static int cluster_iodone_finish(buf_t cbp_head, void *callback_arg);
193 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp);
194 static int cluster_is_throttled(vnode_t vp);
195
196 static void cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name);
197
198 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg, int flags);
199
200 static int cluster_handle_split_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
201 u_int io_size, int rounded_size, int local_flags, int (*callback)(buf_t, void *), void *callback_arg);
202
203 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
204 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
205
206 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
207 int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
208 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
209 int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
210 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
211 int (*)(buf_t, void *), void *callback_arg, int flags) __attribute__((noinline));
212
213 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
214 off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline));
215 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
216 int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size) __attribute__((noinline));
217 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
218 int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag) __attribute__((noinline));
219
220 static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass,
221 off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
222
223 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
224
225 static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
226 static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra,
227 int (*callback)(buf_t, void *), void *callback_arg, int bflag);
228
229 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_ioitiated);
230
231 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *),
232 void *callback_arg, int *err, boolean_t vm_initiated);
233
234 static int sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
235 static int sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag,
236 int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
237 static int sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF,
238 int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated);
239
240 static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
241 static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
242 static kern_return_t vfs_drt_control(void **cmapp, int op_type);
243 static kern_return_t vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag);
244
245
246 /*
247 * For throttled IO to check whether
248 * a block is cached by the boot cache
249 * and thus it can avoid delaying the IO.
250 *
251 * bootcache_contains_block is initially
252 * NULL. The BootCache will set it while
253 * the cache is active and clear it when
254 * the cache is jettisoned.
255 *
256 * Returns 0 if the block is not
257 * contained in the cache, 1 if it is
258 * contained.
259 *
260 * The function pointer remains valid
261 * after the cache has been evicted even
262 * if bootcache_contains_block has been
263 * cleared.
264 *
265 * See rdar://9974130 The new throttling mechanism breaks the boot cache for throttled IOs
266 */
267 int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL;
268
269
270 /*
271 * limit the internal I/O size so that we
272 * can represent it in a 32 bit int
273 */
274 #define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512)
275 #define MAX_IO_CONTIG_SIZE MAX_UPL_SIZE_BYTES
276 #define MAX_VECTS 16
277 /*
278 * The MIN_DIRECT_WRITE_SIZE governs how much I/O should be issued before we consider
279 * allowing the caller to bypass the buffer cache. For small I/Os (less than 16k),
280 * we have not historically allowed the write to bypass the UBC.
281 */
282 #define MIN_DIRECT_WRITE_SIZE (16384)
283
284 #define WRITE_THROTTLE 6
285 #define WRITE_THROTTLE_SSD 2
286 #define WRITE_BEHIND 1
287 #define WRITE_BEHIND_SSD 1
288
289 #if !defined(XNU_TARGET_OS_OSX)
290 #define PREFETCH 1
291 #define PREFETCH_SSD 1
292 uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a specluative read-ahead */
293 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead */
294 #else /* XNU_TARGET_OS_OSX */
295 #define PREFETCH 3
296 #define PREFETCH_SSD 2
297 uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a specluative read-ahead */
298 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead on SSDs*/
299 #endif /* ! XNU_TARGET_OS_OSX */
300
301 /* maximum bytes for read-ahead */
302 uint32_t prefetch_max = (1024 * 1024 * 1024);
303 /* maximum bytes for outstanding reads */
304 uint32_t overlapping_read_max = (1024 * 1024 * 1024);
305 /* maximum bytes for outstanding writes */
306 uint32_t overlapping_write_max = (1024 * 1024 * 1024);
307
308 #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base))
309 #define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
310
311 int speculative_reads_disabled = 0;
312
313 /*
314 * throttle the number of async writes that
315 * can be outstanding on a single vnode
316 * before we issue a synchronous write
317 */
318 #define THROTTLE_MAXCNT 0
319
320 uint32_t throttle_max_iosize = (128 * 1024);
321
322 #define THROTTLE_MAX_IOSIZE (throttle_max_iosize)
323
324 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_max_iosize, 0, "");
325
326 uint32_t split_pgin = 1;
327 uint32_t split_all_pgin = 1;
328 uint32_t split_all_pgin_equal = 0;
329 uint32_t split_pgin_headio = 0;
330
331 SYSCTL_INT(_kern, OID_AUTO, split_pagein_io, CTLFLAG_RW | CTLFLAG_LOCKED, &split_pgin, 0, "");
332 #if DEVELOPMENT || DEBUG
333 SYSCTL_INT(_kern, OID_AUTO, split_pagein_io_all, CTLFLAG_RW | CTLFLAG_LOCKED, &split_all_pgin, 0, "");
334 SYSCTL_INT(_kern, OID_AUTO, split_pagein_io_equal, CTLFLAG_RW | CTLFLAG_LOCKED, &split_all_pgin_equal, 0, "");
335 SYSCTL_INT(_kern, OID_AUTO, split_pagein_do_headio, CTLFLAG_RW | CTLFLAG_LOCKED, &split_pgin_headio, 0, "");
336 #endif
337
338 struct verify_buf {
339 TAILQ_ENTRY(verify_buf) vb_entry;
340 buf_t vb_cbp;
341 void* vb_callback_arg;
342 int32_t vb_whichq;
343 };
344
345 TAILQ_HEAD(, verify_buf) verify_free_head;
346 TAILQ_HEAD(, verify_buf) verify_work_head;
347
348 #define MAX_VERIFY_THREADS 4
349 #define MAX_REQUESTS_PER_THREAD 2
350
351 static struct verify_buf verify_bufs[MAX_VERIFY_THREADS * MAX_REQUESTS_PER_THREAD];
/*
 * Each thread needs to check whether the item at the head of the queue has
 * a UPL pointer that any of the other threads is currently operating on.
 * Slot 0 is reserved for the I/O completion thread to do the request inline
 * when there are no free queue slots.
 */
358 static int verify_in_flight = 0;
359
360 #if defined(XNU_TARGET_OS_IOS) || defined(XNU_TARGET_OS_XR)
361 #define NUM_DEFAULT_THREADS 2
362 #elif defined(XNU_TARGET_OS_OSX)
363 #define NUM_DEFAULT_THREADS 4
364 #else
365 #define NUM_DEFAULT_THREADS 0
366 #endif
367
368 static TUNABLE(uint32_t, num_verify_threads, "num_verify_threads", NUM_DEFAULT_THREADS);
369 static uint32_t cluster_verify_threads = 0; /* will be launched as needed upto num_verify_threads */
370
371 #if __AMP__
372 static TUNABLE(uint32_t, ecore_verify_threads, "ecore_verify_threads", false);
373 #endif /* __AMP__ */
374
375 static void
cluster_verify_init(void)376 cluster_verify_init(void)
377 {
378 TAILQ_INIT(&verify_free_head);
379 TAILQ_INIT(&verify_work_head);
380
381 if (num_verify_threads > MAX_VERIFY_THREADS) {
382 num_verify_threads = MAX_VERIFY_THREADS;
383 }
384
385 for (int i = 0; i < num_verify_threads * MAX_REQUESTS_PER_THREAD; i++) {
386 TAILQ_INSERT_TAIL(&verify_free_head, &verify_bufs[i], vb_entry);
387 }
388 }
389
390 void
cluster_init(void)391 cluster_init(void)
392 {
393 for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) {
394 LIST_INIT(&cl_direct_read_locks[i]);
395 }
396
397 cluster_verify_init();
398 }
399
400 uint32_t
cluster_max_io_size(mount_t mp,int type)401 cluster_max_io_size(mount_t mp, int type)
402 {
403 uint32_t max_io_size;
404 uint32_t segcnt;
405 uint32_t maxcnt;
406
407 switch (type) {
408 case CL_READ:
409 segcnt = mp->mnt_segreadcnt;
410 maxcnt = mp->mnt_maxreadcnt;
411 break;
412 case CL_WRITE:
413 segcnt = mp->mnt_segwritecnt;
414 maxcnt = mp->mnt_maxwritecnt;
415 break;
416 default:
417 segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
418 maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
419 break;
420 }
421 if (segcnt > (MAX_UPL_SIZE_BYTES >> PAGE_SHIFT)) {
422 /*
423 * don't allow a size beyond the max UPL size we can create
424 */
425 segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
426 }
427 max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
428
429 if (max_io_size < MAX_UPL_TRANSFER_BYTES) {
430 /*
431 * don't allow a size smaller than the old fixed limit
432 */
433 max_io_size = MAX_UPL_TRANSFER_BYTES;
434 } else {
435 /*
436 * make sure the size specified is a multiple of PAGE_SIZE
437 */
438 max_io_size &= ~PAGE_MASK;
439 }
440 return max_io_size;
441 }
442
443 /*
444 * Returns max prefetch value. If the value overflows or exceeds the specified
445 * 'prefetch_limit', it will be capped at 'prefetch_limit' value.
446 */
447 static inline uint32_t
cluster_max_prefetch(vnode_t vp,uint32_t max_io_size,uint32_t prefetch_limit)448 cluster_max_prefetch(vnode_t vp, uint32_t max_io_size, uint32_t prefetch_limit)
449 {
450 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
451 uint32_t io_scale = IO_SCALE(vp, is_ssd ? PREFETCH_SSD : PREFETCH);
452 uint32_t prefetch = 0;
453
454 if (__improbable(os_mul_overflow(max_io_size, io_scale, &prefetch) ||
455 (prefetch > prefetch_limit))) {
456 prefetch = prefetch_limit;
457 }
458
459 return prefetch;
460 }
461
462 static inline uint32_t
calculate_max_throttle_size(vnode_t vp)463 calculate_max_throttle_size(vnode_t vp)
464 {
465 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
466 uint32_t io_scale = IO_SCALE(vp, is_ssd ? 2 : 1);
467
468 return MIN(io_scale * THROTTLE_MAX_IOSIZE, MAX_UPL_TRANSFER_BYTES);
469 }
470
471 static inline uint32_t
calculate_max_throttle_cnt(vnode_t vp)472 calculate_max_throttle_cnt(vnode_t vp)
473 {
474 bool is_ssd = disk_conditioner_mount_is_ssd(vp->v_mount);
475 uint32_t io_scale = IO_SCALE(vp, 1);
476
477 return is_ssd ? MIN(io_scale, 4) : THROTTLE_MAXCNT;
478 }
479
480 #define CLW_ALLOCATE 0x01
481 #define CLW_RETURNLOCKED 0x02
482 #define CLW_IONOCACHE 0x04
483 #define CLW_IOPASSIVE 0x08
484
485 /*
486 * if the read ahead context doesn't yet exist,
487 * allocate and initialize it...
488 * the vnode lock serializes multiple callers
489 * during the actual assignment... first one
490 * to grab the lock wins... the other callers
491 * will release the now unnecessary storage
492 *
493 * once the context is present, try to grab (but don't block on)
494 * the lock associated with it... if someone
 * else currently owns it, then the read
496 * will run without read-ahead. this allows
497 * multiple readers to run in parallel and
498 * since there's only 1 read ahead context,
499 * there's no real loss in only allowing 1
500 * reader to have read-ahead enabled.
501 */
/*
 * Return the vnode's read-ahead context with its lock held (trylock),
 * or NULL if another reader currently owns it.  See the block comment
 * above for the allocation/race protocol.
 */
static struct cl_readahead *
cluster_get_rap(vnode_t vp)
{
	struct ubc_info *ubc;
	struct cl_readahead *rap;

	ubc = vp->v_ubcinfo;

	if ((rap = ubc->cl_rahead) == NULL) {
		/* allocate and initialize before taking the vnode lock */
		rap = zalloc_flags(cl_rd_zone, Z_WAITOK | Z_ZERO);
		rap->cl_lastr = -1;	/* no last-read block recorded yet */
		lck_mtx_init(&rap->cl_lockr, &cl_mtx_grp, LCK_ATTR_NULL);

		vnode_lock(vp);

		if (ubc->cl_rahead == NULL) {
			ubc->cl_rahead = rap;
		} else {
			/* lost the assignment race: free ours, adopt the winner's */
			lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
			zfree(cl_rd_zone, rap);
			rap = ubc->cl_rahead;
		}
		vnode_unlock(vp);
	}
	/* non-blocking: if someone else holds it, caller runs without read-ahead */
	if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE) {
		return rap;
	}

	return (struct cl_readahead *)NULL;
}
532
533
534 /*
535 * if the write behind context doesn't yet exist,
536 * and CLW_ALLOCATE is specified, allocate and initialize it...
537 * the vnode lock serializes multiple callers
538 * during the actual assignment... first one
539 * to grab the lock wins... the other callers
540 * will release the now unnecessary storage
541 *
542 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
543 * the lock associated with the write behind context before
544 * returning
545 */
546
/*
 * Return the vnode's write-behind context, optionally allocating it
 * (CLW_ALLOCATE) and/or returning with its lock held (CLW_RETURNLOCKED).
 * Returns NULL only when the context doesn't exist and CLW_ALLOCATE
 * was not requested.  See the block comment above for the protocol.
 */
static struct cl_writebehind *
cluster_get_wbp(vnode_t vp, int flags)
{
	struct ubc_info *ubc;
	struct cl_writebehind *wbp;

	ubc = vp->v_ubcinfo;

	if ((wbp = ubc->cl_wbehind) == NULL) {
		if (!(flags & CLW_ALLOCATE)) {
			return (struct cl_writebehind *)NULL;
		}

		/* allocate and initialize before taking the vnode lock */
		wbp = zalloc_flags(cl_wr_zone, Z_WAITOK | Z_ZERO);

		lck_mtx_init(&wbp->cl_lockw, &cl_mtx_grp, LCK_ATTR_NULL);

		vnode_lock(vp);

		if (ubc->cl_wbehind == NULL) {
			ubc->cl_wbehind = wbp;
		} else {
			/* lost the assignment race: free ours, adopt the winner's */
			lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
			zfree(cl_wr_zone, wbp);
			wbp = ubc->cl_wbehind;
		}
		vnode_unlock(vp);
	}
	if (flags & CLW_RETURNLOCKED) {
		/* blocking acquire, unlike the trylock in cluster_get_rap */
		lck_mtx_lock(&wbp->cl_lockw);
	}

	return wbp;
}
581
582
583 static void
cluster_syncup(vnode_t vp,off_t newEOF,int (* callback)(buf_t,void *),void * callback_arg,int flags)584 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, int flags)
585 {
586 struct cl_writebehind *wbp;
587
588 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
589 if (wbp->cl_number) {
590 lck_mtx_lock(&wbp->cl_lockw);
591
592 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL, FALSE);
593
594 lck_mtx_unlock(&wbp->cl_lockw);
595 }
596 }
597 }
598
599
600 static int
cluster_io_present_in_BC(vnode_t vp,off_t f_offset)601 cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
602 {
603 daddr64_t blkno;
604 size_t io_size;
605 int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
606
607 if (bootcache_check_fn && vp->v_mount && vp->v_mount->mnt_devvp) {
608 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL)) {
609 return 0;
610 }
611
612 if (io_size == 0) {
613 return 0;
614 }
615
616 if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno)) {
617 return 1;
618 }
619 }
620 return 0;
621 }
622
623
/*
 * Returns non-zero when I/O against this vnode's mount would currently
 * be throttled (-1 selects the calling thread's effective tier).
 */
static int
cluster_is_throttled(vnode_t vp)
{
	return throttle_io_will_be_throttled(-1, vp->v_mount);
}
629
630
/*
 * Block until the amount of outstanding I/O tracked by 'iostate'
 * (issued minus completed, in bytes) drops to 'target' or below.
 */
static void
cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name)
{
	lck_mtx_lock(&iostate->io_mtxp);

	while ((iostate->io_issued - iostate->io_completed) > target) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
		    iostate->io_issued, iostate->io_completed, target, 0, 0);

		/*
		 * Flag that a waiter exists; the completion path is expected
		 * to wake &io_wanted after updating io_completed.
		 */
		iostate->io_wanted = 1;
		msleep((caddr_t)&iostate->io_wanted, &iostate->io_mtxp, PRIBIO + 1, wait_name, NULL);

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
		    iostate->io_issued, iostate->io_completed, target, 0, 0);
	}
	lck_mtx_unlock(&iostate->io_mtxp);
}
648
649
/*
 * Release the portion of the UPL associated with 'upl' that corresponds
 * to the byte range [f_offset, f_offset + size) now that this part of
 * the direct write has completed.  No-op when size is 0 or no
 * associated UPL exists.
 */
static void
cluster_handle_associated_upl(struct clios *iostate, upl_t upl,
    upl_offset_t upl_offset, upl_size_t size, off_t f_offset)
{
	if (!size) {
		return;
	}

	upl_t associated_upl = upl_associated_upl(upl);

	if (!associated_upl) {
		return;
	}

	/*
	 * The associated upl functions as a "range lock" for the file.
	 *
	 * The associated upl is created and is attached to the upl in
	 * cluster_io when the direct io write is being started. Since the
	 * upl may be released in parts so the corresponding associated upl
	 * has to be released in parts as well.
	 *
	 * We have the f_offset, upl_offset and size and from that we have to
	 * figure out the associated upl offset and length we are interested in.
	 */
	upl_offset_t assoc_upl_offset, assoc_upl_end;

	/* ALIGNED UPL's */
	if ((upl_offset & PAGE_MASK) == (f_offset & PAGE_MASK)) {
		assoc_upl_offset = trunc_page_32(upl_offset);
		assoc_upl_end = round_page_32(upl_offset + size);
		goto do_commit;
	}

	/*
	 * HANDLE UNALIGNED UPLS
	 *
	 * ( See also cluster_io where the associated upl is created )
	 * While we create the upl in one go, we will be dumping the pages in
	 * the upl in "transaction sized chunks" relative to the upl. Except
	 * for the first transaction, the upl_offset will always be page aligned.
	 * and when the upl's are not aligned the associated upl offset will not
	 * be page aligned and so we have to truncate and round up the starting
	 * and the end of the pages in question and see if they are shared with
	 * other transactions or not. If two transactions "share" a page in the
	 * associated upl, the first one to complete "marks" it and skips that
	 * page and the second one will include it in the "commit range"
	 *
	 * As an example, consider the case where 4 transactions are needed (this
	 * is the worst case).
	 *
	 * Transaction for 0-1 (size -> PAGE_SIZE - upl_offset)
	 *
	 * This covers the associated upl from a -> c. a->b is not shared but
	 * b-c is shared with the next transaction so the first one to complete
	 * will only "mark" it.
	 *
	 * Transaction for 1-2 (size -> PAGE_SIZE)
	 *
	 * For transaction 1, assoc_upl_offset would be 0 (corresponding to the
	 * file offset a or b depending on what file offset the upl_offset
	 * corresponds to ) and assoc_upl_end would correspond to the file
	 * offset c.
	 *
	 * (associated_upl - based on f_offset alignment)
	 * 0    a    b    c    d    e     f
	 * <----|----|----|----|----|----|-----|---->
	 *
	 *
	 * (upl - based on user buffer address alignment)
	 *     <__--|----|----|--__>
	 *
	 *      0    1    2    3
	 *
	 */
	upl_size_t assoc_upl_size = upl_get_size(associated_upl);
#if 0
	/* knock off the simple case first -> this transaction covers the entire UPL */
	upl_offset_t upl_end = round_page_32(upl_offset + size);
	upl_size_t upl_size = vector_upl_get_size(upl);

	if ((trunc_page_32(upl_offset) == 0) && (upl_end == upl_size)) {
		assoc_upl_offset = 0;
		assoc_upl_end = assoc_upl_size;
		goto do_commit;
	}
#endif
	off_t assoc_upl_start_f_offset = upl_adjusted_offset(associated_upl, PAGE_MASK);

	/* map the file-offset range onto page-rounded offsets within the associated upl */
	assoc_upl_offset = (upl_offset_t)trunc_page_64(f_offset - assoc_upl_start_f_offset);
	assoc_upl_end = round_page_64(f_offset + size) - assoc_upl_start_f_offset;

	/*
	 * We can only sanity check the offset returned by upl_adjusted_offset
	 * for the first transaction for this UPL i.e. when (upl_offset < PAGE_SIZE)
	 */
	assertf((upl_offset >= PAGE_SIZE) || ((assoc_upl_start_f_offset == trunc_page_64(f_offset)) && (assoc_upl_offset == 0)),
	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_offset = %d",
	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_offset);

	assertf((upl_offset == assoc_upl_offset) || (upl_offset > assoc_upl_offset && ((upl_offset - assoc_upl_offset) <= PAGE_SIZE)) ||
	    (assoc_upl_offset > upl_offset && ((assoc_upl_offset - upl_offset) <= PAGE_SIZE)),
	    "abs(upl_offset - assoc_upl_offset) > PAGE_SIZE : "
	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_offset = %d",
	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_offset);

	assertf(assoc_upl_end <= assoc_upl_size,
	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_size = %d, assoc_upl_offset = %d, assoc_upl_end = %d",
	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_size, assoc_upl_offset, assoc_upl_end);

	assertf((assoc_upl_size > PAGE_SIZE) || (assoc_upl_offset == 0 && assoc_upl_end == PAGE_SIZE),
	    "upl_offset = %d, f_offset = %lld, size = %d, start_f_offset = %lld, assoc_upl_size = %d, assoc_upl_offset = %d, assoc_upl_end = %d",
	    upl_offset, f_offset, size, assoc_upl_start_f_offset, assoc_upl_size, assoc_upl_offset, assoc_upl_end);

	if (assoc_upl_size == PAGE_SIZE) {
		/* single-page associated upl: no sharing possible, release it all */
		assoc_upl_offset = 0;
		assoc_upl_end = PAGE_SIZE;
		goto do_commit;
	}

	/*
	 * We have to check if the first and last pages of the associated UPL
	 * range could potentially be shared with other transactions and if the
	 * "sharing transactions" are both done. The first one sets the mark bit
	 * and the second one checks it and if set it includes that page in the
	 * pages to be "freed".
	 */
	bool check_first_pg = (assoc_upl_offset != 0) || ((f_offset + size) < (assoc_upl_start_f_offset + PAGE_SIZE));
	bool check_last_pg = (assoc_upl_end != assoc_upl_size) || (f_offset > ((assoc_upl_start_f_offset + assoc_upl_size) - PAGE_SIZE));

	if (check_first_pg || check_last_pg) {
		int first_pg = assoc_upl_offset >> PAGE_SHIFT;
		int last_pg = trunc_page_32(assoc_upl_end - 1) >> PAGE_SHIFT;
		upl_page_info_t *assoc_pl = UPL_GET_INTERNAL_PAGE_LIST(associated_upl);

		/* io_mtxp serializes mark-bit updates between completing transactions */
		lck_mtx_lock_spin(&iostate->io_mtxp);
		if (check_first_pg && !upl_page_get_mark(assoc_pl, first_pg)) {
			/*
			 * The first page isn't marked so let another transaction
			 * completion handle it.
			 */
			upl_page_set_mark(assoc_pl, first_pg, true);
			assoc_upl_offset += PAGE_SIZE;
		}
		if (check_last_pg && !upl_page_get_mark(assoc_pl, last_pg)) {
			/*
			 * The last page isn't marked so mark the page and let another
			 * transaction completion handle it.
			 */
			upl_page_set_mark(assoc_pl, last_pg, true);
			assoc_upl_end -= PAGE_SIZE;
		}
		lck_mtx_unlock(&iostate->io_mtxp);
	}

	if (assoc_upl_end <= assoc_upl_offset) {
		/* both boundary pages deferred to other transactions: nothing to release */
		return;
	}

do_commit:
	size = assoc_upl_end - assoc_upl_offset;

	boolean_t empty;

	/*
	 * We can unlock these pages now and as this is for a
	 * direct/uncached write, we want to dump the pages too.
	 */
	kern_return_t kr = upl_abort_range(associated_upl, assoc_upl_offset, size,
	    UPL_ABORT_DUMP_PAGES, &empty);

	assert(!kr);

	if (!kr && empty) {
		/* last range released: detach and tear down the associated upl */
		upl_set_associated_upl(upl, NULL);
		upl_deallocate(associated_upl);
	}
}
828
/*
 * Continuation body for the cluster verify threads: drain the work
 * queue, running cluster_iodone_finish() on each queued transaction,
 * then sleep until more work arrives (wakeup channel: &verify_work_head).
 */
static void
cluster_iodone_verify_continue(void)
{
	lck_mtx_lock_spin(&cl_transaction_mtxp);
	for (;;) {
		struct verify_buf *vb = TAILQ_FIRST(&verify_work_head);

		if (!vb) {
			/* queue drained: arm the wait, drop the lock, then block below */
			assert_wait(&verify_work_head, (THREAD_UNINT));
			break;
		}
		buf_t cbp = vb->vb_cbp;
		void* callback_arg = vb->vb_callback_arg;

		/* recycle the slot onto the free list before releasing the lock */
		TAILQ_REMOVE(&verify_work_head, vb, vb_entry);
		vb->vb_cbp = NULL;
		vb->vb_callback_arg = NULL;
		vb->vb_whichq = 0;
		TAILQ_INSERT_TAIL(&verify_free_head, vb, vb_entry);
		lck_mtx_unlock(&cl_transaction_mtxp);

		/* do the (potentially long) completion work outside the spin lock */
		(void)cluster_iodone_finish(cbp, callback_arg);
		cbp = NULL;
		lck_mtx_lock_spin(&cl_transaction_mtxp);
	}
	lck_mtx_unlock(&cl_transaction_mtxp);
	thread_block((thread_continue_t)cluster_iodone_verify_continue);
	/* NOT REACHED */
}
858
859 static void
cluster_verify_thread(void)860 cluster_verify_thread(void)
861 {
862 thread_t self = current_thread();
863
864 thread_set_thread_name(self, "cluster_verify_thread");
865 #if __AMP__
866 if (ecore_verify_threads) {
867 thread_soft_bind_cluster_type(self, 'E');
868 }
869 #endif /* __AMP__ */
870 #if !defined(__x86_64__)
871 thread_group_join_io_storage();
872 #endif /* __x86_64__ */
873 cluster_iodone_verify_continue();
874 /* NOT REACHED */
875 }
876
877 static bool
enqueue_buf_for_verify(buf_t cbp,void * callback_arg)878 enqueue_buf_for_verify(buf_t cbp, void *callback_arg)
879 {
880 struct verify_buf *vb;
881
882 vb = TAILQ_FIRST(&verify_free_head);
883 if (vb) {
884 TAILQ_REMOVE(&verify_free_head, vb, vb_entry);
885 vb->vb_cbp = cbp;
886 vb->vb_callback_arg = callback_arg;
887 vb->vb_whichq = 1;
888 TAILQ_INSERT_TAIL(&verify_work_head, vb, vb_entry);
889 return true;
890 } else {
891 return false;
892 }
893 }
894
static int
cluster_handle_verification(buf_t cbp_head, vnode_t vp, upl_t upl, int upl_offset, int transaction_size, int error)
{
	/*
	 * Run filesystem-supplied data verification for a completed read
	 * transaction and release the verification context that was attached
	 * to cbp_head (in cluster_EOT).
	 *
	 * Returns the incoming 'error' if non-zero (verification is skipped,
	 * the context is still freed), otherwise the verification result.
	 * The context is always released via the final VNOP_VERIFY call
	 * (VNODE_VERIFY_CONTEXT_FREE), even on the error path.
	 */
	off_t start_off = cbp_head->b_clfoffset;
	void *verify_ctx = cbp_head->b_attr.ba_un.verify_ctx;
	caddr_t verify_buf = NULL;
	uint32_t verify_length = transaction_size;
	vnode_verify_flags_t verify_flags = VNODE_VERIFY_CONTEXT_FREE;
	int verify_error = EAGAIN;

	assert(cbp_head->b_attr.ba_flags & BA_WILL_VERIFY);

	/* Take ownership of the context so nobody else tries to free it. */
	cbp_head->b_attr.ba_un.verify_ctx = NULL;
	if (error) {
		/* I/O already failed: just free the context below. */
		goto free_context;
	}

	/*
	 * If we don't have a precomputed hash, we make a single call to both
	 * verify and free the context. If we have a precomputed hash, then we
	 * make two separate calls - one to verify the hash and the second one to
	 * free. If the filesystem returns EAGAIN we fall back to the non
	 * precomputed hash case.
	 */
	if (cbp_head->b_attr.ba_verify_type && cbp_head->b_attr.ba_flags & BA_VERIFY_VALID) {
		verify_buf = (caddr_t)buf_verifyptr_with_size(cbp_head, transaction_size, &verify_length);
		verify_flags = VNODE_VERIFY_WITH_CONTEXT | VNODE_VERIFY_PRECOMPUTED;

		if (verify_buf && verify_length) {
			verify_error = VNOP_VERIFY(vp, start_off, (uint8_t *)verify_buf, verify_length,
			    NULL, &verify_ctx, verify_flags, NULL, NULL);
		} else {
			verify_error = EAGAIN;
		}

		/* Reset state for the fallback / context-free call below. */
		verify_buf = NULL;
		verify_length = transaction_size;
		verify_flags = VNODE_VERIFY_CONTEXT_FREE;
	}

	if (verify_error != EAGAIN) {
		/* Precomputed-hash verification ran; its result is the answer. */
		error = verify_error;
	} else {
		vm_offset_t vaddr;

		/*
		 * Map it in.
		 *
		 * ubc_upl_map_range unfortunately cannot handle concurrent map
		 * requests for the same UPL and returns failures when it can't
		 * map. The map exclusive mechanism enforces mutual exclusion
		 * for concurrent requests.
		 */
		verify_error = 0;
		os_atomic_inc(&verify_in_flight, relaxed);
		upl_set_map_exclusive(upl);
		error = ubc_upl_map_range(upl, upl_offset, round_page(transaction_size), VM_PROT_DEFAULT, &vaddr);
		if (error) {
			/* Map failed: undo exclusivity and the in-flight count. */
			upl_clear_map_exclusive(upl);
			printf("ubc_upl_map_range returned error %d upl = %p, upl_offset = %d, size = %d",
			    error, upl, (int)upl_offset, (int)round_page(transaction_size));
			error = EIO;
			if (os_atomic_dec_orig(&verify_in_flight, relaxed) == 0) {
				panic("verify_in_flight underflow");
			}
		} else {
			/* Verify the freshly-mapped data in the same call that frees the ctx. */
			verify_buf = (caddr_t)vaddr;
			verify_flags |= VNODE_VERIFY_WITH_CONTEXT;
		}
	}

free_context:
	/* Always issued: frees verify_ctx; also verifies if verify_buf is set. */
	verify_error = VNOP_VERIFY(vp, start_off, (uint8_t *)verify_buf, verify_length,
	    NULL, &verify_ctx, verify_flags, NULL, NULL);
	if (!error) {
		error = verify_error;
	}

	if (verify_buf) {
		/* We mapped the UPL above: unmap and drop the in-flight count. */
		(void)ubc_upl_unmap_range(upl, upl_offset, round_page(transaction_size));
		upl_clear_map_exclusive(upl);
		verify_buf = NULL;
		if (os_atomic_dec_orig(&verify_in_flight, relaxed) == 0) {
			panic("verify_in_flight underflow");
		}
	}

	return error;
}
984
985 static int
cluster_ioerror(upl_t upl,int upl_offset,int abort_size,int error,int io_flags,vnode_t vp)986 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp)
987 {
988 int upl_abort_code = 0;
989 int page_in = 0;
990 int page_out = 0;
991
992 if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE)) {
993 /*
994 * direct write of any flavor, or a direct read that wasn't aligned
995 */
996 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
997 } else {
998 if (io_flags & B_PAGEIO) {
999 if (io_flags & B_READ) {
1000 page_in = 1;
1001 } else {
1002 page_out = 1;
1003 }
1004 }
1005 if (io_flags & B_CACHE) {
1006 /*
1007 * leave pages in the cache unchanged on error
1008 */
1009 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
1010 } else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) {
1011 /*
1012 * transient error on pageout/write path... leave pages unchanged
1013 */
1014 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
1015 } else if (page_in) {
1016 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
1017 } else {
1018 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY /* | UPL_ABORT_DUMP_PAGES */;
1019 }
1020
1021 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
1022 }
1023 return upl_abort_code;
1024 }
1025
1026
static int
cluster_iodone(buf_t bp, void *callback_arg)
{
	/*
	 * Per-buffer I/O completion handler for cluster I/O.
	 *
	 * Marks 'bp' done and, when all buffers chained off the transaction
	 * head have completed, either hands the transaction to a verify
	 * thread (async verification) or finishes it inline via
	 * cluster_iodone_finish().  Returns 0 when the transaction is not yet
	 * complete or was queued; otherwise the completion result.
	 */
	buf_t cbp;
	buf_t cbp_head;
	int error = 0;
	boolean_t transaction_complete = FALSE;
	bool async;

	__IGNORE_WCASTALIGN(cbp_head = (buf_t)(bp->b_trans_head));

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
	    cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);

	/*
	 * Async verification only when verify threads exist and cluster_EOT
	 * flagged this transaction (acquire pairs with the release there).
	 */
	async = cluster_verify_threads &&
	    (os_atomic_load(&cbp_head->b_attr.ba_flags, acquire) & BA_ASYNC_VERIFY);

	assert(!async || cbp_head->b_attr.ba_un.verify_ctx);

	if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) {
		/* Multi-buffer transaction: check completion under the mutex. */
		lck_mtx_lock_spin(&cl_transaction_mtxp);

		bp->b_flags |= B_TDONE;

		for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
			/*
			 * all I/O requests that are part of this transaction
			 * have to complete before we can process it
			 */
			if (!(cbp->b_flags & B_TDONE)) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
				    cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);

				lck_mtx_unlock(&cl_transaction_mtxp);

				return 0;
			}

			if (cbp->b_trans_next == CLUSTER_IO_WAITING) {
				/*
				 * A thread in cluster_wait_IO is sleeping on
				 * this chain; wake it instead of completing here.
				 */
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
				    cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);

				lck_mtx_unlock(&cl_transaction_mtxp);
				wakeup(cbp);

				return 0;
			}

			if (cbp->b_flags & B_EOT) {
				transaction_complete = TRUE;

				/* Queue for async verify while still holding the mutex. */
				if (async) {
					async = enqueue_buf_for_verify(cbp_head, callback_arg);
				}
			}
		}
		lck_mtx_unlock(&cl_transaction_mtxp);

		if (transaction_complete == FALSE) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
			    cbp_head, 0, 0, 0, 0);
			return 0;
		}
	} else if (async) {
		/* Single-buffer transaction going to the verify threads. */
		lck_mtx_lock_spin(&cl_transaction_mtxp);
		async = enqueue_buf_for_verify(cbp_head, callback_arg);
		lck_mtx_unlock(&cl_transaction_mtxp);
	}

	if (async) {
		/* Queued successfully: poke a verify thread. */
		wakeup(&verify_work_head);
	} else {
		/* No async path (or no free slot): finish inline. */
		error = cluster_iodone_finish(cbp_head, callback_arg);
	}

	return error;
}
1104
static int
cluster_iodone_finish(buf_t cbp_head, void *callback_arg)
{
	/*
	 * Complete a fully-done cluster transaction: accumulate error/resid
	 * across the buffer chain, release the associated UPL (direct writes),
	 * run the caller's iodone callback, zero any partial tail page, run
	 * verification, update streaming-I/O state, commit or abort the UPL,
	 * and free the chain's buffers.  Returns the final error.
	 */
	int b_flags;
	int error;
	int total_size;
	int total_resid;
	int upl_offset;
	int zero_offset;
	int pg_offset = 0;
	int commit_size = 0;
	int upl_flags = 0;
	int transaction_size = 0;
	upl_t upl;
	buf_t cbp;
	buf_t cbp_next;
	buf_t real_bp;
	vnode_t vp;
	struct clios *iostate;

	error = 0;
	total_size = 0;
	total_resid = 0;

	/* Snapshot the transaction-wide state kept on the head buffer. */
	cbp = cbp_head;
	vp = cbp->b_vp;
	upl_offset = cbp->b_uploffset;
	upl = cbp->b_upl;
	b_flags = cbp->b_flags;
	real_bp = cbp->b_real_bp;
	zero_offset = cbp->b_validend;
	iostate = (struct clios *)cbp->b_iostate;

	if (real_bp) {
		real_bp->b_dev = cbp->b_dev;
	}

	/* Walk the chain: collect error, resid, size, and the overall span. */
	while (cbp) {
		if ((cbp->b_flags & B_ERROR) && error == 0) {
			error = cbp->b_error;
		}

		total_resid += cbp->b_resid;
		total_size += cbp->b_bcount;

		cbp_next = cbp->b_trans_next;

		if (cbp_next == NULL) {
			/*
			 * compute the overall size of the transaction
			 * in case we created one that has 'holes' in it
			 * 'total_size' represents the amount of I/O we
			 * did, not the span of the transaction w/r to the UPL
			 */
			transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
		}

		cbp = cbp_next;
	}

	if (ISSET(b_flags, B_COMMIT_UPL)) {
		/* Release the cached-pages UPL attached for direct writes. */
		cluster_handle_associated_upl(iostate,
		    cbp_head->b_upl,
		    upl_offset,
		    transaction_size,
		    cbp_head->b_clfoffset);
	}

	/* A short transfer without an explicit error is still a failure. */
	if (error == 0 && total_resid) {
		error = EIO;
	}

	if (error == 0) {
		int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);

		if (cliodone_func != NULL) {
			/* Callback sees the full span, not just bytes transferred. */
			cbp_head->b_bcount = transaction_size;

			error = (*cliodone_func)(cbp_head, callback_arg);
		}
	}
	if (zero_offset) {
		/* Zero the tail of the last page past EOF (see cluster_io). */
		cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
	}

	if (cbp_head->b_attr.ba_un.verify_ctx) {
		error = cluster_handle_verification(cbp_head, vp, upl, upl_offset, transaction_size, error);
	} else if (cbp_head->b_attr.ba_flags & BA_WILL_VERIFY) {
		/* Verification was promised but no context exists: data untrusted. */
		error = EBADMSG;
	}

	if (iostate) {
		int need_wakeup = 0;

		/*
		 * someone has issued multiple I/Os asynchrounsly
		 * and is waiting for them to complete (streaming)
		 */
		lck_mtx_lock_spin(&iostate->io_mtxp);

		/* Record only the first error seen across the stream. */
		if (error && iostate->io_error == 0) {
			iostate->io_error = error;
		}

		iostate->io_completed += total_size;

		if (iostate->io_wanted) {
			/*
			 * someone is waiting for the state of
			 * this io stream to change
			 */
			iostate->io_wanted = 0;
			need_wakeup = 1;
		}
		lck_mtx_unlock(&iostate->io_mtxp);

		if (need_wakeup) {
			wakeup((caddr_t)&iostate->io_wanted);
		}
	}

	if (b_flags & B_COMMIT_UPL) {
		/* Commit/abort whole pages: round the span out to page bounds. */
		pg_offset = upl_offset & PAGE_MASK;
		commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (error) {
			upl_set_iodone_error(upl, error);

			upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp);
		} else {
			upl_flags = UPL_COMMIT_FREE_ON_EMPTY;

			/* Direct reads wrote into the pages: keep them dirty. */
			if ((b_flags & B_PHYS) && (b_flags & B_READ)) {
				upl_flags |= UPL_COMMIT_SET_DIRTY;
			}

			if (b_flags & B_AGE) {
				upl_flags |= UPL_COMMIT_INACTIVATE;
			}

			ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
		}
	}

	/* Free every buffer in the chain; the head goes last. */
	cbp = cbp_head->b_trans_next;
	while (cbp) {
		cbp_next = cbp->b_trans_next;

		if (cbp != cbp_head) {
			free_io_buf(cbp);
		}

		cbp = cbp_next;
	}
	free_io_buf(cbp_head);

	if (real_bp) {
		/* Propagate the outcome to the caller's original buffer. */
		if (error) {
			real_bp->b_flags |= B_ERROR;
			real_bp->b_error = error;
		}
		real_bp->b_resid = total_resid;

		buf_biodone(real_bp);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
	    upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);

	return error;
}
1275
1276
1277 uint32_t
cluster_throttle_io_limit(vnode_t vp,uint32_t * limit)1278 cluster_throttle_io_limit(vnode_t vp, uint32_t *limit)
1279 {
1280 if (cluster_is_throttled(vp)) {
1281 *limit = calculate_max_throttle_size(vp);
1282 return 1;
1283 }
1284 return 0;
1285 }
1286
1287
1288 void
cluster_zero(upl_t upl,upl_offset_t upl_offset,int size,buf_t bp)1289 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
1290 {
1291 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
1292 upl_offset, size, bp, 0, 0);
1293
1294 if (bp == NULL || bp->b_datap == 0) {
1295 upl_page_info_t *pl;
1296 addr64_t zero_addr;
1297
1298 pl = ubc_upl_pageinfo(upl);
1299
1300 if (upl_device_page(pl) == TRUE) {
1301 zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset;
1302
1303 bzero_phys_nc(zero_addr, size);
1304 } else {
1305 while (size) {
1306 int page_offset;
1307 int page_index;
1308 int zero_cnt;
1309
1310 page_index = upl_offset / PAGE_SIZE;
1311 page_offset = upl_offset & PAGE_MASK;
1312
1313 zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << PAGE_SHIFT) + page_offset;
1314 zero_cnt = min(PAGE_SIZE - page_offset, size);
1315
1316 bzero_phys(zero_addr, zero_cnt);
1317
1318 size -= zero_cnt;
1319 upl_offset += zero_cnt;
1320 }
1321 }
1322 } else {
1323 bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
1324 }
1325
1326 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
1327 upl_offset, size, 0, 0, 0);
1328 }
1329
1330
static void
cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset, size_t verify_block_size)
{
	/*
	 * Finalize a transaction chain: optionally attach a verification
	 * context to the head, record the zero-fill offset, and mark the tail
	 * buffer with B_EOT so cluster_iodone recognizes the end of the chain.
	 *
	 * We will assign a verification context to cbp_head.
	 * This will be passed back to the filesystem when
	 * verifying (in cluster_iodone).
	 */
	if (verify_block_size) {
		off_t start_off = cbp_head->b_clfoffset;
		size_t length;
		void *verify_ctx = NULL;
		int error = 0;
		vnode_t vp = buf_vnode(cbp_head);

		/* Span of the whole chain, from head offset to end of tail. */
		if (cbp_head == cbp_tail) {
			length = cbp_head->b_bcount;
		} else {
			length = (cbp_tail->b_clfoffset + cbp_tail->b_bcount) - start_off;
		}

		/*
		 * zero_offset is non zero for the transaction containing the EOF
		 * (if the filesize is not page aligned). In that case we might
		 * have the transaction size not be page/verify block size aligned
		 *
		 * NOTE(review): the parentheses below apply '!= 0' to the whole
		 * OR expression rather than just the modulo; the result is
		 * semantically the same, but confirm the intent was
		 * (length % verify_block_size) != 0.
		 */
		if ((zero_offset == 0) &&
		    ((length < verify_block_size) || (length % verify_block_size)) != 0) {
			panic("%s length = %zu, verify_block_size = %zu",
			    __FUNCTION__, length, verify_block_size);
		}

		/* Ask the filesystem for a verification context for this range. */
		error = VNOP_VERIFY(vp, start_off, NULL, length,
		    &verify_block_size, &verify_ctx, VNODE_VERIFY_CONTEXT_ALLOC, NULL, NULL);

		assert(!(error && verify_ctx));

		if (verify_ctx) {
			/*
			 * Lazily spawn the verify threads exactly once: the
			 * first caller to transition the count 0 -> 1 starts
			 * them; racing callers undo their increment.
			 */
			if (num_verify_threads && (os_atomic_load(&cluster_verify_threads, relaxed) == 0)) {
				if (os_atomic_inc_orig(&cluster_verify_threads, relaxed) == 0) {
					thread_t thread;
					int i;

					for (i = 0; i < num_verify_threads && i < MAX_VERIFY_THREADS; i++) {
						kernel_thread_start((thread_continue_t)cluster_verify_thread, NULL, &thread);
						thread_deallocate(thread);
					}
					os_atomic_store(&cluster_verify_threads, i, relaxed);
				} else {
					os_atomic_dec(&cluster_verify_threads, relaxed);
				}
			}
			cbp_head->b_attr.ba_un.verify_ctx = verify_ctx;
			/*
			 * At least one thread is busy (at the time we
			 * checked), so we can let it get queued for
			 * async processing. It's fine if we occasionally get
			 * this wrong.
			 */
			if (os_atomic_load(&verify_in_flight, relaxed)) {
				/* This flag and the setting of ba_un.verify_ctx needs to be ordered */
				os_atomic_or(&cbp_head->b_attr.ba_flags, BA_ASYNC_VERIFY, release);
			}
		}
	} else {
		cbp_head->b_attr.ba_un.verify_ctx = NULL;
	}

	cbp_head->b_validend = zero_offset;
	cbp_tail->b_flags |= B_EOT;
}
1402
static void
cluster_wait_IO(buf_t cbp_head, int async)
{
	/*
	 * Block until every buffer in the transaction chain starting at
	 * cbp_head has completed.  Synchronous chains simply biowait each
	 * buffer; async chains use the CLUSTER_IO_WAITING handshake with
	 * cluster_iodone described below.
	 */
	buf_t cbp;

	if (async) {
		/*
		 * Async callback completion will not normally generate a
		 * wakeup upon I/O completion. To get woken up, we set
		 * b_trans_next (which is safe for us to modify) on the last
		 * buffer to CLUSTER_IO_WAITING so that cluster_iodone knows
		 * to wake us up when all buffers as part of this transaction
		 * are completed. This is done under the umbrella of
		 * cl_transaction_mtxp which is also taken in cluster_iodone.
		 */
		bool done = true;
		buf_t last = NULL;

		lck_mtx_lock_spin(&cl_transaction_mtxp);

		/* First pass: are all buffers already done? Also find the tail. */
		for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) {
			if (!ISSET(cbp->b_flags, B_TDONE)) {
				done = false;
			}
		}

		if (!done) {
			/* Install the sentinel so cluster_iodone will wake us. */
			last->b_trans_next = CLUSTER_IO_WAITING;

			DTRACE_IO1(wait__start, buf_t, last);
			do {
				msleep(last, &cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL);

				/*
				 * We should only have been woken up if all the
				 * buffers are completed, but just in case...
				 */
				done = true;
				for (cbp = cbp_head; cbp != CLUSTER_IO_WAITING; cbp = cbp->b_trans_next) {
					if (!ISSET(cbp->b_flags, B_TDONE)) {
						done = false;
						break;
					}
				}
			} while (!done);
			DTRACE_IO1(wait__done, buf_t, last);

			/* Remove the sentinel before anyone walks the chain again. */
			last->b_trans_next = NULL;
		}

		lck_mtx_unlock(&cl_transaction_mtxp);
	} else { // !async
		for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
			buf_biowait(cbp);
		}
	}
}
1460
static void
cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
{
	/*
	 * Drive a transaction chain through completion on the caller's thread:
	 * optionally wait for outstanding I/O, mark every buffer done (and
	 * strip async verification so cluster_iodone completes inline), then
	 * run cluster_iodone and fold its result into *retval.  On return
	 * *cbp_head is NULL (the chain's buffers have been freed).
	 */
	buf_t cbp;
	int error;
	boolean_t isswapout = FALSE;

	/*
	 * cluster_complete_transaction will
	 * only be called if we've issued a complete chain in synchronous mode
	 * or, we've already done a cluster_wait_IO on an incomplete chain
	 */
	if (needwait) {
		for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
			buf_biowait(cbp);
		}
	}
	/*
	 * we've already waited on all of the I/Os in this transaction,
	 * so mark all of the buf_t's in this transaction as B_TDONE
	 * so that cluster_iodone sees the transaction as completed
	 */
	for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) {
		cbp->b_flags |= B_TDONE;
		/* Force inline completion: do not hand off to a verify thread. */
		cbp->b_attr.ba_flags &= ~BA_ASYNC_VERIFY;
	}
	cbp = *cbp_head;

	/* Swap-file pageouts report errors even when plain pageouts would not. */
	if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp)) {
		isswapout = TRUE;
	}

	error = cluster_iodone(cbp, callback_arg);

	if (!(flags & CL_ASYNC) && error && *retval == 0) {
		/* Suppress ENXIO on non-swap pageouts; report everything else. */
		if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO)) {
			*retval = error;
		} else if (isswapout == TRUE) {
			*retval = error;
		}
	}
	*cbp_head = (buf_t)NULL;
}
1504
/*
 * Count of direct writes that had to bail out (ENOTSUP) because the cached
 * UPL covering the range contained wired pages; incremented in cluster_io.
 */
uint64_t cluster_direct_write_wired = 0;
1506
1507 static int
cluster_io(vnode_t vp,upl_t upl,vm_offset_t upl_offset,off_t f_offset,int non_rounded_size,int flags,buf_t real_bp,struct clios * iostate,int (* callback)(buf_t,void *),void * callback_arg)1508 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1509 int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1510 {
1511 buf_t cbp;
1512 u_int size;
1513 u_int io_size;
1514 int io_flags;
1515 int bmap_flags;
1516 int error = 0;
1517 int retval = 0;
1518 buf_t cbp_head = NULL;
1519 buf_t cbp_tail = NULL;
1520 int trans_count = 0;
1521 int max_trans_count;
1522 u_int pg_count;
1523 int pg_offset;
1524 u_int max_iosize;
1525 u_int max_vectors;
1526 int priv;
1527 int zero_offset = 0;
1528 int async_throttle = 0;
1529 mount_t mp;
1530 size_t verify_block_size = 0;
1531 vm_offset_t upl_end_offset;
1532 vnode_verify_kind_t verify_kind = VK_HASH_NONE;
1533 boolean_t need_EOT = FALSE;
1534
1535 /*
1536 * we currently don't support buffers larger than a page
1537 */
1538 if (real_bp && non_rounded_size > PAGE_SIZE) {
1539 panic("%s(): Called with real buffer of size %d bytes which "
1540 "is greater than the maximum allowed size of "
1541 "%d bytes (the system PAGE_SIZE).\n",
1542 __FUNCTION__, non_rounded_size, PAGE_SIZE);
1543 }
1544
1545 mp = vp->v_mount;
1546
1547 /*
1548 * we don't want to do any funny rounding of the size for IO requests
1549 * coming through the DIRECT or CONTIGUOUS paths... those pages don't
1550 * belong to us... we can't extend (nor do we need to) the I/O to fill
1551 * out a page
1552 */
1553 if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
1554 /*
1555 * round the requested size up so that this I/O ends on a
1556 * page boundary in case this is a 'write'... if the filesystem
1557 * has blocks allocated to back the page beyond the EOF, we want to
1558 * make sure to write out the zero's that are sitting beyond the EOF
1559 * so that in case the filesystem doesn't explicitly zero this area
1560 * if a hole is created via a lseek/write beyond the current EOF,
1561 * it will return zeros when it's read back from the disk. If the
1562 * physical allocation doesn't extend for the whole page, we'll
1563 * only write/read from the disk up to the end of this allocation
1564 * via the extent info returned from the VNOP_BLOCKMAP call.
1565 */
1566 pg_offset = upl_offset & PAGE_MASK;
1567
1568 size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
1569 } else {
1570 /*
1571 * anyone advertising a blocksize of 1 byte probably
1572 * can't deal with us rounding up the request size
1573 * AFP is one such filesystem/device
1574 */
1575 size = non_rounded_size;
1576 }
1577 upl_end_offset = upl_offset + size;
1578
1579 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
1580
1581 /*
1582 * Set the maximum transaction size to the maximum desired number of
1583 * buffers.
1584 */
1585 max_trans_count = 8;
1586 if (flags & CL_DEV_MEMORY) {
1587 max_trans_count = 16;
1588 }
1589
1590 if (flags & CL_READ) {
1591 io_flags = B_READ;
1592 bmap_flags = VNODE_READ;
1593
1594 max_iosize = mp->mnt_maxreadcnt;
1595 max_vectors = mp->mnt_segreadcnt;
1596
1597 /* See if we can do cluster verification (pageins and aligned reads) */
1598 if ((flags & CL_PAGEIN || cluster_verify_threads) &&
1599 !(mp->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1600 (VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL, &verify_kind) == 0) &&
1601 verify_block_size) {
1602 if (verify_block_size != PAGE_SIZE) {
1603 verify_block_size = 0;
1604 }
1605 if (real_bp && verify_block_size) {
1606 panic("%s(): Called with real buffer and needs verification ",
1607 __FUNCTION__);
1608 }
1609 /*
1610 * For reads, only allow cluster verification if f_offset
1611 * and upl_offset are both page aligned. Additionally, for direct reads,
1612 * require that the length of the write also be page aligned.
1613 * If they are not page aligned, leave it to the filesystem to do verification.
1614 * Strictly speaking, the alignments need to be for verify_block_size
1615 * but since the only verify_block_size that is currently supported
1616 * is page size, we check against page alignment.
1617 */
1618 if (verify_block_size && !(flags & CL_PAGEIN) &&
1619 ((f_offset & PAGE_MASK) || (upl_offset & PAGE_MASK) ||
1620 ((flags & CL_DIRECT_IO) && (non_rounded_size & PAGE_MASK)))) {
1621 verify_block_size = 0;
1622 verify_kind = VK_HASH_NONE;
1623 }
1624 if (verify_block_size && verify_kind && !upl_has_fs_verify_info(upl)) {
1625 upl_set_fs_verify_info(upl,
1626 (upl_adjusted_size(upl, PAGE_MASK) / mp->mnt_devblocksize) * get_num_bytes_for_verify_kind(verify_kind));
1627 }
1628 }
1629 } else {
1630 io_flags = B_WRITE;
1631 bmap_flags = VNODE_WRITE;
1632
1633 max_iosize = mp->mnt_maxwritecnt;
1634 max_vectors = mp->mnt_segwritecnt;
1635 }
1636 if (verify_block_size) {
1637 bmap_flags |= VNODE_CLUSTER_VERIFY;
1638 }
1639 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
1640
1641 /*
1642 * make sure the maximum iosize is a
1643 * multiple of the page size
1644 */
1645 max_iosize &= ~PAGE_MASK;
1646
1647 /*
1648 * Ensure the maximum iosize is sensible.
1649 */
1650 if (!max_iosize) {
1651 max_iosize = PAGE_SIZE;
1652 }
1653
1654 if (flags & CL_THROTTLE) {
1655 if (!(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) {
1656 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
1657
1658 if (max_iosize > max_throttle_size) {
1659 max_iosize = max_throttle_size;
1660 }
1661 async_throttle = calculate_max_throttle_cnt(vp);
1662 } else {
1663 if ((flags & CL_DEV_MEMORY)) {
1664 async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
1665 } else {
1666 u_int max_cluster;
1667 u_int max_cluster_size;
1668 u_int scale;
1669
1670 if (vp->v_mount->mnt_minsaturationbytecount) {
1671 max_cluster_size = vp->v_mount->mnt_minsaturationbytecount;
1672
1673 scale = 1;
1674 } else {
1675 max_cluster_size = MAX_CLUSTER_SIZE(vp);
1676
1677 if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
1678 scale = WRITE_THROTTLE_SSD;
1679 } else {
1680 scale = WRITE_THROTTLE;
1681 }
1682 }
1683 if (max_iosize > max_cluster_size) {
1684 max_cluster = max_cluster_size;
1685 } else {
1686 max_cluster = max_iosize;
1687 }
1688
1689 if (size < max_cluster) {
1690 max_cluster = size;
1691 }
1692
1693 if (flags & CL_CLOSE) {
1694 scale += MAX_CLUSTERS;
1695 }
1696
1697 async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1);
1698 }
1699 }
1700 }
1701 if (flags & CL_AGE) {
1702 io_flags |= B_AGE;
1703 }
1704 if (flags & (CL_PAGEIN | CL_PAGEOUT)) {
1705 io_flags |= B_PAGEIO;
1706 }
1707 if (flags & (CL_IOSTREAMING)) {
1708 io_flags |= B_IOSTREAMING;
1709 }
1710 if (flags & CL_COMMIT) {
1711 io_flags |= B_COMMIT_UPL;
1712 }
1713 if (flags & CL_DIRECT_IO) {
1714 io_flags |= B_PHYS;
1715 }
1716 if (flags & (CL_PRESERVE | CL_KEEPCACHED)) {
1717 io_flags |= B_CACHE;
1718 }
1719 if (flags & CL_PASSIVE) {
1720 io_flags |= B_PASSIVE;
1721 }
1722 if (flags & CL_ENCRYPTED) {
1723 io_flags |= B_ENCRYPTED_IO;
1724 }
1725
1726 if (vp->v_flag & VSYSTEM) {
1727 io_flags |= B_META;
1728 }
1729
1730 if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
1731 /*
1732 * then we are going to end up
1733 * with a page that we can't complete (the file size wasn't a multiple
1734 * of PAGE_SIZE and we're trying to read to the end of the file
1735 * so we'll go ahead and zero out the portion of the page we can't
1736 * read in from the file
1737 */
1738 zero_offset = (int)(upl_offset + non_rounded_size);
1739 } else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO)) {
1740 assert(ISSET(flags, CL_COMMIT));
1741
1742 // For a direct/uncached write, we need to lock pages...
1743 upl_t cached_upl = NULL;
1744 upl_page_info_t *cached_pl;
1745
1746 assert(upl_offset < PAGE_SIZE);
1747
1748 /*
1749 *
1750 * f_offset = b
1751 * upl_offset = 8K
1752 *
1753 * (cached_upl - based on f_offset alignment)
1754 * 0 a b c
1755 * <----|----|----|----|----|----|-----|---->
1756 *
1757 *
1758 * (upl - based on user buffer address alignment)
1759 * <__--|----|----|--__>
1760 *
1761 * 0 1x 2x 3x
1762 *
1763 */
1764 const off_t cached_upl_f_offset = trunc_page_64(f_offset);
1765 const int cached_upl_size = round_page_32((f_offset - cached_upl_f_offset) + non_rounded_size);
1766 int num_retries = 0;
1767
1768 /*
1769 * Create a UPL to lock the pages in the cache whilst the
1770 * write is in progress.
1771 */
1772 create_cached_upl:
1773 ubc_create_upl_kernel(vp, cached_upl_f_offset, cached_upl_size, &cached_upl,
1774 &cached_pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
1775 if (cached_upl && upl_has_wired_pages(cached_upl)) {
1776 /*
1777 * Pages in this UPL would contain stale data after our direct write
1778 * (which is intended to overwrite these pages on disk). The UPL is
1779 * just holding these pages "busy" to synchronize with any other I/O
1780 * or mmap() access and we have to dump these pages when the direct
1781 * write is done.
1782 * But we can't do that for wired pages, so let's release this UPL
1783 * and fall back to the "cached" path.
1784 */
1785 // printf("******* FBDP %s:%d vp %p offset 0x%llx size 0x%llx - switching from direct to cached write\n", __FUNCTION__, __LINE__, vp, cached_upl_f_offset, (uint64_t)cached_upl_size);
1786 ubc_upl_abort_range(cached_upl, 0, cached_upl_size, UPL_ABORT_FREE_ON_EMPTY);
1787 cached_upl = NULL;
1788 cached_pl = NULL;
1789 cluster_direct_write_wired++;
1790 return ENOTSUP;
1791 }
1792
1793 /*
1794 * If we are not overwriting the first and last pages completely
1795 * we need to write them out first if they are dirty. These pages
1796 * will be discarded after the write completes so we might lose
1797 * the writes for the parts that are not overwrrtten.
1798 */
1799 bool first_page_needs_sync = false;
1800 bool last_page_needs_sync = false;
1801
1802 if (cached_upl && (cached_upl_f_offset < f_offset) && upl_dirty_page(cached_pl, 0)) {
1803 first_page_needs_sync = true;
1804 }
1805
1806 if (cached_upl && (cached_upl_f_offset + cached_upl_size) > (f_offset + non_rounded_size)) {
1807 int last_page = (cached_upl_size / PAGE_SIZE) - 1;
1808
1809 if ((last_page != 0 || !first_page_needs_sync) && upl_dirty_page(cached_pl, last_page)) {
1810 last_page_needs_sync = true;
1811 }
1812 }
1813
1814 if (first_page_needs_sync || last_page_needs_sync) {
1815 ubc_upl_abort_range(cached_upl, 0, cached_upl_size, UPL_ABORT_FREE_ON_EMPTY);
1816 cached_upl = NULL;
1817 cached_pl = NULL;
1818 if (first_page_needs_sync) {
1819 ubc_msync(vp, cached_upl_f_offset, cached_upl_f_offset + PAGE_SIZE, NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1820 }
1821 if (last_page_needs_sync) {
1822 off_t cached_upl_end_offset = cached_upl_f_offset + cached_upl_size;
1823
1824 ubc_msync(vp, cached_upl_end_offset - PAGE_SIZE, cached_upl_end_offset, NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1825 }
1826 if (++num_retries < 16) {
1827 goto create_cached_upl;
1828 }
1829 printf("%s : Number of retries for syncing first or last page reached %d\n", __FUNCTION__, num_retries);
1830 assertf(num_retries < 16, "%s : Number of retries for syncing first or last page reached %d\n", __FUNCTION__, num_retries);
1831 }
1832
1833 /*
1834 * Attach this UPL to the other UPL so that we can find it
1835 * later.
1836 */
1837 upl_set_associated_upl(upl, cached_upl);
1838 assertf(!cached_upl ||
1839 (upl_adjusted_offset(cached_upl, PAGE_MASK) == cached_upl_f_offset),
1840 "upl_adjusted_offset(cached_upl, PAGE_MASK) = %lld, cached_upl_f_offset = %lld",
1841 upl_adjusted_offset(cached_upl, PAGE_MASK), cached_upl_f_offset);
1842 }
1843
1844 while (size) {
1845 daddr64_t blkno;
1846 daddr64_t lblkno;
1847 size_t io_size_tmp;
1848 u_int io_size_wanted;
1849
1850 if (size > max_iosize) {
1851 io_size = max_iosize;
1852 } else {
1853 io_size = size;
1854 }
1855
1856 io_size_wanted = io_size;
1857 io_size_tmp = (size_t)io_size;
1858
1859 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL))) {
1860 break;
1861 }
1862
1863 if (io_size_tmp > io_size_wanted) {
1864 io_size = io_size_wanted;
1865 } else {
1866 io_size = (u_int)io_size_tmp;
1867 }
1868
1869 if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
1870 real_bp->b_blkno = blkno;
1871 }
1872
1873 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
1874 (int)f_offset, (int)(blkno >> 32), (int)blkno, io_size, 0);
1875
1876 if (io_size == 0) {
1877 /*
1878 * vnop_blockmap didn't return an error... however, it did
1879 * return an extent size of 0 which means we can't
1880 * make forward progress on this I/O... a hole in the
1881 * file would be returned as a blkno of -1 with a non-zero io_size
1882 * a real extent is returned with a blkno != -1 and a non-zero io_size
1883 */
1884 error = EINVAL;
1885 break;
1886 }
1887 if (!(flags & CL_READ) && blkno == -1) {
1888 off_t e_offset;
1889 int pageout_flags;
1890
1891 if (upl_get_internal_vectorupl(upl)) {
1892 panic("Vector UPLs should not take this code-path");
1893 }
1894 /*
1895 * we're writing into a 'hole'
1896 */
1897 if (flags & CL_PAGEOUT) {
1898 /*
1899 * if we got here via cluster_pageout
1900 * then just error the request and return
1901 * the 'hole' should already have been covered
1902 */
1903 error = EINVAL;
1904 break;
1905 }
1906 /*
1907 * we can get here if the cluster code happens to
1908 * pick up a page that was dirtied via mmap vs
1909 * a 'write' and the page targets a 'hole'...
1910 * i.e. the writes to the cluster were sparse
1911 * and the file was being written for the first time
1912 *
1913 * we can also get here if the filesystem supports
1914 * 'holes' that are less than PAGE_SIZE.... because
1915 * we can't know if the range in the page that covers
1916 * the 'hole' has been dirtied via an mmap or not,
1917 * we have to assume the worst and try to push the
1918 * entire page to storage.
1919 *
1920 * Try paging out the page individually before
1921 * giving up entirely and dumping it (the pageout
1922 * path will insure that the zero extent accounting
1923 * has been taken care of before we get back into cluster_io)
1924 *
1925 * go direct to vnode_pageout so that we don't have to
1926 * unbusy the page from the UPL... we used to do this
1927 * so that we could call ubc_msync, but that results
1928 * in a potential deadlock if someone else races us to acquire
1929 * that page and wins and in addition needs one of the pages
1930 * we're continuing to hold in the UPL
1931 */
1932 pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
1933
1934 if (!(flags & CL_ASYNC)) {
1935 pageout_flags |= UPL_IOSYNC;
1936 }
1937 if (!(flags & CL_COMMIT)) {
1938 pageout_flags |= UPL_NOCOMMIT;
1939 }
1940
1941 if (cbp_head) {
1942 buf_t prev_cbp;
1943 uint32_t bytes_in_last_page;
1944
1945 /*
					 * first we have to wait for the current outstanding I/Os
1947 * to complete... EOT hasn't been set yet on this transaction
1948 * so the pages won't be released
1949 */
1950 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1951
1952 bytes_in_last_page = cbp_head->b_uploffset & PAGE_MASK;
1953 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
1954 bytes_in_last_page += cbp->b_bcount;
1955 }
1956 bytes_in_last_page &= PAGE_MASK;
1957
1958 while (bytes_in_last_page) {
					/*
					 * we've got a transaction that
					 * includes the page we're about to push out through vnode_pageout...
					 * find the bp's in the list which intersect this page and either
					 * remove them entirely from the transaction (there could be multiple bp's), or
					 * round its iosize down to the page boundary (there can only be one)...
					 *
					 * find the last bp in the list and act on it
					 */
1968 for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next) {
1969 prev_cbp = cbp;
1970 }
1971
1972 if (bytes_in_last_page >= cbp->b_bcount) {
1973 /*
1974 * this buf no longer has any I/O associated with it
1975 */
1976 bytes_in_last_page -= cbp->b_bcount;
1977 cbp->b_bcount = 0;
1978
1979 free_io_buf(cbp);
1980
1981 if (cbp == cbp_head) {
1982 assert(bytes_in_last_page == 0);
1983 /*
1984 * the buf we just freed was the only buf in
1985 * this transaction... so there's no I/O to do
1986 */
1987 cbp_head = NULL;
1988 cbp_tail = NULL;
1989 } else {
1990 /*
1991 * remove the buf we just freed from
1992 * the transaction list
1993 */
1994 prev_cbp->b_trans_next = NULL;
1995 cbp_tail = prev_cbp;
1996 }
1997 } else {
1998 /*
1999 * this is the last bp that has I/O
2000 * intersecting the page of interest
2001 * only some of the I/O is in the intersection
2002 * so clip the size but keep it in the transaction list
2003 */
2004 cbp->b_bcount -= bytes_in_last_page;
2005 cbp_tail = cbp;
2006 bytes_in_last_page = 0;
2007 }
2008 }
2009 if (cbp_head) {
2010 /*
2011 * there was more to the current transaction
2012 * than just the page we are pushing out via vnode_pageout...
2013 * mark it as finished and complete it... we've already
2014 * waited for the I/Os to complete above in the call to cluster_wait_IO
2015 */
2016 cluster_EOT(cbp_head, cbp_tail, 0, 0);
2017
2018 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
2019
2020 trans_count = 0;
2021 }
2022 }
2023 if (vnode_pageout(vp, upl, (upl_offset_t)trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
2024 error = EINVAL;
2025 }
2026 e_offset = round_page_64(f_offset + 1);
2027 io_size = (u_int)(e_offset - f_offset);
2028
2029 f_offset += io_size;
2030 upl_offset += io_size;
2031
2032 if (size >= io_size) {
2033 size -= io_size;
2034 } else {
2035 size = 0;
2036 }
2037 /*
2038 * keep track of how much of the original request
2039 * that we've actually completed... non_rounded_size
2040 * may go negative due to us rounding the request
2041 * to a page size multiple (i.e. size > non_rounded_size)
2042 */
2043 non_rounded_size -= io_size;
2044
2045 if (non_rounded_size <= 0) {
2046 /*
2047 * we've transferred all of the data in the original
2048 * request, but we were unable to complete the tail
2049 * of the last page because the file didn't have
2050 * an allocation to back that portion... this is ok.
2051 */
2052 size = 0;
2053 }
2054 if (error) {
2055 if (size == 0) {
2056 flags &= ~CL_COMMIT;
2057 }
2058 break;
2059 }
2060 continue;
2061 }
2062
2063 lblkno = (daddr64_t)(f_offset / CLUSTER_IO_BLOCK_SIZE);
2064
2065 /*
2066 * we have now figured out how much I/O we can do - this is in 'io_size'
2067 * pg_offset is the starting point in the first page for the I/O
2068 * pg_count is the number of full and partial pages that 'io_size' encompasses
2069 */
2070 pg_offset = upl_offset & PAGE_MASK;
2071
2072 if (flags & CL_DEV_MEMORY) {
2073 /*
2074 * treat physical requests as one 'giant' page
2075 */
2076 pg_count = 1;
2077 } else {
2078 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
2079 }
2080
2081 if ((flags & CL_READ) && blkno == -1) {
2082 vm_offset_t commit_offset;
2083 int bytes_to_zero;
2084 int complete_transaction_now = 0;
2085
2086 /*
2087 * if we're reading and blkno == -1, then we've got a
2088 * 'hole' in the file that we need to deal with by zeroing
2089 * out the affected area in the upl
2090 */
2091 if (io_size >= (u_int)non_rounded_size) {
				/*
				 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
				 * then 'zero_offset' will be non-zero
				 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
				 * (indicated by the io_size finishing off the I/O request for this UPL)
				 * then we're not going to issue an I/O for the
				 * last page in this upl... we need to zero both the hole and the tail
				 * of the page beyond the EOF, since the delayed zero-fill won't kick in
				 */
2101 bytes_to_zero = non_rounded_size;
2102 if (!(flags & CL_NOZERO)) {
2103 bytes_to_zero = (int)((((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset);
2104 }
2105
2106 zero_offset = 0;
2107 } else {
2108 bytes_to_zero = io_size;
2109 }
2110
2111 pg_count = 0;
2112
2113 cluster_zero(upl, (upl_offset_t)upl_offset, bytes_to_zero, real_bp);
2114
2115 if (cbp_head) {
2116 int pg_resid;
2117
2118 /*
2119 * if there is a current I/O chain pending
2120 * then the first page of the group we just zero'd
2121 * will be handled by the I/O completion if the zero
2122 * fill started in the middle of the page
2123 */
2124 commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2125
2126 pg_resid = (int)(commit_offset - upl_offset);
2127
2128 if (bytes_to_zero >= pg_resid) {
2129 /*
2130 * the last page of the current I/O
2131 * has been completed...
2132 * compute the number of fully zero'd
2133 * pages that are beyond it
2134 * plus the last page if its partial
2135 * and we have no more I/O to issue...
2136 * otherwise a partial page is left
2137 * to begin the next I/O
2138 */
2139 if ((int)io_size >= non_rounded_size) {
2140 pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
2141 } else {
2142 pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
2143 }
2144
2145 complete_transaction_now = 1;
2146 }
2147 } else {
2148 /*
2149 * no pending I/O to deal with
2150 * so, commit all of the fully zero'd pages
2151 * plus the last page if its partial
2152 * and we have no more I/O to issue...
2153 * otherwise a partial page is left
2154 * to begin the next I/O
2155 */
2156 if ((int)io_size >= non_rounded_size) {
2157 pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
2158 } else {
2159 pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
2160 }
2161
2162 commit_offset = upl_offset & ~PAGE_MASK;
2163 }
2164
2165 // Associated UPL is currently only used in the direct write path
2166 assert(!upl_associated_upl(upl));
2167
2168 if ((flags & CL_COMMIT) && pg_count) {
2169 ubc_upl_commit_range(upl, (upl_offset_t)commit_offset,
2170 pg_count * PAGE_SIZE,
2171 UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
2172 }
2173 upl_offset += io_size;
2174 f_offset += io_size;
2175 size -= io_size;
2176
2177 /*
2178 * keep track of how much of the original request
2179 * that we've actually completed... non_rounded_size
2180 * may go negative due to us rounding the request
2181 * to a page size multiple (i.e. size > non_rounded_size)
2182 */
2183 non_rounded_size -= io_size;
2184
2185 if (non_rounded_size <= 0) {
2186 /*
2187 * we've transferred all of the data in the original
2188 * request, but we were unable to complete the tail
2189 * of the last page because the file didn't have
2190 * an allocation to back that portion... this is ok.
2191 */
2192 size = 0;
2193 }
2194 if (cbp_head && (complete_transaction_now || size == 0)) {
2195 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2196
2197 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
2198
2199 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
2200
2201 trans_count = 0;
2202 }
2203 continue;
2204 }
2205 if (pg_count > max_vectors) {
2206 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
2207 io_size = PAGE_SIZE - pg_offset;
2208 pg_count = 1;
2209 } else {
2210 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
2211 pg_count = max_vectors;
2212 }
2213 }
2214 /*
2215 * If the transaction is going to reach the maximum number of
2216 * desired elements, truncate the i/o to the nearest page so
2217 * that the actual i/o is initiated after this buffer is
2218 * created and added to the i/o chain.
2219 *
2220 * I/O directed to physically contiguous memory
2221 * doesn't have a requirement to make sure we 'fill' a page
2222 */
2223 if (!(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
2224 ((upl_offset + io_size) & PAGE_MASK)) {
2225 vm_offset_t aligned_ofs;
2226
2227 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
2228 /*
2229 * If the io_size does not actually finish off even a
2230 * single page we have to keep adding buffers to the
2231 * transaction despite having reached the desired limit.
2232 *
2233 * Eventually we get here with the page being finished
2234 * off (and exceeded) and then we truncate the size of
2235 * this i/o request so that it is page aligned so that
2236 * we can finally issue the i/o on the transaction.
2237 */
2238 if (aligned_ofs > upl_offset) {
2239 io_size = (u_int)(aligned_ofs - upl_offset);
2240 pg_count--;
2241 }
2242 }
2243
2244 if (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) {
2245 /*
2246 * if we're not targeting a virtual device i.e. a disk image
2247 * it's safe to dip into the reserve pool since real devices
2248 * can complete this I/O request without requiring additional
2249 * bufs from the alloc_io_buf pool
2250 */
2251 priv = 1;
2252 } else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT) && !cbp_head) {
2253 /*
2254 * Throttle the speculative IO
2255 *
2256 * We can only throttle this if it is the first iobuf
2257 * for the transaction. alloc_io_buf implements
2258 * additional restrictions for diskimages anyway.
2259 */
2260 priv = 0;
2261 } else {
2262 priv = 1;
2263 }
2264
2265 cbp = alloc_io_buf(vp, priv);
2266
2267 if (flags & CL_PAGEOUT) {
2268 u_int i;
2269
2270 /*
2271 * since blocks are in offsets of CLUSTER_IO_BLOCK_SIZE, scale
2272 * iteration to (PAGE_SIZE * pg_count) of blks.
2273 */
2274 for (i = 0; i < (PAGE_SIZE * pg_count) / CLUSTER_IO_BLOCK_SIZE; i++) {
2275 if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY) {
2276 panic("BUSY bp found in cluster_io");
2277 }
2278 }
2279 }
2280 if (flags & CL_ASYNC) {
2281 if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg)) {
2282 panic("buf_setcallback failed");
2283 }
2284 }
2285 cbp->b_cliodone = (void *)callback;
2286 cbp->b_flags |= io_flags;
2287 if (flags & CL_NOCACHE) {
2288 cbp->b_attr.ba_flags |= BA_NOCACHE;
2289 }
2290 if (verify_block_size) {
2291 cbp->b_attr.ba_flags |= BA_WILL_VERIFY;
2292 if (verify_kind) {
2293 cbp->b_attr.ba_verify_type = verify_kind;
2294 }
2295 }
2296
2297 cbp->b_lblkno = lblkno;
2298 cbp->b_clfoffset = f_offset;
2299 cbp->b_blkno = blkno;
2300 cbp->b_bcount = io_size;
2301
2302 if (buf_setupl(cbp, upl, (uint32_t)upl_offset)) {
2303 panic("buf_setupl failed");
2304 }
2305 #if CONFIG_IOSCHED
2306 upl_set_blkno(upl, upl_offset, io_size, blkno);
2307 #endif
2308 cbp->b_trans_next = (buf_t)NULL;
2309
2310 if ((cbp->b_iostate = (void *)iostate)) {
2311 /*
2312 * caller wants to track the state of this
2313 * io... bump the amount issued against this stream
2314 */
2315 iostate->io_issued += io_size;
2316 }
2317
2318 if (flags & CL_READ) {
2319 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
2320 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
2321 } else {
2322 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
2323 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
2324 }
2325
2326 if (cbp_head) {
2327 cbp_tail->b_trans_next = cbp;
2328 cbp_tail = cbp;
2329 } else {
2330 cbp_head = cbp;
2331 cbp_tail = cbp;
2332
2333 if ((cbp_head->b_real_bp = real_bp)) {
2334 real_bp = (buf_t)NULL;
2335 }
2336 }
2337 *(buf_t *)(&cbp->b_trans_head) = cbp_head;
2338
2339 trans_count++;
2340
2341 upl_offset += io_size;
2342 f_offset += io_size;
2343 size -= io_size;
2344 /*
2345 * keep track of how much of the original request
2346 * that we've actually completed... non_rounded_size
2347 * may go negative due to us rounding the request
2348 * to a page size multiple (i.e. size > non_rounded_size)
2349 */
2350 non_rounded_size -= io_size;
2351
2352 if (non_rounded_size <= 0) {
2353 /*
2354 * we've transferred all of the data in the original
2355 * request, but we were unable to complete the tail
2356 * of the last page because the file didn't have
2357 * an allocation to back that portion... this is ok.
2358 */
2359 size = 0;
2360 }
2361 if (size == 0) {
2362 /*
2363 * we have no more I/O to issue, so go
2364 * finish the final transaction
2365 */
2366 need_EOT = TRUE;
2367 } else if (((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
2368 ((flags & CL_ASYNC) || trans_count > max_trans_count)) {
2369 /*
2370 * I/O directed to physically contiguous memory...
2371 * which doesn't have a requirement to make sure we 'fill' a page
2372 * or...
2373 * the current I/O we've prepared fully
2374 * completes the last page in this request
2375 * and ...
2376 * it's either an ASYNC request or
2377 * we've already accumulated more than 8 I/O's into
2378 * this transaction so mark it as complete so that
2379 * it can finish asynchronously or via the cluster_complete_transaction
2380 * below if the request is synchronous
2381 */
2382 need_EOT = TRUE;
2383 }
2384 if (need_EOT == TRUE) {
2385 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0, verify_block_size);
2386 }
2387
2388 if (flags & CL_THROTTLE) {
2389 (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
2390 }
2391
2392 if (!(io_flags & B_READ)) {
2393 vnode_startwrite(vp);
2394 }
2395
2396 if (flags & CL_RAW_ENCRYPTED) {
2397 /*
2398 * User requested raw encrypted bytes.
2399 * Twiddle the bit in the ba_flags for the buffer
2400 */
2401 cbp->b_attr.ba_flags |= BA_RAW_ENCRYPTED_IO;
2402 }
2403
2404 (void) VNOP_STRATEGY(cbp);
2405
2406 if (need_EOT == TRUE) {
2407 if (!(flags & CL_ASYNC)) {
2408 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
2409 }
2410
2411 need_EOT = FALSE;
2412 trans_count = 0;
2413 cbp_head = NULL;
2414 }
2415 }
2416 if (error) {
2417 int abort_size;
2418
2419 io_size = 0;
2420
2421 if (cbp_head) {
2422 /*
2423 * Wait until all of the outstanding I/O
2424 * for this partial transaction has completed
2425 */
2426 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
2427
2428 /*
2429 * Rewind the upl offset to the beginning of the
2430 * transaction.
2431 */
2432 upl_offset = cbp_head->b_uploffset;
2433 }
2434
2435 if (ISSET(flags, CL_COMMIT)) {
2436 cluster_handle_associated_upl(iostate, upl,
2437 (upl_offset_t)upl_offset,
2438 (upl_size_t)(upl_end_offset - upl_offset),
2439 cbp_head ? cbp_head->b_clfoffset : f_offset);
2440 }
2441
2442 // Free all the IO buffers in this transaction
2443 for (cbp = cbp_head; cbp;) {
2444 buf_t cbp_next;
2445
2446 size += cbp->b_bcount;
2447 io_size += cbp->b_bcount;
2448
2449 cbp_next = cbp->b_trans_next;
2450 free_io_buf(cbp);
2451 cbp = cbp_next;
2452 }
2453
2454 if (iostate) {
2455 int need_wakeup = 0;
2456
2457 /*
2458 * update the error condition for this stream
2459 * since we never really issued the io
2460 * just go ahead and adjust it back
2461 */
2462 lck_mtx_lock_spin(&iostate->io_mtxp);
2463
2464 if (iostate->io_error == 0) {
2465 iostate->io_error = error;
2466 }
2467 iostate->io_issued -= io_size;
2468
2469 if (iostate->io_wanted) {
2470 /*
2471 * someone is waiting for the state of
2472 * this io stream to change
2473 */
2474 iostate->io_wanted = 0;
2475 need_wakeup = 1;
2476 }
2477 lck_mtx_unlock(&iostate->io_mtxp);
2478
2479 if (need_wakeup) {
2480 wakeup((caddr_t)&iostate->io_wanted);
2481 }
2482 }
2483
2484 if (flags & CL_COMMIT) {
2485 int upl_flags;
2486
2487 pg_offset = upl_offset & PAGE_MASK;
2488 abort_size = (int)((upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK);
2489
2490 upl_flags = cluster_ioerror(upl, (int)(upl_offset - pg_offset),
2491 abort_size, error, io_flags, vp);
2492
2493 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
2494 upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
2495 }
2496 if (retval == 0) {
2497 retval = error;
2498 }
2499 } else if (cbp_head) {
2500 panic("%s(): cbp_head is not NULL.", __FUNCTION__);
2501 }
2502
2503 if (real_bp) {
2504 /*
2505 * can get here if we either encountered an error
2506 * or we completely zero-filled the request and
2507 * no I/O was issued
2508 */
2509 if (error) {
2510 real_bp->b_flags |= B_ERROR;
2511 real_bp->b_error = error;
2512 }
2513 buf_biodone(real_bp);
2514 }
2515 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
2516
2517 return retval;
2518 }
2519
/*
 * Reset the per-call accumulation state for building a vector UPL run.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and composes safely with unbraced if/else; the previous
 * form embedded a trailing semicolon in the expansion, which produced
 * a stray empty statement at every `reset_vector_run_state();` call
 * site and would break an unbraced if/else.
 */
#define reset_vector_run_state()                                                        \
	do {                                                                            \
		issueVectorUPL = vector_upl_offset = vector_upl_index =                 \
		    vector_upl_iosize = vector_upl_size = 0;                            \
	} while (0)
2522
2523 static int
vector_cluster_io(vnode_t vp,upl_t vector_upl,vm_offset_t vector_upl_offset,off_t v_upl_uio_offset,int vector_upl_iosize,int io_flag,buf_t real_bp,struct clios * iostate,int (* callback)(buf_t,void *),void * callback_arg)2524 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
2525 int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
2526 {
2527 vector_upl_set_pagelist(vector_upl);
2528
2529 if (io_flag & CL_READ) {
2530 if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0)) {
2531 io_flag &= ~CL_PRESERVE; /*don't zero fill*/
2532 } else {
2533 io_flag |= CL_PRESERVE; /*zero fill*/
2534 }
2535 }
2536 return cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg);
2537 }
2538
2539 static int
cluster_read_prefetch(vnode_t vp,off_t f_offset,u_int size,off_t filesize,int (* callback)(buf_t,void *),void * callback_arg,int bflag)2540 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
2541 {
2542 int pages_in_prefetch;
2543
2544 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
2545 (int)f_offset, size, (int)filesize, 0, 0);
2546
2547 if (f_offset >= filesize) {
2548 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2549 (int)f_offset, 0, 0, 0, 0);
2550 return 0;
2551 }
2552 if ((off_t)size > (filesize - f_offset)) {
2553 size = (u_int)(filesize - f_offset);
2554 }
2555 pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
2556
2557 advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
2558
2559 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
2560 (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
2561
2562 return pages_in_prefetch;
2563 }
2564
2565
2566
/*
 * Speculative sequential read-ahead.
 *
 * Examines the page range just read ('extent', in page units) against the
 * per-vnode read-ahead state 'rap' and, when the access pattern looks
 * sequential, prefetches ahead of the reader via cluster_read_prefetch().
 * Updates rap->cl_ralen (current read-ahead window, in pages) and
 * rap->cl_maxra (furthest page already prefetched); rap->cl_lastr (last
 * read) is maintained by the caller.  Prefetch never extends past
 * 'filesize'.  NOTE(review): assumes the caller serializes access to
 * 'rap' (the usual cl_readahead locking) - confirm at call sites.
 */
static void
cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
    int bflag)
{
	daddr64_t r_addr;
	off_t f_offset;
	int size_of_prefetch;
	u_int max_prefetch;


	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
	    (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);

	/* single-page re-read of the previous page... nothing new to learn */
	if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
		return;
	}
	/*
	 * access is not sequential (it neither repeats nor immediately
	 * follows the previous read)... reset the window and give up
	 */
	if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
		rap->cl_ralen = 0;
		rap->cl_maxra = 0;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);

		return;
	}

	max_prefetch = cluster_max_prefetch(vp,
	    cluster_max_io_size(vp->v_mount, CL_READ), speculative_prefetch_max);

	/* prefetch limit too small to be worth a speculative I/O */
	if (max_prefetch <= PAGE_SIZE) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0);
		return;
	}
	/*
	 * reader is still well inside the previously prefetched range
	 * (more than a quarter of the window remains ahead of it)...
	 * defer further read-ahead until it gets closer to cl_maxra
	 */
	if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) {
		if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
			return;
		}
	}
	/* first page beyond everything read or already prefetched */
	r_addr = MAX(extent->e_addr, rap->cl_maxra) + 1;
	f_offset = (off_t)(r_addr * PAGE_SIZE_64);

	size_of_prefetch = 0;

	/* if that page is already resident, someone else got there first */
	ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);

	if (size_of_prefetch) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
		    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
		return;
	}
	if (f_offset < filesize) {
		daddr64_t read_size;

		/* double the window on each sequential hit, capped at max_prefetch */
		rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;

		read_size = (extent->e_addr + 1) - extent->b_addr;

		/* never let the window be smaller than the read that triggered it */
		if (read_size > rap->cl_ralen) {
			if (read_size > max_prefetch / PAGE_SIZE) {
				rap->cl_ralen = max_prefetch / PAGE_SIZE;
			} else {
				rap->cl_ralen = (int)read_size;
			}
		}
		size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);

		if (size_of_prefetch) {
			/* remember how far the prefetch actually reached */
			rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
	    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
}
2645
2646
/*
 * Legacy pageout entry point: identical to cluster_pageout_ext()
 * with no per-I/O completion callback.
 */
int
cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
    int size, off_t filesize, int flags)
{
	return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
}
2653
2654
/*
 * Page out 'size' bytes of 'upl', starting at 'upl_offset' within the UPL
 * and 'f_offset' within the file.
 *
 * Translates the VM system's UPL_* flags into the cluster layer's CL_*
 * flags, rejects invalid requests (zero size, read-only mount, negative or
 * unaligned offset, offset at or beyond EOF), trims the I/O to EOF -
 * aborting any whole pages past the rounded EOF - and issues the write
 * through cluster_io().
 *
 * Returns 0 on success or an errno value; on the error paths the affected
 * UPL range is aborted when the caller asked us to commit (no UPL_NOCOMMIT).
 */
int
cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
    int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	int io_size;        /* bytes of real I/O after trimming to EOF */
	int rounded_size;   /* io_size rounded up to a page multiple */
	off_t max_size;     /* distance from f_offset to EOF */
	int local_flags;

	local_flags = CL_PAGEOUT | CL_THROTTLE;

	if ((flags & UPL_IOSYNC) == 0) {
		local_flags |= CL_ASYNC;
	}
	if ((flags & UPL_NOCOMMIT) == 0) {
		local_flags |= CL_COMMIT;
	}
	if ((flags & UPL_KEEPCACHED)) {
		local_flags |= CL_KEEPCACHED;
	}
	if (flags & UPL_PAGING_ENCRYPTED) {
		local_flags |= CL_ENCRYPTED;
	}


	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
	    (int)f_offset, size, (int)filesize, local_flags, 0);

	/*
	 * If they didn't specify any I/O, then we are done...
	 * we can't issue an abort because we don't know how
	 * big the upl really is
	 */
	if (size <= 0) {
		return EINVAL;
	}

	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		if (local_flags & CL_COMMIT) {
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		}
		return EROFS;
	}
	/*
	 * can't page-out to a negative offset
	 * or if we're starting beyond the EOF
	 * or if the file offset isn't page aligned
	 * or the size requested isn't a multiple of PAGE_SIZE
	 */
	if (f_offset < 0 || f_offset >= filesize ||
	    (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
		if (local_flags & CL_COMMIT) {
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		}
		return EINVAL;
	}
	max_size = filesize - f_offset;

	if (size < max_size) {
		io_size = size;
	} else {
		io_size = (int)max_size;
	}

	rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	if (size > rounded_size) {
		/* pages entirely past the rounded EOF carry no file data... drop them */
		if (local_flags & CL_COMMIT) {
			ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
			    UPL_ABORT_FREE_ON_EMPTY);
		}
	}
	return cluster_io(vp, upl, upl_offset, f_offset, io_size,
	    local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
}
2730
2731
/*
 * Legacy pagein entry point: identical to cluster_pagein_ext()
 * with no per-I/O completion callback.
 */
int
cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
    int size, off_t filesize, int flags)
{
	return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
}
2738
/* Size (32 KiB) of each individual chunk when a large pagein is split. */
#define SPLIT_PAGEIN_MAX_IOSIZE 32768
2740
/*
 * Do a big pagein request as multiple I/Os - the first I/O will be
 * SPLIT_PAGEIN_MAX_IOSIZE (32K) sized and includes the page that caused
 * the fault; I/O is then initiated for the remainder.
 */
/*
 * Service a large pagein as a sequence of smaller I/Os.
 *
 * The first I/O issued is a SPLIT_PAGEIN_MAX_IOSIZE (32K) chunk covering
 * the "needed" page (the one that triggered the fault, located via
 * upl_page_is_needed()); any head region preceding that chunk and the
 * tail following it are issued afterwards, subject to the global tunables
 * documented below.  If no needed page is found and splitting isn't
 * forced, the whole request is issued as a single cluster_io().
 *
 * Returns the first error encountered, but keeps issuing the remaining
 * I/Os even after an error so the entire UPL range is processed.
 */
static int
cluster_handle_split_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
    u_int io_size, int rounded_size, int local_flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t *pl = ubc_upl_pageinfo(upl);
	const off_t start_f_offset = f_offset;           /* saved for the rewind to the head i/o */
	const upl_offset_t start_upl_offset = upl_offset;
	const int start_pg = upl_offset >> PAGE_SHIFT;
	const int last_pg = ((upl_offset + rounded_size) >> PAGE_SHIFT) - 1;
	u_int split_io_size = SPLIT_PAGEIN_MAX_IOSIZE;
	u_int head_io_size = 0;                          /* bytes before the needed-page chunk, issued last */
	int retval = 0;
	int error = 0;
	int pg;

	assert(SPLIT_PAGEIN_MAX_IOSIZE >= (2 * PAGE_SIZE));

	/* locate the first "needed" page in the request */
	for (pg = start_pg; (pg <= last_pg) && !(upl_page_is_needed(pl, pg)); pg++) {
		;
	}

	/*
	 * The global variables affecting behaviour
	 * split_all_pgin -> Split pageins even if we don't find the needed page.
	 * split_pgin_headio -> for a pagein in which there is a head calculated,
	 * do the head i/o or not.
	 *
	 * split_all_pgin_equal -> split the entire big request into equal sized small i/os of 32K.
	 *
	 * Whichever way the i/o is split, the i/o for the needed page always happens first and then we decide
	 * whether we have to do i/o for the head and then if we need to issue equal sized i/o.
	 *
	 * By default we are set up to do only the i/o for the needed page, followed by an "unsplit" tail.
	 */
	if ((pg > start_pg) && (pg <= last_pg)) {
		/* there is a head region ahead of the needed page */
		head_io_size = ((pg - start_pg) * PAGE_SIZE);

		if (head_io_size < SPLIT_PAGEIN_MAX_IOSIZE) {
			/* head too small to be worth splitting off */
			head_io_size = 0;
		} else if (!split_all_pgin) {
			goto out;
		} else if ((rounded_size - head_io_size) <= SPLIT_PAGEIN_MAX_IOSIZE) {
			/* keep a full 32K for the needed-page chunk */
			head_io_size = (rounded_size - SPLIT_PAGEIN_MAX_IOSIZE);
		} else {
			/* round the head down to a 32K multiple */
			head_io_size &= ~(SPLIT_PAGEIN_MAX_IOSIZE - 1);
		}

		assertf(io_size > head_io_size, "io_size is %d, head_io_size = %d", io_size, head_io_size);

		if (head_io_size) {
			/* skip past the head for now; it is issued last (or aborted) */
			upl_offset += head_io_size;
			f_offset += head_io_size;
			io_size -= head_io_size;

			if (!split_pgin_headio) {
				/* not doing the head i/o... release those pages with an error */
				if (local_flags & CL_COMMIT) {
					ubc_upl_abort_range(upl, start_upl_offset, head_io_size,
					    UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
				}
				head_io_size = 0;
			}

			split_io_size = MIN(SPLIT_PAGEIN_MAX_IOSIZE, io_size);
		}

		assertf(io_size >= split_io_size, "io_size is %d, split_io_size = %d", io_size, split_io_size);
	} else if ((pg > last_pg) && !split_all_pgin) {
		/* no needed page found and splitting isn't forced... single i/o */
		goto out;
	}

	/* This is the 32K i/o for the "needed" page */
	retval = cluster_io(vp, upl, upl_offset, f_offset, split_io_size,
	    local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);

	io_size -= split_io_size;

	if (io_size) {
		/* continue with the tail that follows the needed-page chunk */
		upl_offset += split_io_size;
		f_offset += split_io_size;
	} else if (head_io_size) {
		/* no tail... rewind to issue the deferred head i/o */
		io_size = head_io_size;
		head_io_size = 0;
		upl_offset = start_upl_offset;
		f_offset = start_f_offset;
	}

	while (io_size) {
		if (split_all_pgin_equal && (io_size > SPLIT_PAGEIN_MAX_IOSIZE)) {
			split_io_size = SPLIT_PAGEIN_MAX_IOSIZE;
		} else {
			split_io_size = io_size;
		}

		assertf(io_size >= split_io_size, "io_size is %d, split_io_size = %d", io_size, split_io_size);

		/* We have to issue this i/o anyway even if we get an error from any of the previous ones */
		error = cluster_io(vp, upl, upl_offset, f_offset, split_io_size,
		    local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
		if (!retval) {
			/* report the first error, keep going regardless */
			retval = error;
		}

		io_size -= split_io_size;

		if ((io_size == 0) && head_io_size) {
			/* tail finished... rewind to issue the deferred head i/o */
			io_size = head_io_size;
			head_io_size = 0;
			upl_offset = start_upl_offset;
			f_offset = start_f_offset;
		} else if (io_size) {
			upl_offset += split_io_size;
			f_offset += split_io_size;
		}
	}

	return retval;
out:
	/* no split needed or wanted... issue the request as one i/o */
	return cluster_io(vp, upl, upl_offset, f_offset, io_size,
	    local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
}
2866
2867 int
cluster_pagein_ext(vnode_t vp,upl_t upl,upl_offset_t upl_offset,off_t f_offset,int size,off_t filesize,int flags,int (* callback)(buf_t,void *),void * callback_arg)2868 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2869 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2870 {
2871 u_int io_size;
2872 int rounded_size;
2873 off_t max_size;
2874 int retval;
2875 int local_flags = 0;
2876
2877 if (upl == NULL || size < 0) {
2878 panic("cluster_pagein: NULL upl passed in");
2879 }
2880
2881 if ((flags & UPL_IOSYNC) == 0) {
2882 local_flags |= CL_ASYNC;
2883 }
2884 if ((flags & UPL_NOCOMMIT) == 0) {
2885 local_flags |= CL_COMMIT;
2886 }
2887 if (flags & UPL_IOSTREAMING) {
2888 local_flags |= CL_IOSTREAMING;
2889 }
2890 if (flags & UPL_PAGING_ENCRYPTED) {
2891 local_flags |= CL_ENCRYPTED;
2892 }
2893
2894
2895 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
2896 (int)f_offset, size, (int)filesize, local_flags, 0);
2897
2898 /*
2899 * can't page-in from a negative offset
2900 * or if we're starting beyond the EOF
2901 * or if the file offset isn't page aligned
2902 * or the size requested isn't a multiple of PAGE_SIZE
2903 */
2904 if (f_offset < 0 || f_offset >= filesize ||
2905 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
2906 if (local_flags & CL_COMMIT) {
2907 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2908 }
2909
2910 if (f_offset >= filesize) {
2911 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CLUSTER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CL_PGIN_PAST_EOF), 0 /* arg */);
2912 }
2913
2914 return EINVAL;
2915 }
2916 max_size = filesize - f_offset;
2917
2918 if (size < max_size) {
2919 io_size = size;
2920 } else {
2921 io_size = (int)max_size;
2922 }
2923
2924 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2925
2926 if (size > rounded_size && (local_flags & CL_COMMIT)) {
2927 ubc_upl_abort_range(upl, upl_offset + rounded_size,
2928 size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2929 }
2930
2931 if ((io_size > SPLIT_PAGEIN_MAX_IOSIZE) && vnode_isonssd(vp) && split_pgin) {
2932 return cluster_handle_split_pagein(vp, upl, upl_offset, f_offset, io_size,
2933 rounded_size, local_flags, callback, callback_arg);
2934 }
2935
2936 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2937 local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2938
2939 return retval;
2940 }
2941
2942
/*
 * cluster_bp
 *
 * Issue the i/o described by 'bp' through the cluster layer.
 * Convenience wrapper around cluster_bp_ext with no completion callback.
 */
int
cluster_bp(buf_t bp)
{
	return cluster_bp_ext(bp, NULL, NULL);
}
2948
2949
2950 int
cluster_bp_ext(buf_t bp,int (* callback)(buf_t,void *),void * callback_arg)2951 cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
2952 {
2953 off_t f_offset;
2954 int flags;
2955
2956 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
2957 bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
2958
2959 if (bp->b_flags & B_READ) {
2960 flags = CL_ASYNC | CL_READ;
2961 } else {
2962 flags = CL_ASYNC;
2963 }
2964 if (bp->b_flags & B_PASSIVE) {
2965 flags |= CL_PASSIVE;
2966 }
2967
2968 f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
2969
2970 return cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg);
2971 }
2972
2973
2974
/*
 * cluster_write
 *
 * Write the data described by 'uio' to the file backing 'vp'.
 * Convenience wrapper around cluster_write_ext with no completion callback.
 */
int
cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
{
	return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
}
2980
2981
/*
 * cluster_write_ext
 *
 * Top-level write dispatcher.  Carves the request described by 'uio' into
 * chunks and routes each chunk to the appropriate engine:
 *   cluster_write_copy   - buffered (through the page cache)
 *   cluster_write_direct - direct (uncached) i/o to/from user space
 *   cluster_write_contig - i/o from physically contiguous (device) memory
 * re-probing the i/o type (via cluster_io_type) between uio vectors.
 *
 * oldEOF/newEOF are the file size before/after the write; headOff/tailOff
 * bound any zero-filling requested via IO_HEADZEROFILL/IO_TAILZEROFILL.
 * A NULL 'uio' means "zero-fill only".  Returns 0 or an errno value.
 */
int
cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
    int xflags, int (*callback)(buf_t, void *), void *callback_arg)
{
	user_ssize_t cur_resid;
	int retval = 0;
	int flags;
	int zflags;
	int bflag;
	int write_type = IO_COPY;
	u_int32_t write_length = 0, saved_write_length;
	uint32_t min_direct_size = MIN_DIRECT_WRITE_SIZE;

	flags = xflags;

	if (flags & IO_PASSIVE) {
		bflag = CL_PASSIVE;
	} else {
		bflag = 0;
	}

	/* a vnode marked VNOCACHE_DATA forces the nocache path for all writes */
	if (vp->v_flag & VNOCACHE_DATA) {
		flags |= IO_NOCACHE;
		bflag |= CL_NOCACHE;
	}
	if (uio == NULL) {
		/*
		 * no user data...
		 * this call is being made to zero-fill some range in the file
		 */
		retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);

		return retval;
	}
	/*
	 * do a write through the cache if one of the following is true....
	 *   NOCACHE is not true or NODIRECT is true
	 *   the uio request doesn't target USERSPACE
	 * otherwise, find out if we want the direct or contig variant for
	 * the first vector in the uio request
	 */
	if (((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
		if (flags & IO_NOCACHE_SWRITE) {
			uint32_t fs_bsize = vp->v_mount->mnt_vfsstat.f_bsize;

			/*
			 * allow fs-blocksize-aligned direct writes (rather than
			 * page-aligned) when the fs block size is a power of 2
			 * smaller than the normal direct-write minimum
			 */
			if (fs_bsize && (fs_bsize < MIN_DIRECT_WRITE_SIZE) &&
			    ((fs_bsize & (fs_bsize - 1)) == 0)) {
				min_direct_size = fs_bsize;
			}
		}
		retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
	}

	if ((flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT) {
		/*
		 * must go through the cached variant in this case
		 */
		write_type = IO_COPY;
	}

	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
		switch (write_type) {
		case IO_COPY:
			/*
			 * make sure the uio_resid isn't too big...
			 * internally, we want to handle all of the I/O in
			 * chunk sizes that fit in a 32 bit int
			 */
			if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
				/*
				 * we're going to have to call cluster_write_copy
				 * more than once...
				 *
				 * only want the last call to cluster_write_copy to
				 * have the IO_TAILZEROFILL flag set and only the
				 * first call should have IO_HEADZEROFILL
				 */
				zflags = flags & ~IO_TAILZEROFILL;
				flags &= ~IO_HEADZEROFILL;

				write_length = MAX_IO_REQUEST_SIZE;
			} else {
				/*
				 * last call to cluster_write_copy
				 */
				zflags = flags;

				write_length = (u_int32_t)cur_resid;
			}
			retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
			break;

		case IO_CONTIG:
			zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);

			if (flags & IO_HEADZEROFILL) {
				/*
				 * only do this once per request
				 */
				flags &= ~IO_HEADZEROFILL;

				retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
				    headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
				if (retval) {
					break;
				}
			}
			retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);

			if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
				/*
				 * we're done with the data from the user specified buffer(s)
				 * and we've been requested to zero fill at the tail
				 * treat this as an IO_HEADZEROFILL which doesn't require a uio
				 * by rearranging the args and passing in IO_HEADZEROFILL
				 */

				/*
				 * Update the oldEOF to reflect the current EOF. If the UPL page
				 * to zero-fill is not valid (when F_NOCACHE is set), the
				 * cluster_write_copy() will perform RMW on the UPL page when
				 * the oldEOF is not aligned on page boundary due to unaligned
				 * write.
				 */
				if (uio->uio_offset > oldEOF) {
					oldEOF = uio->uio_offset;
				}
				retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)oldEOF, tailOff, uio->uio_offset,
				    (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
			}
			break;

		case IO_DIRECT:
			/*
			 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
			 */
			saved_write_length = write_length;
			retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg, min_direct_size);
			if (retval == ENOTSUP) {
				/* direct I/O didn't work; retry with cached I/O */
				// printf("******* FBDP %s:%d ENOTSUP cnt %d resid 0x%llx offset 0x%llx write_length 0x%x -> 0x%x\n", __FUNCTION__, __LINE__, uio_iovcnt(uio), (uint64_t) uio_resid(uio), uio_offset(uio), write_length, saved_write_length);
				write_length = saved_write_length;
				write_type = IO_COPY;
				retval = 0;
			}
			break;

		case IO_UNKNOWN:
			/* probe the next uio vector to decide which engine to use */
			retval = cluster_io_type(uio, &write_type, &write_length, min_direct_size);
			break;
		}
		/*
		 * in case we end up calling cluster_write_copy (from cluster_write_direct)
		 * multiple times to service a multi-vector request that is not aligned properly
		 * we need to update the oldEOF so that we
		 * don't zero-fill the head of a page if we've successfully written
		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
		 * page that is beyond the oldEOF if the write is unaligned... we only
		 * want that to happen for the very first page of the cluster_write,
		 * NOT the first page of each vector making up a multi-vector write.
		 */
		if (uio->uio_offset > oldEOF) {
			oldEOF = uio->uio_offset;
		}
	}
	return retval;
}
3149
3150
/*
 * cluster_write_direct
 *
 * Direct (uncached) write engine.  Wires the user's pages (via
 * vm_map_get_upl) and issues the i/o straight from them, bypassing the
 * buffer cache.  Multi-vector uios are accumulated into a vector UPL and
 * issued in batches; a snapshot of the uio is kept so the request can be
 * rolled back and retried through the cached path if the underlying
 * filesystem returns ENOTSUP.  Alignment requirements (io_align_mask /
 * devblocksize / mem_alignment_mask) that cannot be met cause the
 * remainder of the request to be handed to cluster_write_copy.
 *
 * On return, *write_type/*write_length describe how the caller should
 * continue servicing the uio.  Returns 0 or an errno value (EAGAIN when
 * throttled and IO_RETURN_ON_THROTTLE was set).
 */
static int
cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
    int flags, int (*callback)(buf_t, void *), void *callback_arg, uint32_t min_io_size)
{
	upl_t upl = NULL;
	upl_page_info_t *pl;
	vm_offset_t upl_offset;
	vm_offset_t vector_upl_offset = 0;
	u_int32_t io_req_size;
	u_int32_t offset_in_file;
	u_int32_t offset_in_iovbase;
	u_int32_t io_size;
	int io_flag = 0;
	upl_size_t upl_size = 0, vector_upl_size = 0;
	vm_size_t upl_needed_size;
	mach_msg_type_number_t pages_in_pl = 0;
	upl_control_flags_t upl_flags;
	kern_return_t kret = KERN_SUCCESS;
	mach_msg_type_number_t i = 0;
	int force_data_sync;
	int retval = 0;
	int first_IO = 1;
	struct clios iostate;
	user_addr_t iov_base;
	u_int32_t mem_alignment_mask;
	u_int32_t devblocksize;
	u_int32_t max_io_size;
	u_int32_t max_upl_size;
	u_int32_t max_vector_size;
	u_int32_t bytes_outstanding_limit;
	boolean_t io_throttled = FALSE;

	u_int32_t vector_upl_iosize = 0;
	int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
	off_t v_upl_uio_offset = 0;
	int vector_upl_index = 0;
	upl_t vector_upl = NULL;
	uio_t snapshot_uio = NULL;

	uint32_t io_align_mask;

	/*
	 * When we enter this routine, we know
	 *  -- the resid will not exceed iov_len
	 */
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
	    (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);

	assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);

	max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);

	io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;

	if (flags & IO_PASSIVE) {
		io_flag |= CL_PASSIVE;
	}

	if (flags & IO_NOCACHE) {
		io_flag |= CL_NOCACHE;
	}

	if (flags & IO_SKIP_ENCRYPTION) {
		io_flag |= CL_ENCRYPTED;
	}

	/* per-stream async i/o accounting, shared with cluster_io completions */
	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;

	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);

	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;

	if (devblocksize == 1) {
		/*
		 * the AFP client advertises a devblocksize of 1
		 * however, its BLOCKMAP routine maps to physical
		 * blocks that are PAGE_SIZE in size...
		 * therefore we can't ask for I/Os that aren't page aligned
		 * or aren't multiples of PAGE_SIZE in size
		 * by setting devblocksize to PAGE_SIZE, we re-instate
		 * the old behavior we had before the mem_alignment_mask
		 * changes went in...
		 */
		devblocksize = PAGE_SIZE;
	}

	io_align_mask = PAGE_MASK;
	if (min_io_size < MIN_DIRECT_WRITE_SIZE) {
		/* The process has opted into fs blocksize direct io writes */
		assert((min_io_size & (min_io_size - 1)) == 0);
		io_align_mask = min_io_size - 1;
		io_flag |= CL_DIRECT_IO_FSBLKSZ;
	}

	if (uio_iovcnt(uio) > 1) {
		/* vector uio -> take a snapshot so we can rollback if needed */
		if (snapshot_uio) {
			uio_free(snapshot_uio);
			snapshot_uio = NULL;
		}
		snapshot_uio = uio_duplicate(uio);
	}

next_dwrite:
	io_req_size = *write_length;
	iov_base = uio_curriovbase(uio);

	offset_in_file = (u_int32_t)(uio->uio_offset & io_align_mask);
	offset_in_iovbase = (u_int32_t)(iov_base & mem_alignment_mask);

	if (offset_in_file || offset_in_iovbase) {
		/*
		 * one of the 2 important offsets is misaligned
		 * so fire an I/O through the cache for this entire vector
		 */
		goto wait_for_dwrites;
	}
	if (iov_base & (devblocksize - 1)) {
		/*
		 * the offset in memory must be on a device block boundary
		 * so that we can guarantee that we can generate an
		 * I/O that ends on a page boundary in cluster_io
		 */
		goto wait_for_dwrites;
	}

	task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp);
	while ((io_req_size >= PAGE_SIZE || io_req_size >= min_io_size) && uio->uio_offset < newEOF && retval == 0) {
		int throttle_type;

		if ((throttle_type = cluster_is_throttled(vp))) {
			uint32_t max_throttle_size = calculate_max_throttle_size(vp);

			/*
			 * we're in the throttle window, at the very least
			 * we want to limit the size of the I/O we're about
			 * to issue
			 */
			if ((flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
				/*
				 * we're in the throttle window and at least 1 I/O
				 * has already been issued by a throttleable thread
				 * in this window, so return with EAGAIN to indicate
				 * to the FS issuing the cluster_write call that it
				 * should now throttle after dropping any locks
				 */
				throttle_info_update_by_mount(vp->v_mount);

				io_throttled = TRUE;
				goto wait_for_dwrites;
			}
			max_vector_size = max_throttle_size;
			max_io_size = max_throttle_size;
		} else {
			max_vector_size = MAX_VECTOR_UPL_SIZE;
			max_io_size = max_upl_size;
		}

		if (first_IO) {
			/* flush any cached dirty data for this file before going direct */
			cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
			first_IO = 0;
		}
		io_size = io_req_size & ~io_align_mask;
		iov_base = uio_curriovbase(uio);

		if (io_size > max_io_size) {
			io_size = max_io_size;
		}

		if (useVectorUPL && (iov_base & PAGE_MASK)) {
			/*
			 * We have an iov_base that's not page-aligned.
			 * Issue all I/O's that have been collected within
			 * this Vectored UPL.
			 */
			if (vector_upl_index) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				if (retval == ENOTSUP) {
					goto enotsup;
				}
				reset_vector_run_state();
			}

			/*
			 * After this point, if we are using the Vector UPL path and the base is
			 * not page-aligned then the UPL with that base will be the first in the vector UPL.
			 */
		}

		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
		    (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);

		/*
		 * wire the user pages; retry with increasing force_data_sync
		 * if some of them come back invalid
		 */
		vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
			pages_in_pl = 0;
			upl_size = (upl_size_t)upl_needed_size;
			upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
			    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

			kret = vm_map_get_upl(map,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
			    vm_memtag_canonicalize(map, (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK))),
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
			    (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
			    &upl_size,
			    &upl,
			    NULL,
			    &pages_in_pl,
			    &upl_flags,
			    VM_KERN_MEMORY_FILE,
			    force_data_sync);

			if (kret != KERN_SUCCESS) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
				    0, 0, 0, kret, 0);
				/*
				 * failed to get pagelist
				 *
				 * we may have already spun some portion of this request
				 * off as async requests... we need to wait for the I/O
				 * to complete before returning
				 */
				goto wait_for_dwrites;
			}
			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
			pages_in_pl = upl_size / PAGE_SIZE;

			for (i = 0; i < pages_in_pl; i++) {
				if (!upl_valid_page(pl, i)) {
					break;
				}
			}
			if (i == pages_in_pl) {
				break;
			}

			/*
			 * didn't get all the pages back that we
			 * needed... release this upl and try again
			 */
			ubc_upl_abort(upl, 0);
		}
		if (force_data_sync >= 3) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
			    i, pages_in_pl, upl_size, kret, 0);
			/*
			 * for some reason, we couldn't acquire a hold on all
			 * the pages needed in the user's address space
			 *
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dwrites;
		}

		/*
		 * Consider the possibility that upl_size wasn't satisfied.
		 */
		if (upl_size < upl_needed_size) {
			if (upl_size && upl_offset == 0) {
				io_size = upl_size;
			} else {
				io_size = 0;
			}
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
		    (int)upl_offset, upl_size, (int)iov_base, io_size, 0);

		if (io_size == 0) {
			ubc_upl_abort(upl, 0);
			upl = NULL;
			/*
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dwrites;
		}

		if (useVectorUPL) {
			vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
			if (end_off) {
				issueVectorUPL = 1;
			}
			/*
			 * After this point, if we are using a vector UPL, then
			 * either all the UPL elements end on a page boundary OR
			 * this UPL is the last element because it does not end
			 * on a page boundary.
			 */
		}

		/*
		 * we want push out these writes asynchronously so that we can overlap
		 * the preparation of the next I/O
		 * if there are already too many outstanding writes
		 * wait until some complete before issuing the next
		 */
		if (vp->v_mount->mnt_minsaturationbytecount) {
			bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
		} else {
			if (__improbable(os_mul_overflow(max_upl_size, IO_SCALE(vp, 2),
			    &bytes_outstanding_limit) ||
			    (bytes_outstanding_limit > overlapping_write_max))) {
				bytes_outstanding_limit = overlapping_write_max;
			}
		}

		cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");

		if (iostate.io_error) {
			/*
			 * one of the earlier writes we issued ran into a hard error
			 * don't issue any more writes, cleanup the UPL
			 * that was just created but not used, then
			 * go wait for all writes that are part of this stream
			 * to complete before returning the error to the caller
			 */
			ubc_upl_abort(upl, 0);
			upl = NULL;

			goto wait_for_dwrites;
		}

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
		    (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);

		if (!useVectorUPL) {
			retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
			    io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		} else {
			/* accumulate this upl into the vector; issue when full or forced */
			if (!vector_upl_index) {
				vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
				v_upl_uio_offset = uio->uio_offset;
				vector_upl_offset = upl_offset;
			}

			vector_upl_set_subupl(vector_upl, upl, upl_size);
			vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
			vector_upl_index++;
			vector_upl_iosize += io_size;
			vector_upl_size += upl_size;

			if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				if (retval != ENOTSUP) {
					reset_vector_run_state();
				}
			}
		}
		if (retval == ENOTSUP) {
enotsup:
			/*
			 * Can't do direct I/O. Try again with cached I/O.
			 */
			// printf("******* FBDP %s:%d ENOTSUP io_size 0%x resid 0x%llx\n", __FUNCTION__, __LINE__, io_size, uio_resid(uio));
			io_size = 0;
			if (snapshot_uio) {
				int restore_error;

				/*
				 * We've been collecting UPLs for this vector UPL and
				 * moving the uio along. We need to undo that so that
				 * the I/O can continue where it actually stopped...
				 */
				restore_error = uio_restore(uio, snapshot_uio);
				assert(!restore_error);
				uio_free(snapshot_uio);
				snapshot_uio = NULL;
			}
			if (vector_upl_index) {
				ubc_upl_abort(vector_upl, 0);
				vector_upl = NULL;
			} else {
				ubc_upl_abort(upl, 0);
				upl = NULL;
			}
			goto wait_for_dwrites;
		}

		/*
		 * update the uio structure to
		 * reflect the I/O that we just issued
		 */
		uio_update(uio, (user_size_t)io_size);

		/*
		 * in case we end up calling through to cluster_write_copy to finish
		 * the tail of this request, we need to update the oldEOF so that we
		 * don't zero-fill the head of a page if we've successfully written
		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
		 * page that is beyond the oldEOF if the write is unaligned... we only
		 * want that to happen for the very first page of the cluster_write,
		 * NOT the first page of each vector making up a multi-vector write.
		 */
		if (uio->uio_offset > oldEOF) {
			oldEOF = uio->uio_offset;
		}

		io_req_size -= io_size;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
		    (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
	} /* end while */

	if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
		/* this vector is done; see if the next one is also direct-eligible */
		retval = cluster_io_type(uio, write_type, write_length, min_io_size);

		if (retval == 0 && *write_type == IO_DIRECT) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
			    (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);

			goto next_dwrite;
		}
	}

wait_for_dwrites:

	/* flush any partially-accumulated vector UPL before draining */
	if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		reset_vector_run_state();
	}
	/*
	 * make sure all async writes issued as part of this stream
	 * have completed before we return
	 */
	cluster_iostate_wait(&iostate, 0, "cluster_write_direct");

	if (iostate.io_error) {
		retval = iostate.io_error;
	}

	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);

	if (io_throttled == TRUE && retval == 0) {
		retval = EAGAIN;
	}

	if (io_req_size && retval == 0) {
		/*
		 * we couldn't handle the tail of this request in DIRECT mode
		 * so fire it through the copy path
		 *
		 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
		 * so we can just pass 0 in for the headOff and tailOff
		 */
		if (uio->uio_offset > oldEOF) {
			oldEOF = uio->uio_offset;
		}

		retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);

		*write_type = IO_UNKNOWN;
	}

	if (snapshot_uio) {
		uio_free(snapshot_uio);
		snapshot_uio = NULL;
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
	    (int)uio->uio_offset, io_req_size, retval, 4, 0);

	return retval;
}
3625
3626
/*
 * cluster_write_contig
 *
 * Write engine for uio vectors whose source memory is physically
 * contiguous (e.g. device memory).  Wires the source region into a
 * UPL (which must come back UPL_PHYS_CONTIG), aligns the head and tail
 * of the transfer to devblocksize boundaries via cluster_align_phys_io,
 * and issues the aligned middle asynchronously in MAX_IO_CONTIG_SIZE
 * chunks.  Up to MAX_VECTS consecutive IO_CONTIG vectors are serviced
 * before returning.  Returns 0 or an errno value.
 */
static int
cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
    int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
	upl_page_info_t *pl;
	addr64_t src_paddr = 0;
	upl_t upl[MAX_VECTS];
	vm_offset_t upl_offset;
	u_int32_t tail_size = 0;
	u_int32_t io_size;
	u_int32_t xsize;
	upl_size_t upl_size;
	vm_size_t upl_needed_size;
	mach_msg_type_number_t pages_in_pl;
	upl_control_flags_t upl_flags;
	kern_return_t kret;
	struct clios iostate;
	int error = 0;
	int cur_upl = 0;
	int num_upl = 0;
	int n;
	user_addr_t iov_base;
	u_int32_t devblocksize;
	u_int32_t mem_alignment_mask;

	/*
	 * When we enter this routine, we know
	 *  -- the io_req_size will not exceed iov_len
	 *  -- the target address is physically contiguous
	 */
	cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);

	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;

	/* per-stream async i/o accounting, shared with cluster_io completions */
	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;

	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);

next_cwrite:
	io_size = *write_length;

	iov_base = uio_curriovbase(uio);

	upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
	upl_needed_size = upl_offset + io_size;

	pages_in_pl = 0;
	upl_size = (upl_size_t)upl_needed_size;
	upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

	vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
	kret = vm_map_get_upl(map,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	    vm_memtag_canonicalize(map, vm_map_trunc_page(iov_base, vm_map_page_mask(map))),
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
	    vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
	    &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);

	if (kret != KERN_SUCCESS) {
		/*
		 * failed to get pagelist
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}
	num_upl++;

	if (!(upl_flags & UPL_PHYS_CONTIG)) {
		/*
		 * The created UPL needs to have the UPL_PHYS_CONTIG flag.
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}

	/*
	 * Consider the possibility that upl_size wasn't satisfied.
	 */
	if (upl_size < upl_needed_size) {
		/*
		 * This is a failure in the physical memory case.
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}
	pl = ubc_upl_pageinfo(upl[cur_upl]);

	/* physical address of the source data within the wired region */
	src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;

	/* align the head of the transfer to a devblocksize boundary */
	while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
		u_int32_t head_size;

		head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));

		if (head_size > io_size) {
			head_size = io_size;
		}

		error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);

		if (error) {
			goto wait_for_cwrites;
		}

		upl_offset += head_size;
		src_paddr += head_size;
		io_size -= head_size;

		iov_base += head_size;
	}
	if ((u_int32_t)iov_base & mem_alignment_mask) {
		/*
		 * request doesn't set up on a memory boundary
		 * the underlying DMA engine can handle...
		 * return an error instead of going through
		 * the slow copy path since the intent of this
		 * path is direct I/O from device memory
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}

	/* defer the sub-devblocksize tail until all async i/o has drained */
	tail_size = io_size & (devblocksize - 1);
	io_size -= tail_size;

	while (io_size && error == 0) {
		if (io_size > MAX_IO_CONTIG_SIZE) {
			xsize = MAX_IO_CONTIG_SIZE;
		} else {
			xsize = io_size;
		}
		/*
		 * request asynchronously so that we can overlap
		 * the preparation of the next I/O... we'll do
		 * the commit after all the I/O has completed
		 * since its all issued against the same UPL
		 * if there are already too many outstanding writes
		 * wait until some have completed before issuing the next
		 */
		cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");

		if (iostate.io_error) {
			/*
			 * one of the earlier writes we issued ran into a hard error
			 * don't issue any more writes...
			 * go wait for all writes that are part of this stream
			 * to complete before returning the error to the caller
			 */
			goto wait_for_cwrites;
		}
		/*
		 * issue an asynchronous write to cluster_io
		 */
		error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
		    xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);

		if (error == 0) {
			/*
			 * The cluster_io write completed successfully,
			 * update the uio structure
			 */
			uio_update(uio, (user_size_t)xsize);

			upl_offset += xsize;
			src_paddr += xsize;
			io_size -= xsize;
		}
	}
	if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
		/* see whether the next uio vector can also go through this path */
		error = cluster_io_type(uio, write_type, write_length, 0);

		if (error == 0 && *write_type == IO_CONTIG) {
			cur_upl++;
			goto next_cwrite;
		}
	} else {
		*write_type = IO_UNKNOWN;
	}

wait_for_cwrites:
	/*
	 * make sure all async writes that are part of this stream
	 * have completed before we proceed
	 */
	cluster_iostate_wait(&iostate, 0, "cluster_write_contig");

	if (iostate.io_error) {
		error = iostate.io_error;
	}

	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);

	/* now that the stream has drained, handle the unaligned tail */
	if (error == 0 && tail_size) {
		error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
	}

	for (n = 0; n < num_upl; n++) {
		/*
		 * just release our hold on each physically contiguous
		 * region without changing any state
		 */
		ubc_upl_abort(upl[n], 0);
	}

	return error;
}
3839
3840
3841 /*
3842 * need to avoid a race between an msync of a range of pages dirtied via mmap
3843 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
3844 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
3845 *
3846 * we should never force-zero-fill pages that are already valid in the cache...
3847 * the entire page contains valid data (either from disk, zero-filled or dirtied
3848 * via an mmap) so we can only do damage by trying to zero-fill
3849 *
3850 */
3851 static int
cluster_zero_range(upl_t upl,upl_page_info_t * pl,int flags,int io_offset,off_t zero_off,off_t upl_f_offset,int bytes_to_zero)3852 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
3853 {
3854 int zero_pg_index;
3855 boolean_t need_cluster_zero = TRUE;
3856
3857 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
3858 bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
3859 zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
3860
3861 if (upl_valid_page(pl, zero_pg_index)) {
3862 /*
3863 * never force zero valid pages - dirty or clean
3864 * we'll leave these in the UPL for cluster_write_copy to deal with
3865 */
3866 need_cluster_zero = FALSE;
3867 }
3868 }
3869 if (need_cluster_zero == TRUE) {
3870 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
3871 }
3872
3873 return bytes_to_zero;
3874 }
3875
3876
3877 void
cluster_update_state(vnode_t vp,vm_object_offset_t s_offset,vm_object_offset_t e_offset,boolean_t vm_initiated)3878 cluster_update_state(vnode_t vp, vm_object_offset_t s_offset, vm_object_offset_t e_offset, boolean_t vm_initiated)
3879 {
3880 struct cl_extent cl;
3881 boolean_t first_pass = TRUE;
3882
3883 assert(s_offset < e_offset);
3884 assert((s_offset & PAGE_MASK_64) == 0);
3885 assert((e_offset & PAGE_MASK_64) == 0);
3886
3887 cl.b_addr = (daddr64_t)(s_offset / PAGE_SIZE_64);
3888 cl.e_addr = (daddr64_t)(e_offset / PAGE_SIZE_64);
3889
3890 cluster_update_state_internal(vp, &cl, 0, TRUE, &first_pass, s_offset, (int)(e_offset - s_offset),
3891 vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated);
3892 }
3893
3894
/*
 * Fold the freshly dirtied page extent '*cl' into the vnode's delayed-write
 * (write-behind) state: either add it to the sparse cluster map, merge it
 * into one of the existing delayed clusters, or start a new cluster...
 * may push existing clusters to disk to make room, or switch the vnode to
 * the sparse cluster mechanism when the fixed cluster slots are exhausted.
 *
 * Takes the writebehind lock via cluster_get_wbp(CLW_ALLOCATE | CLW_RETURNLOCKED)
 * and always drops it before returning.
 *
 * cl          - page extent [b_addr, e_addr) just written; may be modified
 *               here as portions are absorbed into existing clusters
 * flags       - IO_NOCACHE / IO_PASSIVE influence cluster io_flags and push policy
 * defer_writes- TRUE means never push from here (caller wants pure delay)
 * first_pass  - in/out: on first call of a write, update the sequential-write
 *               accounting (cl_seq_written / cl_last_write) under the lock
 * write_off/write_cnt - byte range of the originating write, for the
 *               sequential-write accounting above
 * newEOF      - file size to pass along to any push/add operations
 */
static void
cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes,
    boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF,
    int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
{
	struct cl_writebehind *wbp;
	int cl_index;
	int ret_cluster_try_push;
	u_int max_cluster_pgcount;


	max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;

	/*
	 * take the lock to protect our accesses
	 * of the writebehind and sparse cluster state
	 */
	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);

	if (wbp->cl_scmap) {
		if (!(flags & IO_NOCACHE)) {
			/*
			 * we've fallen into the sparse
			 * cluster method of delaying dirty pages
			 */
			sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);

			lck_mtx_unlock(&wbp->cl_lockw);
			return;
		}
		/*
		 * must have done cached writes that fell into
		 * the sparse cluster mechanism... we've switched
		 * to uncached writes on the file, so go ahead
		 * and push whatever's in the sparse map
		 * and switch back to normal clustering
		 */
		wbp->cl_number = 0;

		sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg, vm_initiated);
		/*
		 * no clusters of either type present at this point
		 * so just go directly to start_new_cluster since
		 * we know we need to delay this I/O since we've
		 * already released the pages back into the cache
		 * to avoid the deadlock with sparse_cluster_push
		 */
		goto start_new_cluster;
	}
	if (*first_pass == TRUE) {
		/*
		 * update the sequential-write accounting now that we hold
		 * the lock... a write contiguous with the previous one
		 * extends the running total, otherwise the count restarts
		 */
		if (write_off == wbp->cl_last_write) {
			wbp->cl_seq_written += write_cnt;
		} else {
			wbp->cl_seq_written = write_cnt;
		}

		wbp->cl_last_write = write_off + write_cnt;

		*first_pass = FALSE;
	}
	if (wbp->cl_number == 0) {
		/*
		 * no clusters currently present
		 */
		goto start_new_cluster;
	}

	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
		/*
		 * check each cluster that we currently hold
		 * try to merge some or all of this write into
		 * one or more of the existing clusters... if
		 * any portion of the write remains, start a
		 * new cluster
		 */
		if (cl->b_addr >= wbp->cl_clusters[cl_index].b_addr) {
			/*
			 * the current write starts at or after the current cluster
			 */
			if (cl->e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
				/*
				 * we have a write that fits entirely
				 * within the existing cluster limits
				 */
				if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
					/*
					 * update our idea of where the cluster ends
					 */
					wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
				}
				break;
			}
			if (cl->b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
				/*
				 * we have a write that starts in the middle of the current cluster
				 * but extends beyond the cluster's limit... we know this because
				 * of the previous checks
				 * we'll extend the current cluster to the max
				 * and update the b_addr for the current write to reflect that
				 * the head of it was absorbed into this cluster...
				 * note that we'll always have a leftover tail in this case since
				 * full absorption would have occurred in the clause above
				 */
				wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;

				cl->b_addr = wbp->cl_clusters[cl_index].e_addr;
			}
			/*
			 * we come here for the case where the current write starts
			 * beyond the limit of the existing cluster or we have a leftover
			 * tail after a partial absorption
			 *
			 * in either case, we'll check the remaining clusters before
			 * starting a new one
			 */
		} else {
			/*
			 * the current write starts in front of the cluster we're currently considering
			 */
			if ((wbp->cl_clusters[cl_index].e_addr - cl->b_addr) <= max_cluster_pgcount) {
				/*
				 * we can just merge the new request into
				 * this cluster and leave it in the cache
				 * since the resulting cluster is still
				 * less than the maximum allowable size
				 */
				wbp->cl_clusters[cl_index].b_addr = cl->b_addr;

				if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) {
					/*
					 * the current write completely
					 * envelops the existing cluster and since
					 * each write is limited to at most max_cluster_pgcount pages
					 * we can just use the start and last blocknos of the write
					 * to generate the cluster limits
					 */
					wbp->cl_clusters[cl_index].e_addr = cl->e_addr;
				}
				break;
			}
			/*
			 * if we were to combine this write with the current cluster
			 * we would exceed the cluster size limit.... so,
			 * let's see if there's any overlap of the new I/O with
			 * the cluster we're currently considering... in fact, we'll
			 * stretch the cluster out to its full limit and see if we
			 * get an intersection with the current write
			 *
			 */
			if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
				/*
				 * the current write extends into the proposed cluster
				 * clip the length of the current write after first combining its
				 * tail with the newly shaped cluster
				 */
				wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;

				cl->e_addr = wbp->cl_clusters[cl_index].b_addr;
			}
			/*
			 * if we get here, there was no way to merge
			 * any portion of this write with this cluster
			 * or we could only merge part of it which
			 * will leave a tail...
			 * we'll check the remaining clusters before starting a new one
			 */
		}
	}
	if (cl_index < wbp->cl_number) {
		/*
		 * we found an existing cluster(s) that we
		 * could entirely merge this I/O into
		 */
		goto delay_io;
	}

	if (defer_writes == FALSE &&
	    wbp->cl_number == MAX_CLUSTERS &&
	    wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) {
		uint32_t n;

		/*
		 * the file is being written sequentially and all cluster
		 * slots are full... pick how many clusters to push based
		 * on the mount's saturation setting or the media type
		 */
		if (vp->v_mount->mnt_minsaturationbytecount) {
			n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp);

			if (n > MAX_CLUSTERS) {
				n = MAX_CLUSTERS;
			}
		} else {
			n = 0;
		}

		if (n == 0) {
			if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
				n = WRITE_BEHIND_SSD;
			} else {
				n = WRITE_BEHIND;
			}
		}
		while (n--) {
			cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated);
		}
	}
	if (wbp->cl_number < MAX_CLUSTERS) {
		/*
		 * we didn't find an existing cluster to
		 * merge into, but there's room to start
		 * a new one
		 */
		goto start_new_cluster;
	}
	/*
	 * no existing cluster to merge with and no
	 * room to start a new one... we'll try
	 * pushing one of the existing ones... if none of
	 * them are able to be pushed, we'll switch
	 * to the sparse cluster mechanism
	 * cluster_try_push updates cl_number to the
	 * number of remaining clusters... and
	 * returns the number of currently unused clusters
	 */
	ret_cluster_try_push = 0;

	/*
	 * if writes are not deferred, call cluster push immediately
	 */
	if (defer_writes == FALSE) {
		ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated);
	}
	/*
	 * execute following regardless of writes being deferred or not
	 */
	if (ret_cluster_try_push == 0) {
		/*
		 * no more room in the normal cluster mechanism
		 * so let's switch to the more expansive but expensive
		 * sparse mechanism....
		 */
		sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated);
		sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated);

		lck_mtx_unlock(&wbp->cl_lockw);
		return;
	}
start_new_cluster:
	wbp->cl_clusters[wbp->cl_number].b_addr = cl->b_addr;
	wbp->cl_clusters[wbp->cl_number].e_addr = cl->e_addr;

	wbp->cl_clusters[wbp->cl_number].io_flags = 0;

	if (flags & IO_NOCACHE) {
		wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
	}

	if (flags & IO_PASSIVE) {
		wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
	}

	wbp->cl_number++;
delay_io:
	lck_mtx_unlock(&wbp->cl_lockw);
	return;
}
4157
4158
/*
 * Service a write through the buffer cache: copy the user data described
 * by 'uio' into UPL-backed pages, zero-filling any head gap (between
 * 'headOff'/old EOF and the write start) and tail gap (between the write
 * end and 'tailOff'/the end of the last page) as requested by 'flags',
 * then either push the pages synchronously (IO_SYNC) or record them in
 * the vnode's delayed-write cluster state.
 *
 * 'uio' may be NULL: in that case this routine performs only zero-fill
 * (e.g. ftruncate growing a file uses this path - see IO_HEADZEROFILL
 * handling below).
 *
 * oldEOF/newEOF - file size before/after this write; used to decide what
 *                 must be pre-read or zero-filled
 * Returns 0 on success or an errno from the underlying I/O routines.
 */
static int
cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
    off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t *pl;
	upl_t upl;
	vm_offset_t upl_offset = 0;
	vm_size_t upl_size;
	off_t upl_f_offset;
	int pages_in_upl;
	int start_offset;
	int xfer_resid;
	int io_size;
	int io_offset;
	int bytes_to_zero;
	int bytes_to_move;
	kern_return_t kret;
	int retval = 0;
	int io_resid;
	long long total_size;
	long long zero_cnt;
	off_t zero_off;
	long long zero_cnt1;
	off_t zero_off1;
	off_t write_off = 0;
	int write_cnt = 0;
	boolean_t first_pass = FALSE;
	struct cl_extent cl;
	int bflag;
	u_int max_io_size;

	if (uio) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
		    (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);

		io_resid = io_req_size;
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
		    0, 0, (int)oldEOF, (int)newEOF, 0);

		io_resid = 0;
	}
	if (flags & IO_PASSIVE) {
		bflag = CL_PASSIVE;
	} else {
		bflag = 0;
	}
	if (flags & IO_NOCACHE) {
		bflag |= CL_NOCACHE;
	}

	if (flags & IO_SKIP_ENCRYPTION) {
		bflag |= CL_ENCRYPTED;
	}

	/*
	 * zero_off/zero_cnt describe the head gap to zero-fill,
	 * zero_off1/zero_cnt1 the tail gap
	 */
	zero_cnt = 0;
	zero_cnt1 = 0;
	zero_off = 0;
	zero_off1 = 0;

	max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);

	if (flags & IO_HEADZEROFILL) {
		/*
		 * some filesystems (HFS is one) don't support unallocated holes within a file...
		 * so we zero fill the intervening space between the old EOF and the offset
		 * where the next chunk of real data begins.... ftruncate will also use this
		 * routine to zero fill to the new EOF when growing a file... in this case, the
		 * uio structure will not be provided
		 */
		if (uio) {
			if (headOff < uio->uio_offset) {
				zero_cnt = uio->uio_offset - headOff;
				zero_off = headOff;
			}
		} else if (headOff < newEOF) {
			zero_cnt = newEOF - headOff;
			zero_off = headOff;
		}
	} else {
		if (uio && uio->uio_offset > oldEOF) {
			zero_off = uio->uio_offset & ~PAGE_MASK_64;

			if (zero_off >= oldEOF) {
				/*
				 * writing past the old EOF within the same page...
				 * zero from the start of that page up to the write offset
				 */
				zero_cnt = uio->uio_offset - zero_off;

				flags |= IO_HEADZEROFILL;
			}
		}
	}
	if (flags & IO_TAILZEROFILL) {
		if (uio) {
			zero_off1 = uio->uio_offset + io_req_size;

			if (zero_off1 < tailOff) {
				zero_cnt1 = tailOff - zero_off1;
			}
		}
	} else {
		if (uio && newEOF > oldEOF) {
			zero_off1 = uio->uio_offset + io_req_size;

			if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
				/*
				 * the write grows the file and ends mid-page...
				 * zero out the remainder of the final page
				 */
				zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);

				flags |= IO_TAILZEROFILL;
			}
		}
	}
	if (zero_cnt == 0 && uio == (struct uio *) 0) {
		/* nothing to copy and nothing to zero... we're done */
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
		    retval, 0, 0, 0, 0);
		return 0;
	}
	if (uio) {
		write_off = uio->uio_offset;
		write_cnt = (int)uio_resid(uio);
		/*
		 * delay updating the sequential write info
		 * in the control block until we've obtained
		 * the lock for it
		 */
		first_pass = TRUE;
	}
	while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
		/*
		 * for this iteration of the loop, figure out where our starting point is
		 */
		if (zero_cnt) {
			start_offset = (int)(zero_off & PAGE_MASK_64);
			upl_f_offset = zero_off - start_offset;
		} else if (io_resid) {
			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
			upl_f_offset = uio->uio_offset - start_offset;
		} else {
			start_offset = (int)(zero_off1 & PAGE_MASK_64);
			upl_f_offset = zero_off1 - start_offset;
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
		    (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);

		if (total_size > max_io_size) {
			total_size = max_io_size;
		}

		cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);

		if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
			/*
			 * assumption... total_size <= io_resid
			 * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
			 */
			if ((start_offset + total_size) > max_io_size) {
				total_size = max_io_size - start_offset;
			}
			xfer_resid = (int)total_size;

			/* fast path: copy directly into resident ubc pages */
			retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);

			if (retval) {
				break;
			}

			io_resid -= (total_size - xfer_resid);
			total_size = xfer_resid;
			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
			upl_f_offset = uio->uio_offset - start_offset;

			if (total_size == 0) {
				if (start_offset) {
					/*
					 * the write did not finish on a page boundary
					 * which will leave upl_f_offset pointing to the
					 * beginning of the last page written instead of
					 * the page beyond it... bump it in this case
					 * so that the cluster code records the last page
					 * written as dirty
					 */
					upl_f_offset += PAGE_SIZE_64;
				}
				upl_size = 0;

				goto check_cluster;
			}
		}
		/*
		 * compute the size of the upl needed to encompass
		 * the requested write... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (upl_size > max_io_size) {
			upl_size = max_io_size;
		}

		pages_in_upl = (int)(upl_size / PAGE_SIZE);
		io_size = (int)(upl_size - start_offset);

		if ((long long)io_size > total_size) {
			io_size = (int)total_size;
		}

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);


		/*
		 * Gather the pages from the buffer cache.
		 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
		 * that we intend to modify these pages.
		 */
		kret = ubc_create_upl_kernel(vp,
		    upl_f_offset,
		    (int)upl_size,
		    &upl,
		    &pl,
		    UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY),
		    VM_KERN_MEMORY_FILE);
		if (kret != KERN_SUCCESS) {
			panic("cluster_write_copy: failed to get pagelist");
		}

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
		    upl, (int)upl_f_offset, start_offset, 0, 0);

		if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
			int read_size;

			/*
			 * we're starting in the middle of the first page of the upl
			 * and the page isn't currently valid, so we're going to have
			 * to read it in first... this is a synchronous operation
			 */
			read_size = PAGE_SIZE;

			if ((upl_f_offset + read_size) > oldEOF) {
				read_size = (int)(oldEOF - upl_f_offset);
			}

			retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
			    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
			if (retval) {
				/*
				 * we had an error during the read which causes us to abort
				 * the current cluster_write request... before we do, we need
				 * to release the rest of the pages in the upl without modifying
				 * their state and mark the failed page in error
				 */
				ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

				if (upl_size > PAGE_SIZE) {
					ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size,
					    UPL_ABORT_FREE_ON_EMPTY);
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
				    upl, 0, 0, retval, 0);
				break;
			}
		}
		if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
			/*
			 * the last offset we're writing to in this upl does not end on a page
			 * boundary... if it's not beyond the old EOF, then we'll also need to
			 * pre-read this page in if it isn't already valid
			 */
			upl_offset = upl_size - PAGE_SIZE;

			if ((upl_f_offset + start_offset + io_size) < oldEOF &&
			    !upl_valid_page(pl, (int)(upl_offset / PAGE_SIZE))) {
				int read_size;

				read_size = PAGE_SIZE;

				if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) {
					read_size = (int)(oldEOF - (upl_f_offset + upl_offset));
				}

				retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
				    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
				if (retval) {
					/*
					 * we had an error during the read which causes us to abort
					 * the current cluster_write request... before we do, we
					 * need to release the rest of the pages in the upl without
					 * modifying their state and mark the failed page in error
					 */
					ubc_upl_abort_range(upl, (upl_offset_t)upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

					if (upl_size > PAGE_SIZE) {
						ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);
					}

					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
					    upl, 0, 0, retval, 0);
					break;
				}
			}
		}
		xfer_resid = io_size;
		io_offset = start_offset;

		/* zero the head gap (if any) that falls within this upl */
		while (zero_cnt && xfer_resid) {
			if (zero_cnt < (long long)xfer_resid) {
				bytes_to_zero = (int)zero_cnt;
			} else {
				bytes_to_zero = xfer_resid;
			}

			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);

			xfer_resid -= bytes_to_zero;
			zero_cnt -= bytes_to_zero;
			zero_off += bytes_to_zero;
			io_offset += bytes_to_zero;
		}
		if (xfer_resid && io_resid) {
			u_int32_t io_requested;

			/* copy the caller's data into the upl */
			bytes_to_move = min(io_resid, xfer_resid);
			io_requested = bytes_to_move;

			retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);

			if (retval) {
				ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
				    upl, 0, 0, retval, 0);
			} else {
				io_resid -= bytes_to_move;
				xfer_resid -= bytes_to_move;
				io_offset += bytes_to_move;
			}
		}
		/* zero the tail gap (if any) that falls within this upl */
		while (xfer_resid && zero_cnt1 && retval == 0) {
			if (zero_cnt1 < (long long)xfer_resid) {
				bytes_to_zero = (int)zero_cnt1;
			} else {
				bytes_to_zero = xfer_resid;
			}

			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);

			xfer_resid -= bytes_to_zero;
			zero_cnt1 -= bytes_to_zero;
			zero_off1 += bytes_to_zero;
			io_offset += bytes_to_zero;
		}
		if (retval == 0) {
			int do_zeroing = 1;

			io_size += start_offset;

			/* Force more restrictive zeroing behavior only on APFS */
			if ((vnode_tag(vp) == VT_APFS) && (newEOF < oldEOF)) {
				do_zeroing = 0;
			}

			if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
				/*
				 * if we're extending the file with this write
				 * we'll zero fill the rest of the page so that
				 * if the file gets extended again in such a way as to leave a
				 * hole starting at this EOF, we'll have zero's in the correct spot
				 */
				cluster_zero(upl, io_size, (int)(upl_size - io_size), NULL);
			}
			/*
			 * release the upl now if we hold one since...
			 * 1) pages in it may be present in the sparse cluster map
			 *    and may span 2 separate buckets there... if they do and
			 *    we happen to have to flush a bucket to make room and it intersects
			 *    this upl, a deadlock may result on page BUSY
			 * 2) we're delaying the I/O... from this point forward we're just updating
			 *    the cluster state... no need to hold the pages, so commit them
			 * 3) IO_SYNC is set...
			 *    because we had to ask for a UPL that provides currently non-present pages, the
			 *    UPL has been automatically set to clear the dirty flags (both software and hardware)
			 *    upon committing it... this is not the behavior we want since it's possible for
			 *    pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
			 *    we'll pick these pages back up later with the correct behavior specified.
			 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
			 *    of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
			 *    we hold since the flushing context is holding the cluster lock.
			 */
			ubc_upl_commit_range(upl, 0, (upl_size_t)upl_size,
			    UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
check_cluster:
			/*
			 * calculate the last logical block number
			 * that this delayed I/O encompassed
			 */
			cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);

			if (flags & IO_SYNC) {
				/*
				 * if the IO_SYNC flag is set then we need to bypass
				 * any clustering and immediately issue the I/O
				 *
				 * we don't hold the lock at this point
				 *
				 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
				 * so that we correctly deal with a change in state of the hardware modify bit...
				 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
				 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
				 * responsible for generating the correct sized I/O(s)
				 */
				retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE);
			} else {
				boolean_t defer_writes = FALSE;

				if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) {
					defer_writes = TRUE;
				}

				cluster_update_state_internal(vp, &cl, flags, defer_writes, &first_pass,
				    write_off, write_cnt, newEOF, callback, callback_arg, FALSE);
			}
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);

	return retval;
}
4587
4588
4589
/*
 * Simple front end for cluster_read_ext: a cluster read with no
 * per-buffer completion callback.
 */
int
cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
{
	return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
}
4595
4596
4597 int
cluster_read_ext(vnode_t vp,struct uio * uio,off_t filesize,int xflags,int (* callback)(buf_t,void *),void * callback_arg)4598 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
4599 {
4600 int retval = 0;
4601 int flags;
4602 user_ssize_t cur_resid;
4603 u_int32_t io_size;
4604 u_int32_t read_length = 0;
4605 int read_type = IO_COPY;
4606 bool check_io_type;
4607
4608 flags = xflags;
4609
4610 if (vp->v_flag & VNOCACHE_DATA) {
4611 flags |= IO_NOCACHE;
4612 }
4613 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled) {
4614 flags |= IO_RAOFF;
4615 }
4616
4617 if (flags & IO_SKIP_ENCRYPTION) {
4618 flags |= IO_ENCRYPTED;
4619 }
4620
4621 /*
4622 * do a read through the cache if one of the following is true....
4623 * NOCACHE is not true
4624 * the uio request doesn't target USERSPACE (unless IO_NOCACHE_SYSSPACE is also set)
4625 * Alternatively, if IO_ENCRYPTED is set, then we want to bypass the cache as well.
4626 * Reading encrypted data from a CP filesystem should never result in the data touching
4627 * the UBC.
4628 *
4629 * otherwise, find out if we want the direct or contig variant for
4630 * the first vector in the uio request
4631 */
4632 check_io_type = false;
4633 if (flags & IO_NOCACHE) {
4634 if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
4635 /*
4636 * no-cache to user-space: ok to consider IO_DIRECT.
4637 */
4638 check_io_type = true;
4639 } else if (uio->uio_segflg == UIO_SYSSPACE &&
4640 (flags & IO_NOCACHE_SYSSPACE)) {
4641 /*
4642 * no-cache to kernel-space but w/ IO_NOCACHE_SYSSPACE:
4643 * ok to consider IO_DIRECT.
4644 * The caller should make sure to target kernel buffer
4645 * that is backed by regular anonymous memory (i.e.
4646 * not backed by the kernel object or an external
4647 * memory manager like device memory or a file).
4648 */
4649 check_io_type = true;
4650 }
4651 } else if (flags & IO_ENCRYPTED) {
4652 check_io_type = true;
4653 }
4654 if (check_io_type) {
4655 retval = cluster_io_type(uio, &read_type, &read_length, 0);
4656 }
4657
4658 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
4659 switch (read_type) {
4660 case IO_COPY:
4661 /*
4662 * make sure the uio_resid isn't too big...
4663 * internally, we want to handle all of the I/O in
4664 * chunk sizes that fit in a 32 bit int
4665 */
4666 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
4667 io_size = MAX_IO_REQUEST_SIZE;
4668 } else {
4669 io_size = (u_int32_t)cur_resid;
4670 }
4671
4672 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
4673 break;
4674
4675 case IO_DIRECT:
4676 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
4677 break;
4678
4679 case IO_CONTIG:
4680 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
4681 break;
4682
4683 case IO_UNKNOWN:
4684 retval = cluster_io_type(uio, &read_type, &read_length, 0);
4685 break;
4686 }
4687 }
4688 return retval;
4689 }
4690
4691
4692
4693 static void
cluster_read_upl_release(upl_t upl,int start_pg,int last_pg,int take_reference)4694 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
4695 {
4696 int range;
4697 int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
4698
4699 if ((range = last_pg - start_pg)) {
4700 if (take_reference) {
4701 abort_flags |= UPL_ABORT_REFERENCE;
4702 }
4703
4704 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
4705 }
4706 }
4707
4708
/*
 * cluster_read_copy
 *
 * Service a read through the unified buffer cache: map the requested
 * range into a UPL, issue an async cluster_io read for any pages not
 * already valid in the cache, then copy the data out to the caller's
 * uio.  Also drives the sequential prefetch / read-ahead machinery and
 * adapts I/O sizes while the vnode is in a throttle window.
 *
 *	vp		vnode being read
 *	uio		destination of the copy; uio_offset is advanced
 *	io_req_size	number of bytes requested
 *	filesize	current EOF... the read is clipped to this
 *	flags		IO_* flags (IO_NOCACHE, IO_RAOFF, IO_PASSIVE, ...)
 *	callback/callback_arg	per-buffer callback handed to cluster_io
 *
 * Returns 0 on success, an errno on failure, or EAGAIN when
 * IO_RETURN_ON_THROTTLE is set and the caller should throttle.
 */
static int
cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t *pl;
	upl_t upl = NULL;
	vm_offset_t upl_offset;
	u_int32_t upl_size;
	off_t upl_f_offset;
	int start_offset;
	int start_pg;
	int last_pg;
	int uio_last = 0;
	int pages_in_upl;
	off_t max_size;
	off_t last_ioread_offset;
	off_t last_request_offset;
	kern_return_t kret;
	int error = 0;
	int retval = 0;
	u_int32_t size_of_prefetch;
	u_int32_t xsize;
	u_int32_t io_size;
	u_int32_t max_rd_size;
	u_int32_t max_io_size;
	u_int32_t max_prefetch;
	u_int rd_ahead_enabled = 1;
	u_int prefetch_enabled = 1;
	struct cl_readahead * rap;
	struct clios iostate;
	struct cl_extent extent;
	int bflag;
	int take_reference = 1;
	int policy = IOPOL_DEFAULT;
	boolean_t iolock_inited = FALSE;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
	    (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);

	/* raw-encrypted reads must never go through the UBC */
	if (flags & IO_ENCRYPTED) {
		panic("encrypted blocks will hit UBC!");
	}

	policy = throttle_get_io_policy(NULL);

	/*
	 * low-priority (tier 2/3) and uncached readers don't take the extra
	 * page reference... their pages are committed with SPECULATE rather
	 * than INACTIVATE below, so they age out of the cache sooner
	 */
	if (policy == THROTTLE_LEVEL_TIER3 || policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE)) {
		take_reference = 0;
	}

	if (flags & IO_PASSIVE) {
		bflag = CL_PASSIVE;
	} else {
		bflag = 0;
	}

	if (flags & IO_NOCACHE) {
		bflag |= CL_NOCACHE;
	}

	if (flags & IO_SKIP_ENCRYPTION) {
		bflag |= CL_ENCRYPTED;
	}

	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
	max_prefetch = cluster_max_prefetch(vp, max_io_size, prefetch_max);
	max_rd_size = max_prefetch;

	last_request_offset = uio->uio_offset + io_req_size;

	if (last_request_offset > filesize) {
		last_request_offset = filesize;
	}

	/*
	 * read-ahead is skipped when explicitly disabled, when caching is
	 * off, or when the whole request lies within a single page
	 */
	if ((flags & (IO_RAOFF | IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
		rd_ahead_enabled = 0;
		rap = NULL;
	} else {
		if (cluster_is_throttled(vp)) {
			/*
			 * we're in the throttle window, at the very least
			 * we want to limit the size of the I/O we're about
			 * to issue
			 */
			rd_ahead_enabled = 0;
			prefetch_enabled = 0;

			max_rd_size = calculate_max_throttle_size(vp);
		}
		if ((rap = cluster_get_rap(vp)) == NULL) {
			rd_ahead_enabled = 0;
		} else {
			/* page range of this request, used to track sequentiality */
			extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
			extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
		}
	}
	if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
		/*
		 * determine if we already have a read-ahead in the pipe courtesy of the
		 * last read system call that was issued...
		 * if so, pick up its extent to determine where we should start
		 * with respect to any read-ahead that might be necessary to
		 * garner all the data needed to complete this read system call
		 */
		last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;

		if (last_ioread_offset < uio->uio_offset) {
			last_ioread_offset = (off_t)0;
		} else if (last_ioread_offset > last_request_offset) {
			last_ioread_offset = last_request_offset;
		}
	} else {
		last_ioread_offset = (off_t)0;
	}

	while (io_req_size && uio->uio_offset < filesize && retval == 0) {
		max_size = filesize - uio->uio_offset;
		bool leftover_upl_aborted = false;

		if ((off_t)(io_req_size) < max_size) {
			io_size = io_req_size;
		} else {
			io_size = (u_int32_t)max_size;
		}

		if (!(flags & IO_NOCACHE)) {
			/*
			 * fast path: copy directly from pages already resident in
			 * the cache, issuing prefetches as we go to keep the
			 * pipeline full
			 */
			while (io_size) {
				u_int32_t io_resid;
				u_int32_t io_requested;

				/*
				 * if we keep finding the pages we need already in the cache, then
				 * don't bother to call cluster_read_prefetch since it costs CPU cycles
				 * to determine that we have all the pages we need... once we miss in
				 * the cache and have issued an I/O, then we'll assume that we're likely
				 * to continue to miss in the cache and it's to our advantage to try and prefetch
				 */
				if (last_request_offset && last_ioread_offset && (size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset))) {
					if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
						/*
						 * we've already issued I/O for this request and
						 * there's still work to do and
						 * our prefetch stream is running dry, so issue a
						 * pre-fetch I/O... the I/O latency will overlap
						 * with the copying of the data
						 */
						if (size_of_prefetch > max_rd_size) {
							size_of_prefetch = max_rd_size;
						}

						size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);

						last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);

						if (last_ioread_offset > last_request_offset) {
							last_ioread_offset = last_request_offset;
						}
					}
				}
				/*
				 * limit the size of the copy we're about to do so that
				 * we can notice that our I/O pipe is running dry and
				 * get the next I/O issued before it does go dry
				 */
				if (last_ioread_offset && io_size > (max_io_size / 4)) {
					io_resid = (max_io_size / 4);
				} else {
					io_resid = io_size;
				}

				io_requested = io_resid;

				retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);

				/* bytes actually copied this pass = requested - residual */
				xsize = io_requested - io_resid;

				io_size -= xsize;
				io_req_size -= xsize;

				if (retval || io_resid) {
					/*
					 * if we run into a real error or
					 * a page that is not in the cache
					 * we need to leave streaming mode
					 */
					break;
				}

				if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
					/*
					 * we're already finished the I/O for this read request
					 * let's see if we should do a read-ahead
					 */
					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
				}
			}
			if (retval) {
				break;
			}
			if (io_size == 0) {
				/* the whole request was satisfied from the cache */
				if (rap != NULL) {
					if (extent.e_addr < rap->cl_lastr) {
						rap->cl_maxra = 0;
					}
					rap->cl_lastr = extent.e_addr;
				}
				break;
			}
			/*
			 * recompute max_size since cluster_copy_ubc_data_internal
			 * may have advanced uio->uio_offset
			 */
			max_size = filesize - uio->uio_offset;
		}

		iostate.io_completed = 0;
		iostate.io_issued = 0;
		iostate.io_error = 0;
		iostate.io_wanted = 0;

		if ((flags & IO_RETURN_ON_THROTTLE)) {
			if (cluster_is_throttled(vp) == THROTTLE_NOW) {
				if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
					/*
					 * we're in the throttle window and at least 1 I/O
					 * has already been issued by a throttleable thread
					 * in this window, so return with EAGAIN to indicate
					 * to the FS issuing the cluster_read call that it
					 * should now throttle after dropping any locks
					 */
					throttle_info_update_by_mount(vp->v_mount);

					retval = EAGAIN;
					break;
				}
			}
		}

		/*
		 * compute the size of the upl needed to encompass
		 * the requested read... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
		upl_f_offset = uio->uio_offset - (off_t)start_offset;

		if (io_size > max_rd_size) {
			io_size = max_rd_size;
		}

		upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (flags & IO_NOCACHE) {
			if (upl_size > max_io_size) {
				upl_size = max_io_size;
			}
		} else {
			/* cached reads use smaller UPLs so the copy overlaps the I/O */
			if (upl_size > max_io_size / 4) {
				upl_size = max_io_size / 4;
				upl_size &= ~PAGE_MASK;

				if (upl_size == 0) {
					upl_size = PAGE_SIZE;
				}
			}
		}
		pages_in_upl = upl_size / PAGE_SIZE;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
		    upl, (int)upl_f_offset, upl_size, start_offset, 0);

		kret = ubc_create_upl_kernel(vp,
		    upl_f_offset,
		    upl_size,
		    &upl,
		    &pl,
		    UPL_FILE_IO | UPL_SET_LITE,
		    VM_KERN_MEMORY_FILE);
		if (kret != KERN_SUCCESS) {
			panic("cluster_read_copy: failed to get pagelist");
		}

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
		    upl, (int)upl_f_offset, upl_size, start_offset, 0);

		/*
		 * scan from the beginning of the upl looking for the first
		 * non-valid page.... this will become the first page in
		 * the request we're going to make to 'cluster_io'... if all
		 * of the pages are valid, we won't call through to 'cluster_io'
		 */
		for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
			if (!upl_valid_page(pl, start_pg)) {
				break;
			}
		}

		/*
		 * scan from the starting invalid page looking for a valid
		 * page before the end of the upl is reached, if we
		 * find one, then it will be the last page of the request to
		 * 'cluster_io'
		 */
		for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
			if (upl_valid_page(pl, last_pg)) {
				break;
			}
		}

		if (start_pg < last_pg) {
			/*
			 * we found a range of 'invalid' pages that must be filled
			 * if the last page in this range is the last page of the file
			 * we may have to clip the size of it to keep from reading past
			 * the end of the last physical block associated with the file
			 */
			if (iolock_inited == FALSE) {
				/* lazily init the iostate mutex on the first real I/O */
				lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);

				iolock_inited = TRUE;
			}
			upl_offset = start_pg * PAGE_SIZE;
			io_size = (last_pg - start_pg) * PAGE_SIZE;

			if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
				io_size = (u_int32_t)(filesize - (upl_f_offset + upl_offset));
			}

			/*
			 * Find out if this needs verification, we'll have to manage the UPL
			 * differently if so. Note that this call only lets us know if
			 * verification is enabled on this mount point, the actual verification
			 * is performed in the File system.
			 */
			size_t verify_block_size = 0;
			if ((VNOP_VERIFY(vp, start_offset, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL, NULL) == 0) /* && verify_block_size */) {
				for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
					if (!upl_valid_page(pl, uio_last)) {
						break;
					}
				}
				if (uio_last < pages_in_upl) {
					/*
					 * there were some invalid pages beyond the valid pages
					 * that we didn't issue an I/O for, just release them
					 * unchanged now, so that any prefetch/readahead can
					 * include them
					 */
					ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
					    (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
					leftover_upl_aborted = true;
				}
			}

			/*
			 * issue an asynchronous read to cluster_io
			 */

			error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
			    io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);

			if (rap) {
				if (extent.e_addr < rap->cl_maxra) {
					/*
					 * we've just issued a read for a block that should have been
					 * in the cache courtesy of the read-ahead engine... something
					 * has gone wrong with the pipeline, so reset the read-ahead
					 * logic which will cause us to restart from scratch
					 */
					rap->cl_maxra = 0;
				}
			}
		}
		if (error == 0) {
			/*
			 * if the read completed successfully, or there was no I/O request
			 * issued, then copy the data into user land via 'cluster_upl_copy_data'
			 * we'll first add on any 'valid'
			 * pages that were present in the upl when we acquired it.
			 */
			u_int val_size;

			if (!leftover_upl_aborted) {
				for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
					if (!upl_valid_page(pl, uio_last)) {
						break;
					}
				}
				if (uio_last < pages_in_upl) {
					/*
					 * there were some invalid pages beyond the valid pages
					 * that we didn't issue an I/O for, just release them
					 * unchanged now, so that any prefetch/readahead can
					 * include them
					 */
					ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
					    (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				}
			}

			/*
			 * compute size to transfer this round, if io_req_size is
			 * still non-zero after this attempt, we'll loop around and
			 * set up for another I/O.
			 */
			val_size = (uio_last * PAGE_SIZE) - start_offset;

			if (val_size > max_size) {
				val_size = (u_int)max_size;
			}

			if (val_size > io_req_size) {
				val_size = io_req_size;
			}

			if ((uio->uio_offset + val_size) > last_ioread_offset) {
				last_ioread_offset = uio->uio_offset + val_size;
			}

			if ((size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset)) && prefetch_enabled) {
				if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
					/*
					 * if there's still I/O left to do for this request, and...
					 * we're not in hard throttle mode, and...
					 * we're close to using up the previous prefetch, then issue a
					 * new pre-fetch I/O... the I/O latency will overlap
					 * with the copying of the data
					 */
					if (size_of_prefetch > max_rd_size) {
						size_of_prefetch = max_rd_size;
					}

					size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);

					last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);

					if (last_ioread_offset > last_request_offset) {
						last_ioread_offset = last_request_offset;
					}
				}
			} else if ((uio->uio_offset + val_size) == last_request_offset) {
				/*
				 * this transfer will finish this request, so...
				 * let's try to read ahead if we're in
				 * a sequential access pattern and we haven't
				 * explicitly disabled it
				 */
				if (rd_ahead_enabled) {
					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
				}

				if (rap != NULL) {
					if (extent.e_addr < rap->cl_lastr) {
						rap->cl_maxra = 0;
					}
					rap->cl_lastr = extent.e_addr;
				}
			}
			/* wait for the async read(s) to finish before touching the data */
			if (iolock_inited == TRUE) {
				cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
			}

			if (iostate.io_error) {
				error = iostate.io_error;
			} else {
				u_int32_t io_requested;

				io_requested = val_size;

				retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);

				io_req_size -= (val_size - io_requested);
			}
		} else {
			if (iolock_inited == TRUE) {
				cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
			}
		}
		if (start_pg < last_pg) {
			/*
			 * compute the range of pages that we actually issued an I/O for
			 * and either commit them as valid if the I/O succeeded
			 * or abort them if the I/O failed or we're not supposed to
			 * keep them in the cache
			 */
			io_size = (last_pg - start_pg) * PAGE_SIZE;

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);

			if (error || (flags & IO_NOCACHE)) {
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
				    UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
			} else {
				int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;

				if (take_reference) {
					commit_flags |= UPL_COMMIT_INACTIVATE;
				} else {
					commit_flags |= UPL_COMMIT_SPECULATE;
				}

				ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
			}
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
		}
		if ((last_pg - start_pg) < pages_in_upl) {
			/*
			 * the set of pages that we issued an I/O for did not encompass
			 * the entire upl... so just release these without modifying
			 * their state
			 */
			if (error) {
				if (leftover_upl_aborted) {
					/* trailing pages were already aborted above; only release up to uio_last */
					ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, (uio_last - start_pg) * PAGE_SIZE,
					    UPL_ABORT_FREE_ON_EMPTY);
				} else {
					ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
				    upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);

				/*
				 * handle any valid pages at the beginning of
				 * the upl... release these appropriately
				 */
				cluster_read_upl_release(upl, 0, start_pg, take_reference);

				/*
				 * handle any valid pages immediately after the
				 * pages we issued I/O for... ... release these appropriately
				 */
				cluster_read_upl_release(upl, last_pg, uio_last, take_reference);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
			}
		}
		if (retval == 0) {
			retval = error;
		}

		if (io_req_size) {
			/* more to do... re-evaluate our throttle state for the next pass */
			uint32_t max_throttle_size = calculate_max_throttle_size(vp);

			if (cluster_is_throttled(vp)) {
				/*
				 * we're in the throttle window, at the very least
				 * we want to limit the size of the I/O we're about
				 * to issue
				 */
				rd_ahead_enabled = 0;
				prefetch_enabled = 0;
				max_rd_size = max_throttle_size;
			} else {
				if (max_rd_size == max_throttle_size) {
					/*
					 * coming out of throttled state
					 */
					if (policy != THROTTLE_LEVEL_TIER3 && policy != THROTTLE_LEVEL_TIER2) {
						if (rap != NULL) {
							rd_ahead_enabled = 1;
						}
						prefetch_enabled = 1;
					}
					max_rd_size = max_prefetch;
					last_ioread_offset = 0;
				}
			}
		}
	}
	if (iolock_inited == TRUE) {
		/*
		 * cluster_io returned an error after it
		 * had already issued some I/O. we need
		 * to wait for that I/O to complete before
		 * we can destroy the iostate mutex...
		 * 'retval' already contains the early error
		 * so no need to pick it up from iostate.io_error
		 */
		cluster_iostate_wait(&iostate, 0, "cluster_read_copy");

		lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
	}
	if (rap != NULL) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
		    (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);

		/* drop the read-ahead lock taken by cluster_get_rap */
		lck_mtx_unlock(&rap->cl_lockr);
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
		    (int)uio->uio_offset, io_req_size, 0, retval, 0);
	}

	return retval;
}
5305
5306 /*
5307 * We don't want another read/write lock for every vnode in the system
5308 * so we keep a hash of them here. There should never be very many of
5309 * these around at any point in time.
5310 */
5311 cl_direct_read_lock_t *
cluster_lock_direct_read(vnode_t vp,lck_rw_type_t type)5312 cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type)
5313 {
5314 struct cl_direct_read_locks *head
5315 = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp)
5316 % CL_DIRECT_READ_LOCK_BUCKETS];
5317
5318 struct cl_direct_read_lock *lck, *new_lck = NULL;
5319
5320 for (;;) {
5321 lck_spin_lock(&cl_direct_read_spin_lock);
5322
5323 LIST_FOREACH(lck, head, chain) {
5324 if (lck->vp == vp) {
5325 ++lck->ref_count;
5326 lck_spin_unlock(&cl_direct_read_spin_lock);
5327 if (new_lck) {
5328 // Someone beat us to it, ditch the allocation
5329 lck_rw_destroy(&new_lck->rw_lock, &cl_mtx_grp);
5330 kfree_type(cl_direct_read_lock_t, new_lck);
5331 }
5332 lck_rw_lock(&lck->rw_lock, type);
5333 return lck;
5334 }
5335 }
5336
5337 if (new_lck) {
5338 // Use the lock we allocated
5339 LIST_INSERT_HEAD(head, new_lck, chain);
5340 lck_spin_unlock(&cl_direct_read_spin_lock);
5341 lck_rw_lock(&new_lck->rw_lock, type);
5342 return new_lck;
5343 }
5344
5345 lck_spin_unlock(&cl_direct_read_spin_lock);
5346
5347 // Allocate a new lock
5348 new_lck = kalloc_type(cl_direct_read_lock_t, Z_WAITOK);
5349 lck_rw_init(&new_lck->rw_lock, &cl_mtx_grp, LCK_ATTR_NULL);
5350 new_lck->vp = vp;
5351 new_lck->ref_count = 1;
5352
5353 // Got to go round again
5354 }
5355 }
5356
5357 void
cluster_unlock_direct_read(cl_direct_read_lock_t * lck)5358 cluster_unlock_direct_read(cl_direct_read_lock_t *lck)
5359 {
5360 lck_rw_done(&lck->rw_lock);
5361
5362 lck_spin_lock(&cl_direct_read_spin_lock);
5363 if (lck->ref_count == 1) {
5364 LIST_REMOVE(lck, chain);
5365 lck_spin_unlock(&cl_direct_read_spin_lock);
5366 lck_rw_destroy(&lck->rw_lock, &cl_mtx_grp);
5367 kfree_type(cl_direct_read_lock_t, lck);
5368 } else {
5369 --lck->ref_count;
5370 lck_spin_unlock(&cl_direct_read_spin_lock);
5371 }
5372 }
5373
5374 static int
cluster_read_direct(vnode_t vp,struct uio * uio,off_t filesize,int * read_type,u_int32_t * read_length,int flags,int (* callback)(buf_t,void *),void * callback_arg)5375 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
5376 int flags, int (*callback)(buf_t, void *), void *callback_arg)
5377 {
5378 upl_t upl = NULL;
5379 upl_page_info_t *pl;
5380 off_t max_io_size;
5381 size_t verify_block_size = 0;
5382 vm_offset_t upl_offset, vector_upl_offset = 0;
5383 upl_size_t upl_size = 0, vector_upl_size = 0;
5384 vm_size_t upl_needed_size;
5385 unsigned int pages_in_pl;
5386 upl_control_flags_t upl_flags;
5387 kern_return_t kret = KERN_SUCCESS;
5388 unsigned int i;
5389 int force_data_sync;
5390 int retval = 0;
5391 int no_zero_fill = 0;
5392 int io_flag = 0;
5393 int misaligned = 0;
5394 struct clios iostate;
5395 user_addr_t iov_base;
5396 u_int32_t io_req_size;
5397 u_int32_t offset_in_file;
5398 u_int32_t offset_in_iovbase;
5399 u_int32_t io_size;
5400 u_int32_t io_min;
5401 u_int32_t xsize;
5402 u_int32_t devblocksize;
5403 u_int32_t mem_alignment_mask;
5404 u_int32_t max_upl_size;
5405 u_int32_t max_rd_size;
5406 u_int32_t max_rd_ahead;
5407 u_int32_t max_vector_size;
5408 boolean_t io_throttled = FALSE;
5409
5410 u_int32_t vector_upl_iosize = 0;
5411 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
5412 off_t v_upl_uio_offset = 0;
5413 int vector_upl_index = 0;
5414 upl_t vector_upl = NULL;
5415 cl_direct_read_lock_t *lock = NULL;
5416 uint32_t verify_mask = 0;
5417
5418 assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT);
5419
5420 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
5421 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5422
5423 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
5424
5425 max_rd_size = max_upl_size;
5426
5427 if (__improbable(os_mul_overflow(max_rd_size, IO_SCALE(vp, 2),
5428 &max_rd_ahead) || (max_rd_ahead > overlapping_read_max))) {
5429 max_rd_ahead = overlapping_read_max;
5430 }
5431
5432 io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
5433
5434 if (flags & IO_PASSIVE) {
5435 io_flag |= CL_PASSIVE;
5436 }
5437
5438 if (flags & IO_ENCRYPTED) {
5439 io_flag |= CL_RAW_ENCRYPTED;
5440 }
5441
5442 if (flags & IO_NOCACHE) {
5443 io_flag |= CL_NOCACHE;
5444 }
5445
5446 if (flags & IO_SKIP_ENCRYPTION) {
5447 io_flag |= CL_ENCRYPTED;
5448 }
5449
5450 iostate.io_completed = 0;
5451 iostate.io_issued = 0;
5452 iostate.io_error = 0;
5453 iostate.io_wanted = 0;
5454
5455 lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);
5456
5457 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
5458 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
5459
5460 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5461 (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
5462
5463 if (devblocksize == 1) {
5464 /*
5465 * the AFP client advertises a devblocksize of 1
5466 * however, its BLOCKMAP routine maps to physical
5467 * blocks that are PAGE_SIZE in size...
5468 * therefore we can't ask for I/Os that aren't page aligned
5469 * or aren't multiples of PAGE_SIZE in size
5470 * by setting devblocksize to PAGE_SIZE, we re-instate
5471 * the old behavior we had before the mem_alignment_mask
5472 * changes went in...
5473 */
5474 devblocksize = PAGE_SIZE;
5475 }
5476
5477 /*
5478 * We are going to need this uio for the prefaulting later
5479 * especially for the cases where multiple non-contiguous
5480 * iovs are passed into this routine.
5481 *
5482 * Note that we only want to prefault for direct IOs to userspace buffers,
5483 * not kernel buffers.
5484 */
5485 uio_t uio_acct = NULL;
5486 if (uio->uio_segflg != UIO_SYSSPACE) {
5487 uio_acct = uio_duplicate(uio);
5488 }
5489
5490 retval = VNOP_VERIFY(vp, 0, NULL, 0, &verify_block_size, NULL, VNODE_VERIFY_DEFAULT, NULL, NULL);
5491 if (retval) {
5492 verify_block_size = 0;
5493 } else if (verify_block_size) {
5494 assert((verify_block_size & (verify_block_size - 1)) == 0);
5495 verify_mask = verify_block_size - 1;
5496 }
5497
5498 next_dread:
5499 io_req_size = *read_length;
5500 iov_base = uio_curriovbase(uio);
5501
5502 offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
5503 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
5504
5505 if (vm_map_page_mask(current_map()) < PAGE_MASK) {
5506 /*
5507 * XXX TODO4K
5508 * Direct I/O might not work as expected from a 16k kernel space
5509 * to a 4k user space because each 4k chunk might point to
5510 * a different 16k physical page...
5511 * Let's go the "misaligned" way.
5512 */
5513 if (!misaligned) {
5514 DEBUG4K_VFS("forcing misaligned\n");
5515 }
5516 misaligned = 1;
5517 }
5518
5519 if (offset_in_file || offset_in_iovbase) {
5520 /*
5521 * one of the 2 important offsets is misaligned
5522 * so fire an I/O through the cache for this entire vector
5523 */
5524 misaligned = 1;
5525 }
5526 if (iov_base & (devblocksize - 1)) {
5527 /*
5528 * the offset in memory must be on a device block boundary
5529 * so that we can guarantee that we can generate an
5530 * I/O that ends on a page boundary in cluster_io
5531 */
5532 misaligned = 1;
5533 }
5534
5535 if (verify_block_size && !misaligned && ((uio_offset(uio) & verify_mask) || (uio_resid(uio) & verify_mask))) {
5536 /*
5537 * If the offset is not aligned to the verification block size
5538 * or the size is not aligned to the verification block size,
5539 * we simply send this through the cached i/o path as that is
5540 * what the Filesystem will end up doing anyway i.e. it will
5541 * read all the remaining data in order to verify it and then
5542 * discard the data it has read.
5543 */
5544 misaligned = 1;
5545 }
5546
5547 max_io_size = filesize - uio->uio_offset;
5548
5549 /*
5550 * The user must request IO in aligned chunks. If the
5551 * offset into the file is bad, or the userland pointer
5552 * is non-aligned, then we cannot service the encrypted IO request.
5553 */
5554 if (flags & IO_ENCRYPTED) {
5555 if (misaligned || (io_req_size & (devblocksize - 1))) {
5556 retval = EINVAL;
5557 }
5558
5559 max_io_size = roundup(max_io_size, devblocksize);
5560 }
5561
5562 if ((off_t)io_req_size > max_io_size) {
5563 io_req_size = (u_int32_t)max_io_size;
5564 }
5565
5566 /*
5567 * When we get to this point, we know...
5568 * -- the offset into the file is on a devblocksize boundary
5569 */
5570
5571 while (io_req_size && retval == 0) {
5572 u_int32_t io_start;
5573
5574 if (cluster_is_throttled(vp)) {
5575 uint32_t max_throttle_size = calculate_max_throttle_size(vp);
5576
5577 /*
5578 * we're in the throttle window, at the very least
5579 * we want to limit the size of the I/O we're about
5580 * to issue
5581 */
5582 max_rd_size = max_throttle_size;
5583 max_rd_ahead = max_throttle_size - 1;
5584 max_vector_size = max_throttle_size;
5585 } else {
5586 max_rd_size = max_upl_size;
5587 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
5588 max_vector_size = MAX_VECTOR_UPL_SIZE;
5589 }
5590 io_start = io_size = io_req_size;
5591
5592 /*
5593 * First look for pages already in the cache
5594 * and move them to user space. But only do this
5595 * check if we are not retrieving encrypted data directly
5596 * from the filesystem; those blocks should never
5597 * be in the UBC.
5598 *
5599 * cluster_copy_ubc_data returns the resid
5600 * in io_size
5601 */
5602 if ((flags & IO_ENCRYPTED) == 0) {
5603 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
5604 }
5605 /*
5606 * calculate the number of bytes actually copied
5607 * starting size - residual
5608 */
5609 xsize = io_start - io_size;
5610
5611 io_req_size -= xsize;
5612
5613 if (useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
5614 /*
5615 * We found something in the cache or we have an iov_base that's not
5616 * page-aligned.
5617 *
5618 * Issue all I/O's that have been collected within this Vectored UPL.
5619 */
5620 if (vector_upl_index) {
5621 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5622 reset_vector_run_state();
5623 }
5624
5625 if (xsize) {
5626 useVectorUPL = 0;
5627 }
5628
5629 /*
5630 * After this point, if we are using the Vector UPL path and the base is
5631 * not page-aligned then the UPL with that base will be the first in the vector UPL.
5632 */
5633 }
5634
5635 /*
5636 * check to see if we are finished with this request.
5637 *
5638 * If we satisfied this IO already, then io_req_size will be 0.
5639 * Otherwise, see if the IO was mis-aligned and needs to go through
5640 * the UBC to deal with the 'tail'.
5641 *
5642 */
5643 if (io_req_size == 0 || (misaligned)) {
5644 /*
5645 * see if there's another uio vector to
5646 * process that's of type IO_DIRECT
5647 *
5648 * break out of while loop to get there
5649 */
5650 break;
5651 }
5652 /*
5653 * assume the request ends on a device block boundary
5654 */
5655 io_min = devblocksize;
5656
5657 /*
5658 * we can handle I/O's in multiples of the device block size
5659 * however, if io_size isn't a multiple of devblocksize we
5660 * want to clip it back to the nearest page boundary since
5661 * we are going to have to go through cluster_read_copy to
5662 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
5663 * multiple, we avoid asking the drive for the same physical
5664 * blocks twice.. once for the partial page at the end of the
5665 * request and a 2nd time for the page we read into the cache
5666 * (which overlaps the end of the direct read) in order to
5667 * get at the overhang bytes
5668 */
5669 if (io_size & (devblocksize - 1)) {
5670 assert(!(flags & IO_ENCRYPTED));
5671 /*
5672 * Clip the request to the previous page size boundary
5673 * since request does NOT end on a device block boundary
5674 */
5675 io_size &= ~PAGE_MASK;
5676 io_min = PAGE_SIZE;
5677 }
5678 if (retval || io_size < io_min) {
5679 /*
5680 * either an error or we only have the tail left to
5681 * complete via the copy path...
5682 * we may have already spun some portion of this request
5683 * off as async requests... we need to wait for the I/O
5684 * to complete before returning
5685 */
5686 goto wait_for_dreads;
5687 }
5688
5689 /*
5690 * Don't re-check the UBC data if we are looking for uncached IO
5691 * or asking for encrypted blocks.
5692 */
5693 if ((flags & IO_ENCRYPTED) == 0) {
5694 if ((xsize = io_size) > max_rd_size) {
5695 xsize = max_rd_size;
5696 }
5697
5698 io_size = 0;
5699
5700 if (!lock) {
5701 /*
5702 * We hold a lock here between the time we check the
5703 * cache and the time we issue I/O. This saves us
5704 * from having to lock the pages in the cache. Not
5705 * all clients will care about this lock but some
5706 * clients may want to guarantee stability between
5707 * here and when the I/O is issued in which case they
5708 * will take the lock exclusively.
5709 */
5710 lock = cluster_lock_direct_read(vp, LCK_RW_TYPE_SHARED);
5711 }
5712
5713 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
5714
5715 if (io_size == 0) {
5716 /*
5717 * a page must have just come into the cache
5718 * since the first page in this range is no
5719 * longer absent, go back and re-evaluate
5720 */
5721 continue;
5722 }
5723 }
5724 if ((flags & IO_RETURN_ON_THROTTLE)) {
5725 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
5726 if (!cluster_io_present_in_BC(vp, uio->uio_offset)) {
5727 /*
5728 * we're in the throttle window and at least 1 I/O
5729 * has already been issued by a throttleable thread
5730 * in this window, so return with EAGAIN to indicate
5731 * to the FS issuing the cluster_read call that it
5732 * should now throttle after dropping any locks
5733 */
5734 throttle_info_update_by_mount(vp->v_mount);
5735
5736 io_throttled = TRUE;
5737 goto wait_for_dreads;
5738 }
5739 }
5740 }
5741 if (io_size > max_rd_size) {
5742 io_size = max_rd_size;
5743 }
5744
5745 iov_base = uio_curriovbase(uio);
5746
5747 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
5748 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5749
5750 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
5751 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
5752
5753 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
5754 no_zero_fill = 1;
5755 } else {
5756 no_zero_fill = 0;
5757 }
5758
5759 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5760 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
5761 pages_in_pl = 0;
5762 upl_size = (upl_size_t)upl_needed_size;
5763 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
5764 if (no_zero_fill) {
5765 upl_flags |= UPL_NOZEROFILL;
5766 }
5767 if (force_data_sync) {
5768 upl_flags |= UPL_FORCE_DATA_SYNC;
5769 }
5770
5771 kret = vm_map_create_upl(map,
5772 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
5773 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE);
5774
5775 if (kret != KERN_SUCCESS) {
5776 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5777 (int)upl_offset, upl_size, io_size, kret, 0);
5778 /*
5779 * failed to get pagelist
5780 *
5781 * we may have already spun some portion of this request
5782 * off as async requests... we need to wait for the I/O
5783 * to complete before returning
5784 */
5785 goto wait_for_dreads;
5786 }
5787 pages_in_pl = upl_size / PAGE_SIZE;
5788 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
5789
5790 for (i = 0; i < pages_in_pl; i++) {
5791 if (!upl_page_present(pl, i)) {
5792 break;
5793 }
5794 }
5795 if (i == pages_in_pl) {
5796 break;
5797 }
5798
5799 ubc_upl_abort(upl, 0);
5800 }
5801 if (force_data_sync >= 3) {
5802 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5803 (int)upl_offset, upl_size, io_size, kret, 0);
5804
5805 goto wait_for_dreads;
5806 }
5807 /*
5808 * Consider the possibility that upl_size wasn't satisfied.
5809 */
5810 if (upl_size < upl_needed_size) {
5811 if (upl_size && upl_offset == 0) {
5812 io_size = upl_size;
5813 } else {
5814 io_size = 0;
5815 }
5816 }
5817 if (io_size == 0) {
5818 ubc_upl_abort(upl, 0);
5819 goto wait_for_dreads;
5820 }
5821 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
5822 (int)upl_offset, upl_size, io_size, kret, 0);
5823
5824 if (useVectorUPL) {
5825 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
5826 if (end_off) {
5827 issueVectorUPL = 1;
5828 }
5829 /*
5830 * After this point, if we are using a vector UPL, then
5831 * either all the UPL elements end on a page boundary OR
5832 * this UPL is the last element because it does not end
5833 * on a page boundary.
5834 */
5835 }
5836
5837 /*
5838 * request asynchronously so that we can overlap
5839 * the preparation of the next I/O
5840 * if there are already too many outstanding reads
5841 * wait until some have completed before issuing the next read
5842 */
5843 cluster_iostate_wait(&iostate, max_rd_ahead, "cluster_read_direct");
5844
5845 if (iostate.io_error) {
5846 /*
5847 * one of the earlier reads we issued ran into a hard error
5848 * don't issue any more reads, cleanup the UPL
5849 * that was just created but not used, then
5850 * go wait for any other reads to complete before
5851 * returning the error to the caller
5852 */
5853 ubc_upl_abort(upl, 0);
5854
5855 goto wait_for_dreads;
5856 }
5857 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
5858 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
5859
5860 if (!useVectorUPL) {
5861 if (no_zero_fill) {
5862 io_flag &= ~CL_PRESERVE;
5863 } else {
5864 io_flag |= CL_PRESERVE;
5865 }
5866
5867 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5868 } else {
5869 if (!vector_upl_index) {
5870 vector_upl = vector_upl_create(upl_offset, uio->uio_iovcnt);
5871 v_upl_uio_offset = uio->uio_offset;
5872 vector_upl_offset = upl_offset;
5873 }
5874
5875 vector_upl_set_subupl(vector_upl, upl, upl_size);
5876 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
5877 vector_upl_index++;
5878 vector_upl_size += upl_size;
5879 vector_upl_iosize += io_size;
5880
5881 if (issueVectorUPL || vector_upl_index == vector_upl_max_upls(vector_upl) || vector_upl_size >= max_vector_size) {
5882 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5883 reset_vector_run_state();
5884 }
5885 }
5886
5887 if (lock) {
5888 // We don't need to wait for the I/O to complete
5889 cluster_unlock_direct_read(lock);
5890 lock = NULL;
5891 }
5892
5893 /*
5894 * update the uio structure
5895 */
5896 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
5897 uio_update(uio, (user_size_t)max_io_size);
5898 } else {
5899 uio_update(uio, (user_size_t)io_size);
5900 }
5901
5902 io_req_size -= io_size;
5903
5904 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
5905 upl, (int)uio->uio_offset, io_req_size, retval, 0);
5906 } /* end while */
5907
5908 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
5909 retval = cluster_io_type(uio, read_type, read_length, 0);
5910
5911 if (retval == 0 && *read_type == IO_DIRECT) {
5912 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
5913 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
5914
5915 goto next_dread;
5916 }
5917 }
5918
5919 wait_for_dreads:
5920
5921 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
5922 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
5923 reset_vector_run_state();
5924 }
5925
5926 // We don't need to wait for the I/O to complete
5927 if (lock) {
5928 cluster_unlock_direct_read(lock);
5929 }
5930
5931 /*
5932 * make sure all async reads that are part of this stream
5933 * have completed before we return
5934 */
5935 cluster_iostate_wait(&iostate, 0, "cluster_read_direct");
5936
5937 if (iostate.io_error) {
5938 retval = iostate.io_error;
5939 }
5940
5941 lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);
5942
5943 if (io_throttled == TRUE && retval == 0) {
5944 retval = EAGAIN;
5945 }
5946
5947 vm_map_offset_t current_page_size, current_page_mask;
5948 current_page_size = vm_map_page_size(current_map());
5949 current_page_mask = vm_map_page_mask(current_map());
5950 if (uio_acct) {
5951 assert(uio_acct->uio_segflg != UIO_SYSSPACE);
5952 off_t bytes_to_prefault = 0, bytes_prefaulted = 0;
5953 user_addr_t curr_iov_base = 0;
5954 user_addr_t curr_iov_end = 0;
5955 user_size_t curr_iov_len = 0;
5956
5957 bytes_to_prefault = uio_offset(uio) - uio_offset(uio_acct);
5958
5959 for (; bytes_prefaulted < bytes_to_prefault;) {
5960 curr_iov_base = uio_curriovbase(uio_acct);
5961 curr_iov_len = MIN(uio_curriovlen(uio_acct), bytes_to_prefault - bytes_prefaulted);
5962 curr_iov_end = curr_iov_base + curr_iov_len;
5963
5964 for (; curr_iov_base < curr_iov_end;) {
5965 /*
5966 * This is specifically done for pmap accounting purposes.
5967 * vm_pre_fault() will call vm_fault() to enter the page into
5968 * the pmap if there isn't _a_ physical page for that VA already.
5969 */
5970 vm_pre_fault(vm_map_trunc_page(curr_iov_base, current_page_mask), VM_PROT_READ);
5971 curr_iov_base += current_page_size;
5972 bytes_prefaulted += current_page_size;
5973 }
5974 /*
5975 * Use update instead of advance so we can see how many iovs we processed.
5976 */
5977 uio_update(uio_acct, curr_iov_len);
5978 }
5979 uio_free(uio_acct);
5980 uio_acct = NULL;
5981 }
5982
5983 if (io_req_size && retval == 0) {
5984 /*
5985 * we couldn't handle the tail of this request in DIRECT mode
5986 * so fire it through the copy path
5987 */
5988 if (flags & IO_ENCRYPTED) {
5989 /*
5990 * We cannot fall back to the copy path for encrypted I/O. If this
5991 * happens, there is something wrong with the user buffer passed
5992 * down.
5993 */
5994 retval = EFAULT;
5995 } else {
5996 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
5997 }
5998
5999 *read_type = IO_UNKNOWN;
6000 }
6001 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
6002 (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
6003
6004 return retval;
6005 }
6006
6007
/*
 * cluster_read_contig
 *
 * Direct-read path for a user buffer that is physically contiguous for
 * *read_length bytes (e.g. wired device memory).  A UPL is created over the
 * buffer, any device-block-misaligned head is handled synchronously via
 * cluster_align_phys_io(), the aligned middle is issued asynchronously in
 * MAX_IO_CONTIG_SIZE chunks, and a misaligned tail is finished after all
 * async I/O drains.  Up to MAX_VECTS contiguous regions are consumed before
 * returning; *read_type / *read_length are re-evaluated with cluster_io_type()
 * so the caller can dispatch whatever remains of the uio.
 *
 * Returns 0 on success or an errno (EINVAL for unsuitable buffers, or the
 * first hard I/O error reported through the shared iostate).
 */
static int
cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
    int (*callback)(buf_t, void *), void *callback_arg, int flags)
{
	upl_page_info_t *pl;
	upl_t upl[MAX_VECTS];           /* one UPL per contiguous region processed */
	vm_offset_t upl_offset;
	addr64_t dst_paddr = 0;         /* physical destination cursor for aligned fixups */
	user_addr_t iov_base;
	off_t max_size;
	upl_size_t upl_size;
	vm_size_t upl_needed_size;
	mach_msg_type_number_t pages_in_pl;
	upl_control_flags_t upl_flags;
	kern_return_t kret;
	struct clios iostate;           /* tracks issued vs. completed async reads */
	int error = 0;
	int cur_upl = 0;
	int num_upl = 0;
	int n;
	u_int32_t xsize;
	u_int32_t io_size;
	u_int32_t devblocksize;
	u_int32_t mem_alignment_mask;
	u_int32_t tail_size = 0;        /* sub-devblock remainder, done last */
	int bflag;

	if (flags & IO_PASSIVE) {
		bflag = CL_PASSIVE;
	} else {
		bflag = 0;
	}

	if (flags & IO_NOCACHE) {
		bflag |= CL_NOCACHE;
	}

	/*
	 * When we enter this routine, we know
	 * -- the read_length will not exceed the current iov_len
	 * -- the target address is physically contiguous for read_length
	 */
	cluster_syncup(vp, filesize, callback, callback_arg, PUSH_SYNC);

	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;

	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;

	lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL);

next_cread:
	io_size = *read_length;

	/* never read past EOF */
	max_size = filesize - uio->uio_offset;

	if (io_size > max_size) {
		io_size = (u_int32_t)max_size;
	}

	iov_base = uio_curriovbase(uio);

	upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
	upl_needed_size = upl_offset + io_size;

	pages_in_pl = 0;
	upl_size = (upl_size_t)upl_needed_size;
	upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;


	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
	    (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);

	/* pick the map matching the uio's address space (user vs. kernel) */
	vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
	kret = vm_map_get_upl(map,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	    vm_memtag_canonicalize(map, vm_map_trunc_page(iov_base, vm_map_page_mask(map))),
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
	    vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
	    &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
	    (int)upl_offset, upl_size, io_size, kret, 0);

	if (kret != KERN_SUCCESS) {
		/*
		 * failed to get pagelist
		 */
		error = EINVAL;
		goto wait_for_creads;
	}
	num_upl++;

	if (!(upl_flags & UPL_PHYS_CONTIG)) {
		/*
		 * The created UPL needs to have the UPL_PHYS_CONTIG flag.
		 */
		error = EINVAL;
		goto wait_for_creads;
	}

	if (upl_size < upl_needed_size) {
		/*
		 * The upl_size wasn't satisfied.
		 */
		error = EINVAL;
		goto wait_for_creads;
	}
	pl = ubc_upl_pageinfo(upl[cur_upl]);

	/* physical address of the start of the destination buffer */
	dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;

	/*
	 * Handle a file offset that isn't device-block aligned (or a request
	 * smaller than one device block) by bouncing the head through
	 * cluster_align_phys_io a block at a time.
	 */
	while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
		u_int32_t head_size;

		head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));

		if (head_size > io_size) {
			head_size = io_size;
		}

		error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);

		if (error) {
			goto wait_for_creads;
		}

		upl_offset += head_size;
		dst_paddr += head_size;
		io_size -= head_size;

		iov_base += head_size;
	}
	if ((u_int32_t)iov_base & mem_alignment_mask) {
		/*
		 * request doesn't set up on a memory boundary
		 * the underlying DMA engine can handle...
		 * return an error instead of going through
		 * the slow copy path since the intent of this
		 * path is direct I/O to device memory
		 */
		error = EINVAL;
		goto wait_for_creads;
	}

	/* split off the sub-devblock tail; it is handled after the async reads drain */
	tail_size = io_size & (devblocksize - 1);

	io_size -= tail_size;

	while (io_size && error == 0) {
		if (io_size > MAX_IO_CONTIG_SIZE) {
			xsize = MAX_IO_CONTIG_SIZE;
		} else {
			xsize = io_size;
		}
		/*
		 * request asynchronously so that we can overlap
		 * the preparation of the next I/O... we'll do
		 * the commit after all the I/O has completed
		 * since its all issued against the same UPL
		 * if there are already too many outstanding reads
		 * wait until some have completed before issuing the next
		 */
		cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig");

		if (iostate.io_error) {
			/*
			 * one of the earlier reads we issued ran into a hard error
			 * don't issue any more reads...
			 * go wait for any other reads to complete before
			 * returning the error to the caller
			 */
			goto wait_for_creads;
		}
		error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
		    CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
		    (buf_t)NULL, &iostate, callback, callback_arg);
		/*
		 * The cluster_io read was issued successfully,
		 * update the uio structure
		 */
		if (error == 0) {
			uio_update(uio, (user_size_t)xsize);

			dst_paddr += xsize;
			upl_offset += xsize;
			io_size -= xsize;
		}
	}
	if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
		/* see if the next chunk of the uio is also physically contiguous */
		error = cluster_io_type(uio, read_type, read_length, 0);

		if (error == 0 && *read_type == IO_CONTIG) {
			cur_upl++;
			goto next_cread;
		}
	} else {
		*read_type = IO_UNKNOWN;
	}

wait_for_creads:
	/*
	 * make sure all async reads that are part of this stream
	 * have completed before we proceed
	 */
	cluster_iostate_wait(&iostate, 0, "cluster_read_contig");

	if (iostate.io_error) {
		error = iostate.io_error;
	}

	lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp);

	if (error == 0 && tail_size) {
		/* finish the misaligned tail synchronously, now that dst_paddr is final */
		error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
	}

	for (n = 0; n < num_upl; n++) {
		/*
		 * just release our hold on each physically contiguous
		 * region without changing any state
		 */
		ubc_upl_abort(upl[n], 0);
	}

	return error;
}
6239
6240
/*
 * cluster_io_type
 *
 * Classify how the next chunk of 'uio' should be issued and report its size.
 *
 *   *io_type is set to:
 *     IO_CONTIG  - the current iovec maps physically contiguous memory
 *     IO_DIRECT  - ordinary buffer at least 'min_length' bytes long
 *     IO_COPY    - buffer shorter than 'min_length'; use the cached copy path
 *     IO_UNKNOWN - nothing left in the uio
 *   *io_length is set to the usable length, clipped to MAX_IO_REQUEST_SIZE
 *   so internal chunking fits in 32 bits.
 *
 * Returns 0, or EFAULT if the address range can't be queried (bad user
 * address).  Note that even on EFAULT, *io_type / *io_length are still
 * stored from the (possibly unchanged) upl_size/upl_flags — callers are
 * expected to check the return value first.
 *
 * IO_DIRECT is downgraded to IO_COPY for sub-page-size address spaces,
 * which cannot do direct I/O.
 */
static int
cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
{
	user_size_t iov_len;
	user_addr_t iov_base = 0;
	upl_t upl;
	upl_size_t upl_size;
	upl_control_flags_t upl_flags;
	int retval = 0;

	/*
	 * skip over any empty vectors
	 */
	uio_update(uio, (user_size_t)0);

	iov_len = MIN(uio_curriovlen(uio), uio_resid(uio));

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);

	if (iov_len) {
		iov_base = uio_curriovbase(uio);
		/*
		 * make sure the size of the vector isn't too big...
		 * internally, we want to handle all of the I/O in
		 * chunk sizes that fit in a 32 bit int
		 */
		if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE) {
			upl_size = MAX_IO_REQUEST_SIZE;
		} else {
			upl_size = (u_int32_t)iov_len;
		}

		/* query-only: asks whether the range is physically contiguous */
		upl_flags = UPL_QUERY_OBJECT_TYPE;

		vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
		if ((vm_map_get_upl(map,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
		    vm_memtag_canonicalize(map, vm_map_trunc_page(iov_base, vm_map_page_mask(map))),
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
		    vm_map_trunc_page(iov_base, vm_map_page_mask(map)),
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
		    &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) {
			/*
			 * the user app must have passed in an invalid address
			 */
			retval = EFAULT;
		}
		if (upl_size == 0) {
			retval = EFAULT;
		}

		*io_length = upl_size;

		if (upl_flags & UPL_PHYS_CONTIG) {
			*io_type = IO_CONTIG;
		} else if (iov_len >= min_length) {
			*io_type = IO_DIRECT;
		} else {
			*io_type = IO_COPY;
		}
	} else {
		/*
		 * nothing left to do for this uio
		 */
		*io_length = 0;
		*io_type = IO_UNKNOWN;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);

	if (*io_type == IO_DIRECT &&
	    vm_map_page_shift(current_map()) < PAGE_SHIFT) {
		/* no direct I/O for sub-page-size address spaces */
		DEBUG4K_VFS("io_type IO_DIRECT -> IO_COPY\n");
		*io_type = IO_COPY;
	}

	return retval;
}
6319
6320
6321 /*
6322 * generate advisory I/O's in the largest chunks possible
6323 * the completed pages will be released into the VM cache
6324 */
6325 int
advisory_read(vnode_t vp,off_t filesize,off_t f_offset,int resid)6326 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
6327 {
6328 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
6329 }
6330
/*
 * advisory_read_ext
 *
 * Generate advisory read-ahead I/O for [f_offset, f_offset + resid) of the
 * file, in the largest chunks possible.  Pages already resident in the cache
 * are skipped via ubc_range_op(UPL_ROP_PRESENT); for the remainder, a UPL of
 * absent pages is created and asynchronous reads are issued through
 * cluster_io with CL_COMMIT, so completed pages are released into the VM
 * cache.  Errors are best-effort: a failed UPL creation simply returns the
 * current retval (still 0 at that point).
 *
 * Returns 0, or EINVAL for a vnode without ubc info / negative arguments,
 * or the first error reported by cluster_io.
 */
int
advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
	upl_page_info_t *pl;
	upl_t upl = NULL;
	vm_offset_t upl_offset;
	int upl_size;
	off_t upl_f_offset;     /* page-aligned file offset the UPL starts at */
	int start_offset;       /* offset of f_offset within its page */
	int start_pg;
	int last_pg;
	int pages_in_upl;
	off_t max_size;
	int io_size;
	kern_return_t kret;
	int retval = 0;
	int issued_io;
	int skip_range;
	uint32_t max_io_size;


	if (!UBCINFOEXISTS(vp)) {
		return EINVAL;
	}

	if (f_offset < 0 || resid < 0) {
		return EINVAL;
	}

	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);

	/* on SSDs, speculative I/O is capped separately */
	if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
		if (max_io_size > speculative_prefetch_max_iosize) {
			max_io_size = speculative_prefetch_max_iosize;
		}
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
	    (int)f_offset, resid, (int)filesize, 0, 0);

	while (resid && f_offset < filesize && retval == 0) {
		/*
		 * compute the size of the upl needed to encompass
		 * the requested read... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		start_offset = (int)(f_offset & PAGE_MASK_64);
		upl_f_offset = f_offset - (off_t)start_offset;
		max_size = filesize - f_offset;

		if (resid < max_size) {
			io_size = resid;
		} else {
			io_size = (int)max_size;
		}

		upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
		if ((uint32_t)upl_size > max_io_size) {
			upl_size = max_io_size;
		}

		skip_range = 0;
		/*
		 * return the number of contiguously present pages in the cache
		 * starting at upl_f_offset within the file
		 */
		ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);

		if (skip_range) {
			/*
			 * skip over pages already present in the cache
			 */
			io_size = skip_range - start_offset;

			f_offset += io_size;
			resid -= io_size;

			if (skip_range == upl_size) {
				continue;
			}
			/*
			 * have to issue some real I/O
			 * at this point, we know it's starting on a page boundary
			 * because we've skipped over at least the first page in the request
			 */
			start_offset = 0;
			upl_f_offset += skip_range;
			upl_size -= skip_range;
		}
		pages_in_upl = upl_size / PAGE_SIZE;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
		    upl, (int)upl_f_offset, upl_size, start_offset, 0);

		/* UPL_RET_ONLY_ABSENT: only pages not already cached are returned */
		kret = ubc_create_upl_kernel(vp,
		    upl_f_offset,
		    upl_size,
		    &upl,
		    &pl,
		    UPL_RET_ONLY_ABSENT | UPL_SET_LITE,
		    VM_KERN_MEMORY_FILE);
		if (kret != KERN_SUCCESS) {
			/* best-effort advisory I/O: give up quietly (retval is 0 here) */
			return retval;
		}
		issued_io = 0;

		/*
		 * before we start marching forward, we must make sure we end on
		 * a present page, otherwise we will be working with a freed
		 * upl
		 */
		for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
			if (upl_page_present(pl, last_pg)) {
				break;
			}
		}
		pages_in_upl = last_pg + 1;


		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
		    upl, (int)upl_f_offset, upl_size, start_offset, 0);


		for (last_pg = 0; last_pg < pages_in_upl;) {
			/*
			 * scan from the beginning of the upl looking for the first
			 * page that is present.... this will become the first page in
			 * the request we're going to make to 'cluster_io'... if all
			 * of the pages are absent, we won't call through to 'cluster_io'
			 */
			for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
				if (upl_page_present(pl, start_pg)) {
					break;
				}
			}

			/*
			 * scan from the starting present page looking for an absent
			 * page before the end of the upl is reached, if we
			 * find one, then it will terminate the range of pages being
			 * presented to 'cluster_io'
			 */
			for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
				if (!upl_page_present(pl, last_pg)) {
					break;
				}
			}

			if (last_pg > start_pg) {
				/*
				 * we found a range of pages that must be filled
				 * if the last page in this range is the last page of the file
				 * we may have to clip the size of it to keep from reading past
				 * the end of the last physical block associated with the file
				 */
				upl_offset = start_pg * PAGE_SIZE;
				io_size = (last_pg - start_pg) * PAGE_SIZE;

				if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) {
					io_size = (int)(filesize - (upl_f_offset + upl_offset));
				}

				/*
				 * issue an asynchronous read to cluster_io
				 */
				retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
				    CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);

				issued_io = 1;
			}
		}
		if (issued_io == 0) {
			/* nothing was issued against this upl; release it untouched */
			ubc_upl_abort(upl, 0);
		}

		io_size = upl_size - start_offset;

		if (io_size > resid) {
			io_size = resid;
		}
		f_offset += io_size;
		resid -= io_size;
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
	    (int)f_offset, resid, retval, 0, 0);

	return retval;
}
6523
6524
6525 int
cluster_push(vnode_t vp,int flags)6526 cluster_push(vnode_t vp, int flags)
6527 {
6528 return cluster_push_ext(vp, flags, NULL, NULL);
6529 }
6530
6531
6532 int
cluster_push_ext(vnode_t vp,int flags,int (* callback)(buf_t,void *),void * callback_arg)6533 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
6534 {
6535 return cluster_push_err(vp, flags, callback, callback_arg, NULL);
6536 }
6537
6538 /* write errors via err, but return the number of clusters written */
6539 extern uint32_t system_inshutdown;
6540 uint32_t cl_sparse_push_error = 0;
/*
 * cluster_push_err
 *
 * Push the vnode's delayed-write clusters to disk.  Any write error is
 * reported through *err (if non-NULL); the return value is the number of
 * clusters pushed (or 1 when the vnode is in sparse-cluster mode and a
 * sparse push was performed, 0 when there was nothing to do).
 *
 * Serialization: an IO_SYNC caller takes the cl_sparse_wait "token" and
 * waits for all in-flight async sparse pushes (cl_sparse_pushes) to drain
 * before cleaning, guaranteeing that writes issued before this fsync reach
 * disk before it returns.  Other callers queue behind the token holder.
 */
int
cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg, int *err)
{
	int retval;
	int my_sparse_wait = 0;  /* set when this thread owns the cl_sparse_wait token */
	struct cl_writebehind *wbp;
	int local_err = 0;

	if (err) {
		*err = 0;
	}

	if (!UBCINFOEXISTS(vp)) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0);
		return 0;
	}
	/* return if deferred write is set */
	if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
		return 0;
	}
	/* returns with wbp->cl_lockw held on success */
	if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0);
		return 0;
	}
	if (!ISSET(flags, IO_SYNC) && wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
		/* async push with nothing pending: nothing to do */
		lck_mtx_unlock(&wbp->cl_lockw);

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0);
		return 0;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
	    wbp->cl_scmap, wbp->cl_number, flags, 0, 0);

	/*
	 * if we have an fsync in progress, we don't want to allow any additional
	 * sync/fsync/close(s) to occur until it finishes.
	 * note that its possible for writes to continue to occur to this file
	 * while we're waiting and also once the fsync starts to clean if we're
	 * in the sparse map case
	 */
	while (wbp->cl_sparse_wait) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);

		msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
	}
	if (flags & IO_SYNC) {
		/* become the serialization token owner */
		my_sparse_wait = 1;
		wbp->cl_sparse_wait = 1;

		/*
		 * this is an fsync (or equivalent)... we must wait for any existing async
		 * cleaning operations to complete before we evaulate the current state
		 * and finish cleaning... this insures that all writes issued before this
		 * fsync actually get cleaned to the disk before this fsync returns
		 */
		while (wbp->cl_sparse_pushes) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);

			msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
		}
	}
	if (wbp->cl_scmap) {
		/* sparse-cluster mode: dirty pages tracked in a sparse map */
		void *scmap;

		if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
			/*
			 * detach the map and push it with cl_lockw dropped, so
			 * writes can continue against a freshly allocated map
			 */
			scmap = wbp->cl_scmap;
			wbp->cl_scmap = NULL;

			wbp->cl_sparse_pushes++;

			lck_mtx_unlock(&wbp->cl_lockw);

			retval = sparse_cluster_push(wbp, &scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);

			lck_mtx_lock(&wbp->cl_lockw);

			wbp->cl_sparse_pushes--;

			if (retval) {
				if (wbp->cl_scmap != NULL) {
					/*
					 * panic("cluster_push_err: Expected NULL cl_scmap\n");
					 *
					 * This can happen if we get an error from the underlying FS
					 * e.g. ENOSPC, EPERM or EIO etc. We hope that these errors
					 * are transient and the I/Os will succeed at a later point.
					 *
					 * The tricky part here is that a new sparse cluster has been
					 * allocated and tracking a different set of dirty pages. So these
					 * pages are not going to be pushed out with the next sparse_cluster_push.
					 * An explicit msync or file close will, however, push the pages out.
					 *
					 * What if those calls still don't work? And so, during shutdown we keep
					 * trying till we succeed...
					 */

					if (system_inshutdown) {
						if ((retval == ENOSPC) && (vp->v_mount->mnt_flag & (MNT_LOCAL | MNT_REMOVABLE)) == MNT_LOCAL) {
							os_atomic_inc(&cl_sparse_push_error, relaxed);
						}
					} else {
						vfs_drt_control(&scmap, 0); /* emit stats and free this memory. Dirty pages stay intact. */
						scmap = NULL;
					}
				} else {
					/* no new map was created while unlocked; reinstate the old one */
					wbp->cl_scmap = scmap;
				}
			}

			/* last finishing push wakes any IO_SYNC waiter blocked above */
			if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0) {
				wakeup((caddr_t)&wbp->cl_sparse_pushes);
			}
		} else {
			/* too many concurrent pushes: do this one with the lock held */
			retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE);
		}

		local_err = retval;

		if (err) {
			*err = retval;
		}
		retval = 1;
	} else {
		/* normal mode: push the per-vnode cluster array */
		retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, &local_err, FALSE);
		if (err) {
			*err = local_err;
		}
	}
	lck_mtx_unlock(&wbp->cl_lockw);

	if (flags & IO_SYNC) {
		(void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
	}

	if (my_sparse_wait) {
		/*
		 * I'm the owner of the serialization token
		 * clear it and wakeup anyone that is waiting
		 * for me to finish
		 */
		lck_mtx_lock(&wbp->cl_lockw);

		wbp->cl_sparse_wait = 0;
		wakeup((caddr_t)&wbp->cl_sparse_wait);

		lck_mtx_unlock(&wbp->cl_lockw);
	}
	/* NOTE(review): cl_scmap/cl_number read without cl_lockw here — tracing only */
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
	    wbp->cl_scmap, wbp->cl_number, retval, local_err, 0);

	return retval;
}
6697
6698
6699 __private_extern__ void
cluster_release(struct ubc_info * ubc)6700 cluster_release(struct ubc_info *ubc)
6701 {
6702 struct cl_writebehind *wbp;
6703 struct cl_readahead *rap;
6704
6705 if ((wbp = ubc->cl_wbehind)) {
6706 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
6707
6708 if (wbp->cl_scmap) {
6709 vfs_drt_control(&(wbp->cl_scmap), 0);
6710 }
6711 lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp);
6712 zfree(cl_wr_zone, wbp);
6713 ubc->cl_wbehind = NULL;
6714 } else {
6715 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
6716 }
6717
6718 if ((rap = ubc->cl_rahead)) {
6719 lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp);
6720 zfree(cl_rd_zone, rap);
6721 ubc->cl_rahead = NULL;
6722 }
6723
6724 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
6725 }
6726
6727
/*
 * cluster_try_push
 *
 * Attempt to push (write out) the delayed-write clusters currently held in
 * the write-behind context 'wbp' for vnode 'vp'.  The caller must already
 * hold the write-behind lock (see the comment below); when 'vm_initiated'
 * is TRUE the lock is dropped around the actual I/O and reacquired after.
 *
 * push_flag bits:
 *   PUSH_ALL   - push every cluster rather than stopping after the first
 *   PUSH_SYNC  - issue the I/O synchronously (IO_SYNC)
 *   PUSH_DELAY - the caller can tolerate deferring the push; used below to
 *                decide whether to bail out and let the vnode fall into the
 *                sparse-cluster mechanism for non-sequential write patterns
 *
 * 'err', if non-NULL, receives the first error returned by cluster_push_now.
 * NOTE(review): on the 'dont_try' early-out path *err is left untouched, so
 * callers should pre-initialize it — confirm against call sites.
 *
 * Returns the number of empty cluster slots now available in 'wbp'
 * (MAX_CLUSTERS - wbp->cl_number).
 */
static int
cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err, boolean_t vm_initiated)
{
	int cl_index;
	int cl_index1;
	int min_index;
	int cl_len;
	int cl_pushed = 0;
	struct cl_wextent l_clusters[MAX_CLUSTERS];
	u_int max_cluster_pgcount;
	int error = 0;

	max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
	/*
	 * the write behind context exists and has
	 * already been locked...
	 */
	if (wbp->cl_number == 0) {
		/*
		 * no clusters to push
		 * return number of empty slots
		 */
		return MAX_CLUSTERS;
	}

	/*
	 * make a local 'sorted' copy of the clusters
	 * and clear wbp->cl_number so that new clusters can
	 * be developed
	 *
	 * selection sort: each pass picks the non-empty cluster with the
	 * lowest b_addr and then marks the source slot empty (b_addr == e_addr)
	 * so it won't be picked again
	 */
	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
		for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
			if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr) {
				continue;
			}
			if (min_index == -1) {
				min_index = cl_index1;
			} else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr) {
				min_index = cl_index1;
			}
		}
		if (min_index == -1) {
			break;
		}

		l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
		l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
		l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;

		wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
	}
	wbp->cl_number = 0;

	cl_len = cl_index;

	/* skip switching to the sparse cluster mechanism if on diskimage */
	if (((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS) &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)) {
		int i;

		/*
		 * determine if we appear to be writing the file sequentially
		 * if not, by returning without having pushed any clusters
		 * we will cause this vnode to be pushed into the sparse cluster mechanism
		 * used for managing more random I/O patterns
		 *
		 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
		 * that's why we're in try_push with PUSH_DELAY...
		 *
		 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
		 * is adjacent to the next (i.e. we're looking for sequential writes) they were sorted above
		 * so we can just make a simple pass through, up to, but not including the last one...
		 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
		 * are sequential
		 *
		 * we let the last one be partial as long as it was adjacent to the previous one...
		 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
		 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
		 */
		for (i = 0; i < MAX_CLUSTERS - 1; i++) {
			if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount) {
				goto dont_try;
			}
			if (l_clusters[i].e_addr != l_clusters[i + 1].b_addr) {
				goto dont_try;
			}
		}
	}
	/* drop the write-behind lock around VM-initiated pushes */
	if (vm_initiated == TRUE) {
		lck_mtx_unlock(&wbp->cl_lockw);
	}

	for (cl_index = 0; cl_index < cl_len; cl_index++) {
		int flags;
		struct cl_extent cl;
		int retval;

		flags = io_flags & (IO_PASSIVE | IO_CLOSE);

		/*
		 * try to push each cluster in turn...
		 * per-cluster flags recorded at write time override/augment
		 * the caller-supplied io_flags
		 */
		if (l_clusters[cl_index].io_flags & CLW_IONOCACHE) {
			flags |= IO_NOCACHE;
		}

		if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE) {
			flags |= IO_PASSIVE;
		}

		if (push_flag & PUSH_SYNC) {
			flags |= IO_SYNC;
		}

		cl.b_addr = l_clusters[cl_index].b_addr;
		cl.e_addr = l_clusters[cl_index].e_addr;

		retval = cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg, vm_initiated);

		if (retval == 0) {
			cl_pushed++;

			/* mark the local copy empty so it won't be merged back below */
			l_clusters[cl_index].b_addr = 0;
			l_clusters[cl_index].e_addr = 0;
		} else if (error == 0) {
			/* remember only the first error */
			error = retval;
		}

		if (!(push_flag & PUSH_ALL)) {
			break;
		}
	}
	if (vm_initiated == TRUE) {
		lck_mtx_lock(&wbp->cl_lockw);
	}

	if (err) {
		*err = error;
	}

dont_try:
	if (cl_len > cl_pushed) {
		/*
		 * we didn't push all of the clusters, so
		 * lets try to merge them back in to the vnode
		 */
		if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
			/*
			 * we picked up some new clusters while we were trying to
			 * push the old ones... this can happen because I've dropped
			 * the vnode lock... the sum of the
			 * leftovers plus the new cluster count exceeds our ability
			 * to represent them, so switch to the sparse cluster mechanism
			 *
			 * collect the active public clusters...
			 */
			sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);

			for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
				if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
					continue;
				}
				wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
				wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
				wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;

				cl_index1++;
			}
			/*
			 * update the cluster count
			 */
			wbp->cl_number = cl_index1;

			/*
			 * and collect the original clusters that were moved into the
			 * local storage for sorting purposes
			 */
			sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated);
		} else {
			/*
			 * we've got room to merge the leftovers back in
			 * just append them starting at the next 'hole'
			 * represented by wbp->cl_number
			 */
			for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
				if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) {
					continue;
				}

				wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
				wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
				wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;

				cl_index1++;
			}
			/*
			 * update the cluster count
			 */
			wbp->cl_number = cl_index1;
		}
	}
	return MAX_CLUSTERS - wbp->cl_number;
}
6931
6932
6933
/*
 * cluster_push_now
 *
 * Write out the dirty pages covering the page extent 'cl' of vnode 'vp'
 * (b_addr inclusive, e_addr exclusive, in units of pages).  The extent is
 * clipped to the current EOF.  When 'vm_initiated' is TRUE the work is
 * handed directly to vnode_pageout(); otherwise a UPL containing only the
 * currently-dirty pages is created and each contiguous run of dirty pages
 * is issued through cluster_io().
 *
 * Returns 0 on success (including the degenerate empty-extent and
 * beyond-EOF cases) or the first error reported by the underlying I/O.
 */
static int
cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags,
    int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
{
	upl_page_info_t *pl;
	upl_t upl;
	vm_offset_t upl_offset;
	int upl_size;
	off_t upl_f_offset;
	int pages_in_upl;
	int start_pg;
	int last_pg;
	int io_size;
	int io_flags;
	int upl_flags;
	int bflag;
	int size;
	int error = 0;
	int retval;
	kern_return_t kret;

	if (flags & IO_PASSIVE) {
		bflag = CL_PASSIVE;
	} else {
		bflag = 0;
	}

	if (flags & IO_SKIP_ENCRYPTION) {
		bflag |= CL_ENCRYPTED;
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
	    (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);

	/* empty extent... nothing to push */
	if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);

		return 0;
	}
	upl_size = pages_in_upl * PAGE_SIZE;
	upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);

	if (upl_f_offset + upl_size >= EOF) {
		if (upl_f_offset >= EOF) {
			/*
			 * must have truncated the file and missed
			 * clearing a dangling cluster (i.e. it's completely
			 * beyond the new EOF
			 */
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);

			return 0;
		}
		/* clip the request to EOF and round the UPL up to a page boundary */
		size = (int)(EOF - upl_f_offset);

		upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
		pages_in_upl = upl_size / PAGE_SIZE;
	} else {
		size = upl_size;
	}


	if (vm_initiated) {
		/* VM-driven push: let the vnode pager do the work and return its error */
		vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size,
		    UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error);

		return error;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);

	/*
	 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
	 *
	 * - only pages that are currently dirty are returned... these are the ones we need to clean
	 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
	 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
	 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
	 *   someone dirties this page while the I/O is in progress, we don't lose track of the new state
	 *
	 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
	 */

	if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE)) {
		upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
	} else {
		upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
	}

	kret = ubc_create_upl_kernel(vp,
	    upl_f_offset,
	    upl_size,
	    &upl,
	    &pl,
	    upl_flags,
	    VM_KERN_MEMORY_FILE);
	if (kret != KERN_SUCCESS) {
		panic("cluster_push: failed to get pagelist");
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);

	/*
	 * since we only asked for the dirty pages back
	 * it's possible that we may only get a few or even none, so...
	 * before we start marching forward, we must make sure we know
	 * where the last present page is in the UPL, otherwise we could
	 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
	 * employed by commit_range and abort_range.
	 */
	for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
		if (upl_page_present(pl, last_pg)) {
			break;
		}
	}
	pages_in_upl = last_pg + 1;

	if (pages_in_upl == 0) {
		/* no dirty pages came back at all... release the (still whole) UPL */
		ubc_upl_abort(upl, 0);

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
		return 0;
	}

	for (last_pg = 0; last_pg < pages_in_upl;) {
		/*
		 * find the next dirty page in the UPL
		 * this will become the first page in the
		 * next I/O to generate
		 */
		for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
			if (upl_dirty_page(pl, start_pg)) {
				break;
			}
			if (upl_page_present(pl, start_pg)) {
				/*
				 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
				 * just release these unchanged since we're not going
				 * to steal them or change their state
				 */
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		if (start_pg >= pages_in_upl) {
			/*
			 * done... no more dirty pages to push
			 */
			break;
		}
		if (start_pg > last_pg) {
			/*
			 * skipped over some non-dirty pages
			 */
			size -= ((start_pg - last_pg) * PAGE_SIZE);
		}

		/*
		 * find a range of dirty pages to write
		 */
		for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
			if (!upl_dirty_page(pl, last_pg)) {
				break;
			}
		}
		upl_offset = start_pg * PAGE_SIZE;

		io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);

		io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;

		if (!(flags & IO_SYNC)) {
			io_flags |= CL_ASYNC;
		}

		if (flags & IO_CLOSE) {
			io_flags |= CL_CLOSE;
		}

		if (flags & IO_NOCACHE) {
			io_flags |= CL_NOCACHE;
		}

		retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
		    io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);

		/* remember only the first error; keep pushing the remaining runs */
		if (error == 0 && retval) {
			error = retval;
		}

		size -= io_size;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, error, 0, 0);

	return error;
}
7128
7129
7130 /*
7131 * sparse_cluster_switch is called with the write behind lock held
7132 */
/*
 * sparse_cluster_switch
 *
 * Migrate the clusters held in wbp's fixed cluster array into the vnode's
 * sparse cluster map (wbp->cl_scmap).  Each page of each cluster is probed
 * via ubc_page_op(); only pages that are actually dirty are added to the
 * map, one page at a time, through sparse_cluster_add().
 *
 * Called with the write behind lock held.  Returns 0 or the first error
 * from sparse_cluster_add().
 *
 * NOTE(review): on error the 'break' only exits the inner per-page loop;
 * the outer loop continues with the next cluster, and cl_number is reduced
 * by the number of clusters visited — confirm this is the intended
 * error-recovery behavior.
 */
static int
sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
{
	int cl_index;
	int error = 0;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, 0, 0);

	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
		int flags;
		struct cl_extent cl;

		/* walk the cluster one page at a time, adding only the dirty ones */
		for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
			if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
				if (flags & UPL_POP_DIRTY) {
					cl.e_addr = cl.b_addr + 1;

					error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated);

					if (error) {
						break;
					}
				}
			}
		}
	}
	/* drop the clusters we consumed from the fixed array */
	wbp->cl_number -= cl_index;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, error, 0);

	return error;
}
7165
7166
7167 /*
7168 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
7169 * still associated with the write-behind context... however, if the scmap has been disassociated
7170 * from the write-behind context (the cluster_push case), the wb lock is not held
7171 */
/*
 * sparse_cluster_push
 *
 * Drain dirty regions from the sparse cluster map '*scmap' of vnode 'vp'.
 * Each region handed back by vfs_drt_get_cluster() is converted to a page
 * extent and written out with cluster_push_now().  With PUSH_ALL, the map
 * is first compacted (vfs_drt_control) and the loop runs until the map is
 * empty; otherwise a single region is pushed.
 *
 * When 'vm_initiated' is TRUE the write-behind lock is dropped around each
 * push and reacquired afterwards; if the scmap pointer changed while the
 * lock was dropped, the loop stops (the map was disassociated/replaced).
 *
 * On an I/O error, the pages of the failed region are re-marked dirty in
 * the map so they are not lost.  Returns 0 or the first push error.
 */
static int
sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t EOF, int push_flag,
    int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
{
	struct cl_extent cl;
	off_t offset;
	u_int length;
	void *l_scmap;
	int error = 0;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, kdebug_vnode(vp), (*scmap), 0, push_flag, 0);

	if (push_flag & PUSH_ALL) {
		vfs_drt_control(scmap, 1);
	}

	/* remember the map identity so we can detect replacement after unlock */
	l_scmap = *scmap;

	for (;;) {
		int retval;

		if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS) {
			/*
			 * Not finding anything to push will return KERN_FAILURE.
			 * Confusing since it isn't really a failure. But that's the
			 * reason we don't set 'error' here like we do below.
			 */
			break;
		}

		if (vm_initiated == TRUE) {
			lck_mtx_unlock(&wbp->cl_lockw);
		}

		/* convert the byte region into a page extent (e_addr exclusive) */
		cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
		cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);

		retval = cluster_push_now(vp, &cl, EOF, io_flags, callback, callback_arg, vm_initiated);
		if (error == 0 && retval) {
			error = retval;
		}

		if (vm_initiated == TRUE) {
			lck_mtx_lock(&wbp->cl_lockw);

			/* the map was swapped out from under us while unlocked... stop */
			if (*scmap != l_scmap) {
				break;
			}
		}

		if (error) {
			/* put the failed region's dirty state back so it isn't lost */
			if (vfs_drt_mark_pages(scmap, offset, length, NULL) != KERN_SUCCESS) {
				panic("Failed to restore dirty state on failure");
			}

			break;
		}

		if (!(push_flag & PUSH_ALL)) {
			break;
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);

	return error;
}
7238
7239
7240 /*
7241 * sparse_cluster_add is called with the write behind lock held
7242 */
/*
 * sparse_cluster_add
 *
 * Record the page extent 'cl' (b_addr inclusive, e_addr exclusive) as dirty
 * in the sparse cluster map '*scmap'.  If the map runs out of room,
 * vfs_drt_mark_pages() performs a partial update (reporting how many pages
 * it managed to mark via 'new_dirty'); we then push some regions out with
 * sparse_cluster_push() and retry the remainder.
 *
 * Called with the write behind lock held.  Returns 0 or the first error
 * from the push.
 */
static int
sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF,
    int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated)
{
	u_int new_dirty;
	u_int length;
	off_t offset;
	int error = 0;
	int push_flag = 0; /* Is this a valid value? */

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);

	/* convert the page extent into a byte offset/length pair */
	offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
	length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;

	while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
		/*
		 * no room left in the map
		 * only a partial update was done
		 * push out some pages and try again
		 */

		/* ask the map how aggressively to push; default to 0 if unavailable */
		if (vfs_get_scmap_push_behavior_internal(scmap, &push_flag)) {
			push_flag = 0;
		}

		error = sparse_cluster_push(wbp, scmap, vp, EOF, push_flag, 0, callback, callback_arg, vm_initiated);

		if (error) {
			break;
		}

		/* skip past the pages the partial update already marked */
		offset += (new_dirty * PAGE_SIZE_64);
		length -= (new_dirty * PAGE_SIZE);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0);

	return error;
}
7282
7283
/*
 * cluster_align_phys_io
 *
 * Handle a page-misaligned portion of a physical I/O by staging it through
 * the corresponding UBC page: the buffer-cache page covering uio_offset is
 * gathered into a single-page UPL (read in synchronously first if it isn't
 * already valid), 'xsize' bytes are copied between the user's physical page
 * ('usr_paddr') and the UBC page with copypv(), and for writes (or reads
 * that hit an already-dirty valid page) the page is written back out
 * synchronously.
 *
 * On success the uio is advanced by 'xsize'.  Returns 0 or an error from
 * UPL creation / cluster_io().
 */
static int
cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t *pl;
	upl_t upl;
	addr64_t ubc_paddr;
	kern_return_t kret;
	int error = 0;
	int did_read = 0;
	int abort_flags;
	int upl_flags;
	int bflag;

	if (flags & IO_PASSIVE) {
		bflag = CL_PASSIVE;
	} else {
		bflag = 0;
	}

	if (flags & IO_NOCACHE) {
		bflag |= CL_NOCACHE;
	}

	upl_flags = UPL_SET_LITE;

	if (!(flags & CL_READ)) {
		/*
		 * "write" operation: let the UPL subsystem know
		 * that we intend to modify the buffer cache pages
		 * we're gathering.
		 */
		upl_flags |= UPL_WILL_MODIFY;
	} else {
		/*
		 * indicate that there is no need to pull the
		 * mapping for this page... we're only going
		 * to read from it, not modify it.
		 */
		upl_flags |= UPL_FILE_IO;
	}
	/* gather the single page containing uio_offset */
	kret = ubc_create_upl_kernel(vp,
	    uio->uio_offset & ~PAGE_MASK_64,
	    PAGE_SIZE,
	    &upl,
	    &pl,
	    upl_flags,
	    VM_KERN_MEMORY_FILE);

	if (kret != KERN_SUCCESS) {
		return EINVAL;
	}

	if (!upl_valid_page(pl, 0)) {
		/*
		 * issue a synchronous read to cluster_io
		 */
		error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
		    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
		if (error) {
			ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

			return error;
		}
		did_read = 1;
	}
	/* physical address of the byte within the UBC page matching uio_offset */
	ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);

	/*
	 * NOTE:  There is no prototype for the following in BSD. It, and the definitions
	 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
	 * osfmk/ppc/mappings.h.  They are not included here because there appears to be no
	 * way to do so without exporting them to kexts as well.
	 */
	if (flags & CL_READ) {
//		copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk);	/* Copy physical to physical and flush the destination */
		copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4);         /* Copy physical to physical and flush the destination */
	} else {
//		copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc);	/* Copy physical to physical and flush the source */
		copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8);         /* Copy physical to physical and flush the source */
	}
	if (!(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
		/*
		 * issue a synchronous write to cluster_io
		 */
		error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
		    bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
	}
	if (error == 0) {
		/* advance the caller's uio past the bytes we transferred */
		uio_update(uio, (user_size_t)xsize);
	}

	if (did_read) {
		abort_flags = UPL_ABORT_FREE_ON_EMPTY;
	} else {
		/* page was already cached; dump our copy rather than keep a duplicate state */
		abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
	}

	ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);

	return error;
}
7385
/*
 * cluster_copy_upl_data
 *
 * Copy up to *io_resid bytes between 'uio' and the pages of 'upl',
 * starting at byte offset 'upl_offset' within the UPL.  The uio's segment
 * flag is temporarily switched to its physical-address counterpart so that
 * uiomove64() can be driven directly with each page's physical address.
 *
 * On return, *io_resid holds the number of bytes NOT transferred.
 * Returns 0 on success or the error from uiomove64().
 */
int
cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
{
	int pg_offset;
	int pg_index;
	int csize;
	int segflg;
	int retval = 0;
	int xsize;
	upl_page_info_t *pl;
	int dirty_count;

	xsize = *io_resid;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
	    (int)uio->uio_offset, upl_offset, xsize, 0, 0);

	segflg = uio->uio_segflg;

	/* switch the uio to the matching physical-address segment type */
	switch (segflg) {
	case UIO_USERSPACE32:
	case UIO_USERISPACE32:
		uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;

	case UIO_USERSPACE:
	case UIO_USERISPACE:
		uio->uio_segflg = UIO_PHYS_USERSPACE;
		break;

	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		uio->uio_segflg = UIO_PHYS_USERSPACE64;
		break;

	case UIO_SYSSPACE:
		uio->uio_segflg = UIO_PHYS_SYSSPACE;
		break;
	}
	pl = ubc_upl_pageinfo(upl);

	pg_index = upl_offset / PAGE_SIZE;
	pg_offset = upl_offset & PAGE_MASK;
	/* first chunk may be partial; subsequent chunks are whole pages */
	csize = min(PAGE_SIZE - pg_offset, xsize);

	dirty_count = 0;
	while (xsize && retval == 0) {
		addr64_t paddr;
		ppnum_t pn = upl_phys_page(pl, pg_index);

		paddr = ((addr64_t)pn << PAGE_SHIFT) + pg_offset;
		/*
		 * a write into a page that wasn't already dirty counts
		 * toward the task's deferred logical-write accounting
		 */
		if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE)) {
			dirty_count++;
		}

		/* such physical pages should never be restricted pages */
		if (pmap_is_page_restricted(pn)) {
			panic("%s: cannot uiomove64 into a restricted page", __func__);
		}

		retval = uiomove64(paddr, csize, uio);

		pg_index += 1;
		pg_offset = 0;
		xsize -= csize;
		csize = min(PAGE_SIZE, xsize);
	}
	*io_resid = xsize;

	/* restore the caller's original segment flag */
	uio->uio_segflg = segflg;

	if (dirty_count) {
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, upl_lookup_vnode(upl));
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
	    (int)uio->uio_offset, xsize, retval, segflg, 0);

	return retval;
}
7466
7467
/*
 * Copy data between 'uio' and the resident UBC pages of 'vp'.
 * Thin public wrapper for cluster_copy_ubc_data_internal() that always
 * requests take_reference == 1.
 */
int
cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
{
	return cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1);
}
7473
7474
/*
 * cluster_copy_ubc_data_internal
 *
 * Attempt to satisfy the transfer described by 'uio' directly from the
 * vnode's resident pages via memory_object_control_uiomove().  The uio's
 * segment flag is temporarily switched to its physical-address counterpart
 * for the duration of the move.
 *
 * mark_dirty / take_reference are passed straight through to
 * memory_object_control_uiomove().  *io_resid is reduced by the number of
 * bytes actually copied.  Bytes moved on a write are charged to the task's
 * deferred logical-write accounting.
 *
 * Returns 0 if the vnode has no memory object (nothing resident to copy
 * from; *io_resid is left unchanged), otherwise the result of
 * memory_object_control_uiomove().
 */
static int
cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
{
	int segflg;
	int io_size;
	int xsize;
	int start_offset;
	int retval = 0;
	memory_object_control_t control;

	io_size = *io_resid;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
	    (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);

	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
		    (int)uio->uio_offset, io_size, retval, 3, 0);

		return 0;
	}
	segflg = uio->uio_segflg;

	/* switch the uio to the matching physical-address segment type */
	switch (segflg) {
	case UIO_USERSPACE32:
	case UIO_USERISPACE32:
		uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;

	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		uio->uio_segflg = UIO_PHYS_USERSPACE64;
		break;

	case UIO_USERSPACE:
	case UIO_USERISPACE:
		uio->uio_segflg = UIO_PHYS_USERSPACE;
		break;

	case UIO_SYSSPACE:
		uio->uio_segflg = UIO_PHYS_SYSSPACE;
		break;
	}

	if ((io_size = *io_resid)) {
		start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
		xsize = (int)uio_resid(uio);

		retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
		    start_offset, io_size, mark_dirty, take_reference);
		/* bytes actually moved == drop in the uio's residual count */
		xsize -= uio_resid(uio);

		int num_bytes_copied = xsize;
		if (num_bytes_copied && uio_rw(uio)) {
			/* uio_rw(uio) non-zero => write direction; charge logical writes */
			task_update_logical_writes(current_task(), num_bytes_copied, TASK_WRITE_DEFERRED, vp);
		}
		io_size -= xsize;
	}
	/* restore the caller's original segment flag */
	uio->uio_segflg = segflg;
	*io_resid = io_size;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
	    (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);

	return retval;
}
7543
7544
7545 int
is_file_clean(vnode_t vp,off_t filesize)7546 is_file_clean(vnode_t vp, off_t filesize)
7547 {
7548 off_t f_offset;
7549 int flags;
7550 int total_dirty = 0;
7551
7552 for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
7553 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
7554 if (flags & UPL_POP_DIRTY) {
7555 total_dirty++;
7556 }
7557 }
7558 }
7559 if (total_dirty) {
7560 return EINVAL;
7561 }
7562
7563 return 0;
7564 }
7565
7566
7567
7568 /*
7569 * Dirty region tracking/clustering mechanism.
7570 *
7571 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
7572 * dirty regions within a larger space (file). It is primarily intended to
7573 * support clustering in large files with many dirty areas.
7574 *
7575 * The implementation assumes that the dirty regions are pages.
7576 *
7577 * To represent dirty pages within the file, we store bit vectors in a
7578 * variable-size circular hash.
7579 */
7580
7581 /*
7582 * Bitvector size. This determines the number of pages we group in a
7583 * single hashtable entry. Each hashtable entry is aligned to this
7584 * size within the file.
7585 */
7586 #define DRT_BITVECTOR_PAGES ((1024 * 256) / PAGE_SIZE)
7587
7588 /*
7589 * File offset handling.
7590 *
7591 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
7592 * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7593 */
7594 #define DRT_ADDRESS_MASK (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
7595 #define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
7596
7597 /*
7598 * Hashtable address field handling.
7599 *
7600 * The low-order bits of the hashtable address are used to conserve
7601 * space.
7602 *
7603 * DRT_HASH_COUNT_MASK must be large enough to store the range
7604 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
7605 * to indicate that the bucket is actually unoccupied.
7606 */
7607 #define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
7608 #define DRT_HASH_SET_ADDRESS(scm, i, a) \
7609 do { \
7610 (scm)->scm_hashtable[(i)].dhe_control = \
7611 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
7612 } while (0)
7613 #define DRT_HASH_COUNT_MASK 0x1ff
7614 #define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
7615 #define DRT_HASH_SET_COUNT(scm, i, c) \
7616 do { \
7617 (scm)->scm_hashtable[(i)].dhe_control = \
7618 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
7619 } while (0)
7620 #define DRT_HASH_CLEAR(scm, i) \
7621 do { \
7622 (scm)->scm_hashtable[(i)].dhe_control = 0; \
7623 } while (0)
7624 #define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
7625 #define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
7626 #define DRT_HASH_COPY(oscm, oi, scm, i) \
7627 do { \
7628 (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
7629 DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
7630 } while(0);
7631
7632
7633 #if !defined(XNU_TARGET_OS_OSX)
7634 /*
7635 * Hash table moduli.
7636 *
7637 * Since the hashtable entry's size is dependent on the size of
7638 * the bitvector, and since the hashtable size is constrained to
7639 * both being prime and fitting within the desired allocation
7640 * size, these values need to be manually determined.
7641 *
7642 * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes.
7643 *
7644 * The small hashtable allocation is 4096 bytes, so the modulus is 251.
7645 * The large hashtable allocation is 32768 bytes, so the modulus is 2039.
7646 * The xlarge hashtable allocation is 131072 bytes, so the modulus is 8179.
7647 */
7648
7649 #define DRT_HASH_SMALL_MODULUS 251
7650 #define DRT_HASH_LARGE_MODULUS 2039
7651 #define DRT_HASH_XLARGE_MODULUS 8179
7652
7653 /*
7654 * Physical memory required before the large hash modulus is permitted.
7655 *
 * On small memory systems, the large hash modulus can lead to physical
7657 * memory starvation, so we avoid using it there.
7658 */
7659 #define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
7660 #define DRT_HASH_XLARGE_MEMORY_REQUIRED (8 * 1024LL * 1024LL * 1024LL) /* 8GiB */
7661
7662 #define DRT_SMALL_ALLOCATION 4096 /* 80 bytes spare */
7663 #define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */
7664 #define DRT_XLARGE_ALLOCATION 131072 /* 208 bytes spare */
7665
7666 #else /* XNU_TARGET_OS_OSX */
7667 /*
7668 * Hash table moduli.
7669 *
7670 * Since the hashtable entry's size is dependent on the size of
7671 * the bitvector, and since the hashtable size is constrained to
7672 * both being prime and fitting within the desired allocation
7673 * size, these values need to be manually determined.
7674 *
7675 * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes.
7676 *
7677 * The small hashtable allocation is 16384 bytes, so the modulus is 1019.
7678 * The large hashtable allocation is 131072 bytes, so the modulus is 8179.
7679 * The xlarge hashtable allocation is 524288 bytes, so the modulus is 32749.
7680 */
7681
7682 #define DRT_HASH_SMALL_MODULUS 1019
7683 #define DRT_HASH_LARGE_MODULUS 8179
7684 #define DRT_HASH_XLARGE_MODULUS 32749
7685
7686 /*
7687 * Physical memory required before the large hash modulus is permitted.
7688 *
 * On small memory systems, the large hash modulus can lead to physical
7690 * memory starvation, so we avoid using it there.
7691 */
7692 #define DRT_HASH_LARGE_MEMORY_REQUIRED (4 * 1024LL * 1024LL * 1024LL) /* 4GiB */
7693 #define DRT_HASH_XLARGE_MEMORY_REQUIRED (32 * 1024LL * 1024LL * 1024LL) /* 32GiB */
7694
7695 #define DRT_SMALL_ALLOCATION 16384 /* 80 bytes spare */
7696 #define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */
7697 #define DRT_XLARGE_ALLOCATION 524288 /* 304 bytes spare */
7698
7699 #endif /* ! XNU_TARGET_OS_OSX */
7700
7701 /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
7702
7703 /*
7704 * Hashtable entry.
7705 */
7706 struct vfs_drt_hashentry {
7707 u_int64_t dhe_control;
7708 /*
7709 * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
7710 * DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE)
7711 * Since PAGE_SIZE is only known at boot time,
7712 * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k)
7713 * -declare dhe_bitvector array for largest possible length
7714 */
7715 #define MAX_DRT_BITVECTOR_PAGES (1024 * 256)/( 4 * 1024)
7716 u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES / 32];
7717 };
7718
7719 /*
7720 * Hashtable bitvector handling.
7721 *
7722 * Bitvector fields are 32 bits long.
7723 */
7724
7725 #define DRT_HASH_SET_BIT(scm, i, bit) \
7726 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
7727
7728 #define DRT_HASH_CLEAR_BIT(scm, i, bit) \
7729 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
7730
7731 #define DRT_HASH_TEST_BIT(scm, i, bit) \
7732 ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
7733
7734 #define DRT_BITVECTOR_CLEAR(scm, i) \
7735 bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
7736
7737 #define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
7738 bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
7739 &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
7740 (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
7741
7742 /*
7743 * Dirty Region Tracking structure.
7744 *
7745 * The hashtable is allocated entirely inside the DRT structure.
7746 *
7747 * The hash is a simple circular prime modulus arrangement, the structure
7748 * is resized from small to large if it overflows.
7749 */
7750
7751 struct vfs_drt_clustermap {
7752 u_int32_t scm_magic; /* sanity/detection */
7753 #define DRT_SCM_MAGIC 0x12020003
7754 u_int32_t scm_modulus; /* current ring size */
7755 u_int32_t scm_buckets; /* number of occupied buckets */
7756 u_int32_t scm_lastclean; /* last entry we cleaned */
7757 u_int32_t scm_iskips; /* number of slot skips */
7758
7759 struct vfs_drt_hashentry scm_hashtable[0];
7760 };
7761
7762
7763 #define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
7764 #define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
7765
/*
 * Debugging codes and arguments.
 *
 * FSDBG trace points emitted through vfs_drt_trace(); the trailing comment
 * on each code documents the arguments logged with it.
 */
#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
                                                   * dirty */
/* arg1 of the MARK end event encodes the outcome: */
/* 0, setcount */
/* 1 (clean, no map) */
/* 2 (map alloc fail) */
/* 3, resid (partial) */
#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
                                                      * lastclean, iskips */
7782
7783
/* Forward declarations for the DRT implementation below. */
static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
/* locate the slot currently holding 'offset', if any */
static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
    u_int64_t offset, int *indexp);
/* find or allocate the slot for 'offset'; may resize the map unless 'recursed' */
static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
    u_int64_t offset,
    int *indexp,
    int recursed);
/* common worker for marking a page range dirty (dirty=1) or clean (dirty=0) */
static kern_return_t vfs_drt_do_mark_pages(
	void **cmapp,
	u_int64_t offset,
	u_int length,
	u_int *setcountp,
	int dirty);
static void vfs_drt_trace(
	struct vfs_drt_clustermap *cmap,
	int code,
	int arg1,
	int arg2,
	int arg3,
	int arg4);
7805
7806
/*
 * Allocate and initialise a sparse cluster map.
 *
 * Will allocate a new map, resize or compact an existing map.
 *
 * On entry *cmapp is either NULL (allocate a fresh small map) or an
 * existing map (grow it one size if nearly full and memory permits,
 * otherwise reallocate at the same size, which compacts the ring).
 * On success *cmapp points at the new map and the old one is freed.
 *
 * XXX we should probably have at least one intermediate map size,
 * as the 1:16 ratio seems a bit drastic.
 */
static kern_return_t
vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
{
	struct vfs_drt_clustermap *cmap = NULL, *ocmap = NULL;
	kern_return_t kret = KERN_SUCCESS;
	u_int64_t offset = 0;
	u_int32_t i = 0;
	int modulus_size = 0, map_size = 0, active_buckets = 0, index = 0, copycount = 0;

	/* pick up the old map, if the caller passed one in */
	ocmap = NULL;
	if (cmapp != NULL) {
		ocmap = *cmapp;
	}

	/*
	 * Decide on the size of the new map.
	 */
	if (ocmap == NULL) {
		/* no existing map: start with the smallest table */
		modulus_size = DRT_HASH_SMALL_MODULUS;
		map_size = DRT_SMALL_ALLOCATION;
	} else {
		/* count the number of active buckets in the old map */
		active_buckets = 0;
		for (i = 0; i < ocmap->scm_modulus; i++) {
			if (!DRT_HASH_VACANT(ocmap, i) &&
			    (DRT_HASH_GET_COUNT(ocmap, i) != 0)) {
				active_buckets++;
			}
		}
		/*
		 * If we're currently using the small allocation, check to
		 * see whether we should grow to the large one.
		 */
		if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
			/*
			 * If the ring is nearly full and we are allowed to
			 * use the large modulus, upgrade.
			 */
			if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
			    (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
				modulus_size = DRT_HASH_LARGE_MODULUS;
				map_size = DRT_LARGE_ALLOCATION;
			} else {
				/* stay small; reallocating still compacts the ring */
				modulus_size = DRT_HASH_SMALL_MODULUS;
				map_size = DRT_SMALL_ALLOCATION;
			}
		} else if (ocmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
			/* nearly-full large ring upgrades to xlarge on big-memory configs */
			if ((active_buckets > (DRT_HASH_LARGE_MODULUS - 5)) &&
			    (max_mem >= DRT_HASH_XLARGE_MEMORY_REQUIRED)) {
				modulus_size = DRT_HASH_XLARGE_MODULUS;
				map_size = DRT_XLARGE_ALLOCATION;
			} else {
				/*
				 * If the ring is completely full and we can't
				 * expand, there's nothing useful for us to do.
				 * Behave as though we had compacted into the new
				 * array and return.
				 */
				return KERN_SUCCESS;
			}
		} else {
			/* already using the xlarge modulus */
			modulus_size = DRT_HASH_XLARGE_MODULUS;
			map_size = DRT_XLARGE_ALLOCATION;

			/*
			 * If the ring is completely full, there's
			 * nothing useful for us to do.  Behave as
			 * though we had compacted into the new
			 * array and return.
			 */
			if (active_buckets >= DRT_HASH_XLARGE_MODULUS) {
				return KERN_SUCCESS;
			}
		}
	}

	/*
	 * Allocate and initialise the new map.
	 */

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, map_size,
	    KMA_DATA, VM_KERN_MEMORY_FILE);
	if (kret != KERN_SUCCESS) {
		return kret;
	}
	cmap->scm_magic = DRT_SCM_MAGIC;
	cmap->scm_modulus = modulus_size;
	cmap->scm_buckets = 0;
	cmap->scm_lastclean = 0;
	cmap->scm_iskips = 0;
	/* start with every bucket vacant and its bitvector clear */
	for (i = 0; i < cmap->scm_modulus; i++) {
		DRT_HASH_CLEAR(cmap, i);
		DRT_HASH_VACATE(cmap, i);
		DRT_BITVECTOR_CLEAR(cmap, i);
	}

	/*
	 * If there's an old map, re-hash entries from it into the new map.
	 */
	copycount = 0;
	if (ocmap != NULL) {
		for (i = 0; i < ocmap->scm_modulus; i++) {
			/* skip empty buckets */
			if (DRT_HASH_VACANT(ocmap, i) ||
			    (DRT_HASH_GET_COUNT(ocmap, i) == 0)) {
				continue;
			}
			/* get new index (recursed=1: never resize again from here) */
			offset = DRT_HASH_GET_ADDRESS(ocmap, i);
			kret = vfs_drt_get_index(&cmap, offset, &index, 1);
			if (kret != KERN_SUCCESS) {
				/* XXX need to bail out gracefully here */
				panic("vfs_drt: new cluster map mysteriously too small");
				index = 0;	/* not reached: panic() does not return */
			}
			/* copy */
			DRT_HASH_COPY(ocmap, i, cmap, index);
			copycount++;
		}
	}

	/* log what we've done */
	vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);

	/*
	 * It's important to ensure that *cmapp always points to
	 * a valid map, so we must overwrite it before freeing
	 * the old map.
	 */
	*cmapp = cmap;
	if (ocmap != NULL) {
		/* emit stats into trace buffer */
		vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
		    ocmap->scm_modulus,
		    ocmap->scm_buckets,
		    ocmap->scm_lastclean,
		    ocmap->scm_iskips);

		vfs_drt_free_map(ocmap);
	}
	return KERN_SUCCESS;
}
7958
7959
7960 /*
7961 * Free a sparse cluster map.
7962 */
7963 static kern_return_t
vfs_drt_free_map(struct vfs_drt_clustermap * cmap)7964 vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
7965 {
7966 vm_size_t map_size = 0;
7967
7968 if (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
7969 map_size = DRT_SMALL_ALLOCATION;
7970 } else if (cmap->scm_modulus == DRT_HASH_LARGE_MODULUS) {
7971 map_size = DRT_LARGE_ALLOCATION;
7972 } else if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
7973 map_size = DRT_XLARGE_ALLOCATION;
7974 } else {
7975 panic("vfs_drt_free_map: Invalid modulus %d", cmap->scm_modulus);
7976 }
7977
7978 kmem_free(kernel_map, (vm_offset_t)cmap, map_size);
7979 return KERN_SUCCESS;
7980 }
7981
7982
7983 /*
7984 * Find the hashtable slot currently occupied by an entry for the supplied offset.
7985 */
7986 static kern_return_t
vfs_drt_search_index(struct vfs_drt_clustermap * cmap,u_int64_t offset,int * indexp)7987 vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
7988 {
7989 int index;
7990 u_int32_t i;
7991
7992 offset = DRT_ALIGN_ADDRESS(offset);
7993 index = DRT_HASH(cmap, offset);
7994
7995 /* traverse the hashtable */
7996 for (i = 0; i < cmap->scm_modulus; i++) {
7997 /*
7998 * If the slot is vacant, we can stop.
7999 */
8000 if (DRT_HASH_VACANT(cmap, index)) {
8001 break;
8002 }
8003
8004 /*
8005 * If the address matches our offset, we have success.
8006 */
8007 if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
8008 *indexp = index;
8009 return KERN_SUCCESS;
8010 }
8011
8012 /*
8013 * Move to the next slot, try again.
8014 */
8015 index = DRT_HASH_NEXT(cmap, index);
8016 }
8017 /*
8018 * It's not there.
8019 */
8020 return KERN_FAILURE;
8021 }
8022
8023 /*
8024 * Find the hashtable slot for the supplied offset. If we haven't allocated
8025 * one yet, allocate one and populate the address field. Note that it will
8026 * not have a nonzero page count and thus will still technically be free, so
8027 * in the case where we are called to clean pages, the slot will remain free.
8028 */
8029 static kern_return_t
vfs_drt_get_index(struct vfs_drt_clustermap ** cmapp,u_int64_t offset,int * indexp,int recursed)8030 vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
8031 {
8032 struct vfs_drt_clustermap *cmap;
8033 kern_return_t kret;
8034 u_int32_t index;
8035 u_int32_t i;
8036
8037 cmap = *cmapp;
8038
8039 /* look for an existing entry */
8040 kret = vfs_drt_search_index(cmap, offset, indexp);
8041 if (kret == KERN_SUCCESS) {
8042 return kret;
8043 }
8044
8045 /* need to allocate an entry */
8046 offset = DRT_ALIGN_ADDRESS(offset);
8047 index = DRT_HASH(cmap, offset);
8048
8049 /* scan from the index forwards looking for a vacant slot */
8050 for (i = 0; i < cmap->scm_modulus; i++) {
8051 /* slot vacant? */
8052 if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap, index) == 0) {
8053 cmap->scm_buckets++;
8054 if (index < cmap->scm_lastclean) {
8055 cmap->scm_lastclean = index;
8056 }
8057 DRT_HASH_SET_ADDRESS(cmap, index, offset);
8058 DRT_HASH_SET_COUNT(cmap, index, 0);
8059 DRT_BITVECTOR_CLEAR(cmap, index);
8060 *indexp = index;
8061 vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
8062 return KERN_SUCCESS;
8063 }
8064 cmap->scm_iskips += i;
8065 index = DRT_HASH_NEXT(cmap, index);
8066 }
8067
8068 /*
8069 * We haven't found a vacant slot, so the map is full. If we're not
8070 * already recursed, try reallocating/compacting it.
8071 */
8072 if (recursed) {
8073 return KERN_FAILURE;
8074 }
8075 kret = vfs_drt_alloc_map(cmapp);
8076 if (kret == KERN_SUCCESS) {
8077 /* now try to insert again */
8078 kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
8079 }
8080 return kret;
8081 }
8082
/*
 * Implementation of set dirty/clean.
 *
 * In the 'clean' case, not finding a map is OK.
 *
 * private	opaque handle: really a struct vfs_drt_clustermap **
 * offset	byte offset of the first page (assumed page-aligned — see
 *		the public-interface comments above vfs_drt_mark_pages)
 * length	bytes to process; assumed a multiple of PAGE_SIZE
 * setcountp	out (optional): number of page bits actually flipped
 * dirty	nonzero to set dirty bits, zero to clear them
 *
 * May return a partial-success: if the map fills and cannot grow,
 * *setcountp reflects the pages processed so far and the error from
 * vfs_drt_get_index() is returned.
 */
static kern_return_t
vfs_drt_do_mark_pages(
	void **private,
	u_int64_t offset,
	u_int length,
	u_int *setcountp,
	int dirty)
{
	struct vfs_drt_clustermap *cmap, **cmapp;
	kern_return_t kret;
	int i, index, pgoff, pgcount, setcount, ecount;

	cmapp = (struct vfs_drt_clustermap **)private;
	cmap = *cmapp;

	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);

	if (setcountp != NULL) {
		*setcountp = 0;
	}

	/* allocate a cluster map if we don't already have one */
	if (cmap == NULL) {
		/* no cluster map, nothing to clean */
		if (!dirty) {
			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
			return KERN_SUCCESS;
		}
		kret = vfs_drt_alloc_map(cmapp);
		if (kret != KERN_SUCCESS) {
			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
			return kret;
		}
	}
	setcount = 0;

	/*
	 * Iterate over the length of the region, one hashtable bucket
	 * (cluster) at a time.
	 */
	while (length > 0) {
		/*
		 * Get the hashtable index for this offset.
		 *
		 * XXX this will add blank entries if we are clearing a range
		 * that hasn't been dirtied.
		 */
		kret = vfs_drt_get_index(cmapp, offset, &index, 0);
		cmap = *cmapp;	/* may have changed! */
		/* this may be a partial-success return */
		if (kret != KERN_SUCCESS) {
			if (setcountp != NULL) {
				*setcountp = setcount;
			}
			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);

			return kret;
		}

		/*
		 * Work out how many pages we're modifying in this
		 * hashtable entry.
		 */
		pgoff = (int)((offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE);
		pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));

		/*
		 * Iterate over pages, dirty/clearing as we go.  'ecount'
		 * shadows the bucket's summary count and is written back
		 * once per bucket.
		 */
		ecount = DRT_HASH_GET_COUNT(cmap, index);
		for (i = 0; i < pgcount; i++) {
			if (dirty) {
				if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
					if (ecount >= DRT_BITVECTOR_PAGES) {
						panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
					}
					DRT_HASH_SET_BIT(cmap, index, pgoff + i);
					ecount++;
					setcount++;
				}
			} else {
				if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
					if (ecount <= 0) {
						panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i);
					}
					assert(ecount > 0);
					DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
					ecount--;
					setcount++;
				}
			}
		}
		/* write the updated summary count back into the bucket */
		DRT_HASH_SET_COUNT(cmap, index, ecount);

		offset += pgcount * PAGE_SIZE;
		length -= pgcount * PAGE_SIZE;
	}
	if (setcountp != NULL) {
		*setcountp = setcount;
	}

	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);

	return KERN_SUCCESS;
}
8192
/*
 * Mark a set of pages as dirty/clean.
 *
 * This is a public interface.
 *
 * cmapp
 *	Pointer to storage suitable for holding a pointer.  Note that
 *	this must either be NULL or a value set by this function.
 *
 * offset
 *	Offset of the first page to be marked as dirty, in bytes.  Must be
 *	page-aligned.
 *
 * length
 *	Length of dirty region, in bytes.  Must be a multiple of PAGE_SIZE.
 *
 * setcountp
 *	Number of pages newly marked dirty by this call (optional).
 *
 * Returns KERN_SUCCESS if all the pages were successfully marked.
 */
static kern_return_t
vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
{
	/* XXX historical: a 'size' argument has already been dropped from this interface */
	return vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1);
}
8223
#if 0
/*
 * Inverse of vfs_drt_mark_pages(); compiled out because it is currently
 * unused, retained for reference.
 */
static kern_return_t
vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
{
	return vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
}
#endif
8231
/*
 * Get a cluster of dirty pages.
 *
 * This is a public interface.
 *
 * cmapp
 *	Pointer to storage managed by drt_mark_pages.  Note that this must
 *	be NULL or a value set by drt_mark_pages.
 *
 * offsetp
 *	Returns the byte offset into the file of the first page in the cluster.
 *
 * lengthp
 *	Returns the length in bytes of the cluster of dirty pages.
 *
 * Returns success if a cluster was found.  If KERN_FAILURE is returned, there
 * are no dirty pages meeting the minimum size criteria.  Private storage will
 * be released if there are no more dirty pages left in the map
 *
 */
static kern_return_t
vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
{
	struct vfs_drt_clustermap *cmap;
	u_int64_t offset;
	u_int length;
	u_int32_t j;
	int index, i, fs, ls;

	/* sanity */
	if ((cmapp == NULL) || (*cmapp == NULL)) {
		return KERN_FAILURE;
	}
	cmap = *cmapp;

	/*
	 * Walk the hashtable: 'offset' advances one cluster per iteration
	 * and is hashed to a slot.  NOTE(review): this visits every slot
	 * only when the per-cluster stride and scm_modulus are coprime;
	 * the modulus is described as prime (see structure comment above),
	 * which guarantees that unless the stride is a multiple of it —
	 * confirm against the modulus definitions.
	 */
	for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
		index = DRT_HASH(cmap, offset);

		/* skip buckets with no dirty pages */
		if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) {
			continue;
		}

		/* scan the bitfield for a string of bits */
		fs = -1;

		for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
			if (DRT_HASH_TEST_BIT(cmap, index, i)) {
				fs = i;		/* first set bit */
				break;
			}
		}
		if (fs == -1) {
			/* didn't find any bits set */
			panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld",
			    cmap, index, DRT_HASH_GET_COUNT(cmap, index));
		}
		/* measure the run of consecutive set bits starting at fs */
		for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
			if (!DRT_HASH_TEST_BIT(cmap, index, i)) {
				break;
			}
		}

		/* compute offset and length, mark pages clean */
		/* (note: 'offset' is re-purposed here as the result value) */
		offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
		length = ls * PAGE_SIZE;
		vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
		cmap->scm_lastclean = index;

		/* return successful */
		*offsetp = (off_t)offset;
		*lengthp = length;

		vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
		return KERN_SUCCESS;
	}
	/*
	 * We didn't find anything... hashtable is empty
	 * emit stats into trace buffer and
	 * then free it
	 */
	vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
	    cmap->scm_modulus,
	    cmap->scm_buckets,
	    cmap->scm_lastclean,
	    cmap->scm_iskips);

	vfs_drt_free_map(cmap);
	*cmapp = NULL;

	return KERN_FAILURE;
}
8324
8325
8326 static kern_return_t
vfs_drt_control(void ** cmapp,int op_type)8327 vfs_drt_control(void **cmapp, int op_type)
8328 {
8329 struct vfs_drt_clustermap *cmap;
8330
8331 /* sanity */
8332 if ((cmapp == NULL) || (*cmapp == NULL)) {
8333 return KERN_FAILURE;
8334 }
8335 cmap = *cmapp;
8336
8337 switch (op_type) {
8338 case 0:
8339 /* emit stats into trace buffer */
8340 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
8341 cmap->scm_modulus,
8342 cmap->scm_buckets,
8343 cmap->scm_lastclean,
8344 cmap->scm_iskips);
8345
8346 vfs_drt_free_map(cmap);
8347 *cmapp = NULL;
8348 break;
8349
8350 case 1:
8351 cmap->scm_lastclean = 0;
8352 break;
8353 }
8354 return KERN_SUCCESS;
8355 }
8356
8357
8358
/*
 * Emit a summary of the state of the clustermap into the trace buffer
 * along with some caller-provided data.
 */
#if KDEBUG
static void
vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
{
	KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
}
#else
/* tracing compiled out: empty stub keeps call sites unconditional */
static void
vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
    __unused int arg1, __unused int arg2, __unused int arg3,
    __unused int arg4)
{
}
#endif
8377
#if 0
/*
 * Perform basic sanity check on the hash entry summary count
 * vs. the actual bits set in the entry.  Compiled out; debugging aid.
 */
static void
vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
{
	int index, i;
	int bits_on;

	/* for each occupied bucket, recount the set bits in its bitvector */
	for (index = 0; index < cmap->scm_modulus; index++) {
		if (DRT_HASH_VACANT(cmap, index)) {
			continue;
		}

		for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
			if (DRT_HASH_TEST_BIT(cmap, index, i)) {
				bits_on++;
			}
		}
		/* the cached summary count must match the recount */
		if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) {
			panic("bits_on = %d, index = %d", bits_on, index);
		}
	}
}
#endif
8405
8406 /*
8407 * Internal interface only.
8408 */
8409 static kern_return_t
vfs_get_scmap_push_behavior_internal(void ** cmapp,int * push_flag)8410 vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag)
8411 {
8412 struct vfs_drt_clustermap *cmap;
8413
8414 /* sanity */
8415 if ((cmapp == NULL) || (*cmapp == NULL) || (push_flag == NULL)) {
8416 return KERN_FAILURE;
8417 }
8418 cmap = *cmapp;
8419
8420 if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) {
8421 /*
8422 * If we have a full xlarge sparse cluster,
8423 * we push it out all at once so the cluster
8424 * map can be available to absorb more I/Os.
8425 * This is done on large memory configs so
8426 * the small I/Os don't interfere with the
8427 * pro workloads.
8428 */
8429 *push_flag = PUSH_ALL;
8430 }
8431 return KERN_SUCCESS;
8432 }
8433