xref: /xnu-10002.81.5/bsd/miscfs/specfs/spec_vnops.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2000-2019 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1989, 1993, 1995
31  *	The Regents of the University of California.  All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  * 3. All advertising materials mentioning features or use of this software
42  *    must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Berkeley and its contributors.
45  * 4. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
62  */
63 
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/kauth.h>
67 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/conf.h>
70 #include <sys/buf_internal.h>
71 #include <sys/mount_internal.h>
72 #include <sys/vnode_internal.h>
73 #include <sys/file_internal.h>
74 #include <sys/namei.h>
75 #include <sys/stat.h>
76 #include <sys/errno.h>
77 #include <sys/ioctl.h>
78 #include <sys/file.h>
79 #include <sys/user.h>
80 #include <sys/malloc.h>
81 #include <sys/disk.h>
82 #include <sys/uio_internal.h>
83 #include <sys/resource.h>
84 #include <machine/machine_routines.h>
85 #include <miscfs/specfs/specdev.h>
86 #include <vfs/vfs_support.h>
87 #include <vfs/vfs_disk_conditioner.h>
88 
89 #include <kern/assert.h>
90 #include <kern/task.h>
91 #include <kern/sched_prim.h>
92 #include <kern/thread.h>
93 #include <kern/policy_internal.h>
94 #include <kern/timer_call.h>
95 #include <kern/waitq.h>
96 
97 #include <pexpert/pexpert.h>
98 
99 #include <sys/kdebug.h>
100 #include <libkern/section_keywords.h>
101 
102 #if CONFIG_IO_COMPRESSION_STATS
103 #include <vfs/vfs_io_compression_stats.h>
104 #endif /* CONFIG_IO_COMPRESSION_STATS */
105 
106 /* XXX following prototypes should be in a header file somewhere */
107 extern dev_t    chrtoblk(dev_t dev);
108 extern boolean_t        iskmemdev(dev_t dev);
109 extern int bpfkqfilter(dev_t dev, struct knote *kn);
110 extern int ptsd_kqfilter(dev_t, struct knote *);
111 extern int ptmx_kqfilter(dev_t, struct knote *);
112 #if CONFIG_PHYS_WRITE_ACCT
113 uint64_t kernel_pm_writes;    // to track the sync writes occurring during power management transitions
114 #endif /* CONFIG_PHYS_WRITE_ACCT */
115 
116 
117 struct vnode *speclisth[SPECHSZ];
118 
119 /* symbolic sleep message strings for devices */
120 char    devopn[] = "devopn";
121 char    devio[] = "devio";
122 char    devwait[] = "devwait";
123 char    devin[] = "devin";
124 char    devout[] = "devout";
125 char    devioc[] = "devioc";
126 char    devcls[] = "devcls";
127 
128 #define VOPFUNC int (*)(void *)
129 
130 int(**spec_vnodeop_p)(void *);
131 const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
132 	{ .opve_op = &vnop_default_desc, .opve_impl = (VOPFUNC)(void (*)(void))vn_default_error },
133 	{ .opve_op = &vnop_lookup_desc, .opve_impl = (VOPFUNC)spec_lookup },            /* lookup */
134 	{ .opve_op = &vnop_create_desc, .opve_impl = (VOPFUNC)err_create },             /* create */
135 	{ .opve_op = &vnop_mknod_desc, .opve_impl = (VOPFUNC)err_mknod },               /* mknod */
136 	{ .opve_op = &vnop_open_desc, .opve_impl = (VOPFUNC)spec_open },                        /* open */
137 	{ .opve_op = &vnop_close_desc, .opve_impl = (VOPFUNC)spec_close },              /* close */
138 	{ .opve_op = &vnop_access_desc, .opve_impl = (VOPFUNC)spec_access },            /* access */
139 	{ .opve_op = &vnop_getattr_desc, .opve_impl = (VOPFUNC)spec_getattr },          /* getattr */
140 	{ .opve_op = &vnop_setattr_desc, .opve_impl = (VOPFUNC)spec_setattr },          /* setattr */
141 	{ .opve_op = &vnop_read_desc, .opve_impl = (VOPFUNC)spec_read },                        /* read */
142 	{ .opve_op = &vnop_write_desc, .opve_impl = (VOPFUNC)spec_write },              /* write */
143 	{ .opve_op = &vnop_ioctl_desc, .opve_impl = (VOPFUNC)spec_ioctl },              /* ioctl */
144 	{ .opve_op = &vnop_select_desc, .opve_impl = (VOPFUNC)spec_select },            /* select */
145 	{ .opve_op = &vnop_revoke_desc, .opve_impl = (VOPFUNC)nop_revoke },             /* revoke */
146 	{ .opve_op = &vnop_mmap_desc, .opve_impl = (VOPFUNC)err_mmap },                 /* mmap */
147 	{ .opve_op = &vnop_fsync_desc, .opve_impl = (VOPFUNC)spec_fsync },              /* fsync */
148 	{ .opve_op = &vnop_remove_desc, .opve_impl = (VOPFUNC)err_remove },             /* remove */
149 	{ .opve_op = &vnop_link_desc, .opve_impl = (VOPFUNC)err_link },                 /* link */
150 	{ .opve_op = &vnop_rename_desc, .opve_impl = (VOPFUNC)err_rename },             /* rename */
151 	{ .opve_op = &vnop_mkdir_desc, .opve_impl = (VOPFUNC)err_mkdir },               /* mkdir */
152 	{ .opve_op = &vnop_rmdir_desc, .opve_impl = (VOPFUNC)err_rmdir },               /* rmdir */
153 	{ .opve_op = &vnop_symlink_desc, .opve_impl = (VOPFUNC)err_symlink },           /* symlink */
154 	{ .opve_op = &vnop_readdir_desc, .opve_impl = (VOPFUNC)err_readdir },           /* readdir */
155 	{ .opve_op = &vnop_readlink_desc, .opve_impl = (VOPFUNC)err_readlink },         /* readlink */
156 	{ .opve_op = &vnop_inactive_desc, .opve_impl = (VOPFUNC)nop_inactive },         /* inactive */
157 	{ .opve_op = &vnop_reclaim_desc, .opve_impl = (VOPFUNC)nop_reclaim },           /* reclaim */
158 	{ .opve_op = &vnop_strategy_desc, .opve_impl = (VOPFUNC)spec_strategy },                /* strategy */
159 	{ .opve_op = &vnop_pathconf_desc, .opve_impl = (VOPFUNC)spec_pathconf },                /* pathconf */
160 	{ .opve_op = &vnop_advlock_desc, .opve_impl = (VOPFUNC)err_advlock },           /* advlock */
161 	{ .opve_op = &vnop_bwrite_desc, .opve_impl = (VOPFUNC)spec_bwrite },            /* bwrite */
162 	{ .opve_op = &vnop_pagein_desc, .opve_impl = (VOPFUNC)err_pagein },             /* Pagein */
163 	{ .opve_op = &vnop_pageout_desc, .opve_impl = (VOPFUNC)err_pageout },           /* Pageout */
164 	{ .opve_op = &vnop_copyfile_desc, .opve_impl = (VOPFUNC)err_copyfile },         /* Copyfile */
165 	{ .opve_op = &vnop_blktooff_desc, .opve_impl = (VOPFUNC)spec_blktooff },                /* blktooff */
166 	{ .opve_op = &vnop_offtoblk_desc, .opve_impl = (VOPFUNC)spec_offtoblk },                /* offtoblk */
167 	{ .opve_op = &vnop_blockmap_desc, .opve_impl = (VOPFUNC)spec_blockmap },                /* blockmap */
168 	{ .opve_op = (struct vnodeop_desc*)NULL, .opve_impl = (int (*)(void *))NULL }
169 };
170 const struct vnodeopv_desc spec_vnodeop_opv_desc =
171 { .opv_desc_vector_p = &spec_vnodeop_p, .opv_desc_ops = spec_vnodeop_entries };
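/*
 * Note on the table above: directory-style operations that make no sense on a
 * device node (create, mknod, mmap, remove, link, rename, mkdir, rmdir,
 * symlink, readdir, readlink, advlock, pagein, pageout, copyfile) are wired to
 * the generic err_* stubs, while revoke/inactive/reclaim use the nop_*
 * variants that succeed without doing anything.  spec_vnodeop_opv_desc is
 * consumed at VFS initialization to populate the dispatch vector that
 * spec_vnodeop_p points to.
 */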
172 
173 
174 static void set_blocksize(vnode_t, dev_t);
175 
176 #define LOWPRI_TIER1_WINDOW_MSECS         25
177 #define LOWPRI_TIER2_WINDOW_MSECS         100
178 #define LOWPRI_TIER3_WINDOW_MSECS         500
179 
180 #define LOWPRI_TIER1_IO_PERIOD_MSECS      40
181 #define LOWPRI_TIER2_IO_PERIOD_MSECS      85
182 #define LOWPRI_TIER3_IO_PERIOD_MSECS      200
183 
184 #define LOWPRI_TIER1_IO_PERIOD_SSD_MSECS  5
185 #define LOWPRI_TIER2_IO_PERIOD_SSD_MSECS  15
186 #define LOWPRI_TIER3_IO_PERIOD_SSD_MSECS  25
187 
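/*
 * Informal summary of how the two sets of tunables above interact (inferred
 * from the timer logic later in this file): a "window" is how long a tier
 * stays throttled after an I/O is issued at a higher-priority tier, and an
 * "I/O period" is the minimum spacing enforced between I/Os once a tier is
 * being throttled.  For example, with the defaults a TIER3 thread can be held
 * off for up to 500 msecs after higher-tier I/O and is then metered to roughly
 * one I/O every 200 msecs (25 msecs if the device is an SSD).
 */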
188 
189 int     throttle_windows_msecs[THROTTLE_LEVEL_END + 1] = {
190 	0,
191 	LOWPRI_TIER1_WINDOW_MSECS,
192 	LOWPRI_TIER2_WINDOW_MSECS,
193 	LOWPRI_TIER3_WINDOW_MSECS,
194 };
195 
196 int     throttle_io_period_msecs[THROTTLE_LEVEL_END + 1] = {
197 	0,
198 	LOWPRI_TIER1_IO_PERIOD_MSECS,
199 	LOWPRI_TIER2_IO_PERIOD_MSECS,
200 	LOWPRI_TIER3_IO_PERIOD_MSECS,
201 };
202 
203 int     throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = {
204 	0,
205 	LOWPRI_TIER1_IO_PERIOD_SSD_MSECS,
206 	LOWPRI_TIER2_IO_PERIOD_SSD_MSECS,
207 	LOWPRI_TIER3_IO_PERIOD_SSD_MSECS,
208 };
209 
210 
211 int     throttled_count[THROTTLE_LEVEL_END + 1];
212 
213 struct _throttle_io_info_t {
214 	lck_mtx_t       throttle_lock;
215 
216 	struct timeval  throttle_last_write_timestamp;
217 	struct timeval  throttle_min_timer_deadline;
218 	struct timeval  throttle_window_start_timestamp[THROTTLE_LEVEL_END + 1]; /* window starts at both the beginning and completion of an I/O */
219 	struct timeval  throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1];
220 	pid_t           throttle_last_IO_pid[THROTTLE_LEVEL_END + 1];
221 	struct timeval  throttle_start_IO_period_timestamp[THROTTLE_LEVEL_END + 1];
222 	int32_t throttle_inflight_count[THROTTLE_LEVEL_END + 1];
223 
224 	TAILQ_HEAD(, uthread) throttle_uthlist[THROTTLE_LEVEL_END + 1];         /* Lists of throttled uthreads */
225 	int             throttle_next_wake_level;
226 
227 	thread_call_t   throttle_timer_call;
228 	int32_t throttle_timer_ref;
229 	int32_t throttle_timer_active;
230 
231 	int32_t throttle_io_count;
232 	int32_t throttle_io_count_begin;
233 	int    *throttle_io_periods;
234 	uint32_t throttle_io_period_num;
235 
236 	int32_t throttle_refcnt;
237 	int32_t throttle_alloc;
238 	int32_t throttle_disabled;
239 	int32_t throttle_is_fusion_with_priority;
240 };
241 
242 struct _throttle_io_info_t _throttle_io_info[LOWPRI_MAX_NUM_DEV];
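/*
 * _throttle_io_info is indexed by the per-device "devbsdunit" that
 * spec_init_bsdunit() and throttle_info_ref_by_mask() derive from the lowest
 * set bit of the DKIOCGETTHROTTLEMASK value, so each physical device (up to
 * LOWPRI_MAX_NUM_DEV of them) gets its own statically allocated throttle state.
 */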
243 
244 
245 int     lowpri_throttle_enabled = 1;
246 
247 
248 static void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level);
249 static int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap);
250 static int throttle_get_thread_throttle_level(uthread_t ut);
251 static int throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier);
252 void throttle_info_mount_reset_period(mount_t mp, int isssd);
253 
254 /*
255  * Trivial lookup routine that always fails.
256  */
257 int
258 spec_lookup(struct vnop_lookup_args *ap)
259 {
260 	*ap->a_vpp = NULL;
261 	return ENOTDIR;
262 }
263 
264 static void
265 set_blocksize(struct vnode *vp, dev_t dev)
266 {
267 	int (*size)(dev_t);
268 	int rsize;
269 
270 	if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
271 		rsize = (*size)(dev);
272 		if (rsize <= 0) { /* did size fail? */
273 			vp->v_specsize = DEV_BSIZE;
274 		} else {
275 			vp->v_specsize = rsize;
276 		}
277 	} else {
278 		vp->v_specsize = DEV_BSIZE;
279 	}
280 }
281 
282 void
283 set_fsblocksize(struct vnode *vp)
284 {
285 	if (vp->v_type == VBLK) {
286 		dev_t dev = (dev_t)vp->v_rdev;
287 		int maj = major(dev);
288 
289 		if ((u_int)maj >= (u_int)nblkdev) {
290 			return;
291 		}
292 
293 		vnode_lock(vp);
294 		set_blocksize(vp, dev);
295 		vnode_unlock(vp);
296 	}
297 }
298 
299 static void
300 spec_init_bsdunit(vnode_t vp, vfs_context_t ctx, const char* caller)
301 {
302 	int     isssd = 0;
303 	uint64_t throttle_mask = 0;
304 	uint32_t devbsdunit = 0;
305 
306 	if (VNOP_IOCTL(vp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx)) {
307 		isssd = 0;
308 	}
309 	if (VNOP_IOCTL(vp, DKIOCGETTHROTTLEMASK, (caddr_t)&throttle_mask, 0, NULL)) {
310 		throttle_mask = 0;
311 	}
312 
313 	if (throttle_mask != 0) {
314 		/*
315 		 * as a reasonable approximation, only use the lowest bit of the mask
316 		 * to generate a disk unit number
317 		 */
318 		devbsdunit = num_trailing_0(throttle_mask);
319 	} else {
320 		devbsdunit = 0;
321 	}
322 
323 	if (vp->v_un.vu_specinfo->si_initted == 0) {
324 		vnode_lock(vp);
325 		if (vp->v_un.vu_specinfo->si_initted == 0) {
326 			vp->v_un.vu_specinfo->si_isssd = isssd ? 1 : 0;
327 			vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit;
328 			vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask;
329 			vp->v_un.vu_specinfo->si_throttleable = 1;
330 			vp->v_un.vu_specinfo->si_initted = 1;
331 		}
332 		vnode_unlock(vp);
333 		printf("%s : si_devbsdunit initialized to (%d), throttle_mask is (0x%llx), isssd is (%d)\n",
334 		    caller, vp->v_un.vu_specinfo->si_devbsdunit,
335 		    vp->v_un.vu_specinfo->si_throttle_mask,
336 		    vp->v_un.vu_specinfo->si_isssd);
337 	}
338 }
339 
340 #define SPEC_INIT_BSDUNIT(vp, ctx) spec_init_bsdunit((vp), (ctx), __FUNCTION__)
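/*
 * The wrapper macro passes __FUNCTION__ as 'caller' so the one-time
 * "si_devbsdunit initialized" printf in spec_init_bsdunit() identifies which
 * code path (e.g. spec_open) performed the initialization.
 */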
341 
342 /*
343  * Open a special file.
344  */
345 int
346 spec_open(struct vnop_open_args *ap)
347 {
348 	struct proc *p = vfs_context_proc(ap->a_context);
349 	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
350 	struct vnode *vp = ap->a_vp;
351 	dev_t bdev, dev = (dev_t)vp->v_rdev;
352 	int maj = major(dev);
353 	int error;
354 
355 	/*
356 	 * Don't allow open if fs is mounted -nodev.
357 	 */
358 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) {
359 		return ENXIO;
360 	}
361 
362 	switch (vp->v_type) {
363 	case VCHR:
364 		if ((u_int)maj >= (u_int)nchrdev) {
365 			return ENXIO;
366 		}
367 		if (cred != FSCRED && (ap->a_mode & FWRITE)) {
368 #if 0
369 			/*
370 			 * When running in very secure mode, do not allow
371 			 * opens for writing of any disk character devices.
372 			 */
373 			if (securelevel >= 2 && isdisk(dev, VCHR)) {
374 				return EPERM;
375 			}
376 #endif
377 
378 			/* Never allow writing to /dev/mem or /dev/kmem */
379 			if (iskmemdev(dev)) {
380 				return EPERM;
381 			}
382 			/*
383 			 * When running in secure mode, do not allow opens for
384 			 * writing of character devices whose corresponding block
385 			 * devices are currently mounted.
386 			 */
387 			if (securelevel >= 1) {
388 				if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error)) {
389 					return error;
390 				}
391 			}
392 		}
393 
394 		devsw_lock(dev, S_IFCHR);
395 		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
396 
397 		if (error == 0) {
398 			vp->v_specinfo->si_opencount++;
399 		}
400 
401 		devsw_unlock(dev, S_IFCHR);
402 
403 		if (error == 0 && cdevsw[maj].d_type == D_DISK && !vp->v_un.vu_specinfo->si_initted) {
404 			int     isssd = 0;
405 			uint64_t throttle_mask = 0;
406 			uint32_t devbsdunit = 0;
407 
408 			if (VNOP_IOCTL(vp, DKIOCGETTHROTTLEMASK, (caddr_t)&throttle_mask, 0, NULL) == 0) {
409 				if (throttle_mask != 0 &&
410 				    VNOP_IOCTL(vp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ap->a_context) == 0) {
411 					/*
412 					 * as a reasonable approximation, only use the lowest bit of the mask
413 					 * to generate a disk unit number
414 					 */
415 					devbsdunit = num_trailing_0(throttle_mask);
416 
417 					vnode_lock(vp);
418 
419 					vp->v_un.vu_specinfo->si_isssd = isssd ? 1 : 0;
420 					vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit;
421 					vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask;
422 					vp->v_un.vu_specinfo->si_throttleable = 1;
423 					vp->v_un.vu_specinfo->si_initted = 1;
424 
425 					vnode_unlock(vp);
426 				}
427 			}
428 			if (vp->v_un.vu_specinfo->si_initted == 0) {
429 				vnode_lock(vp);
430 				vp->v_un.vu_specinfo->si_initted = 1;
431 				vnode_unlock(vp);
432 			}
433 		}
434 		return error;
435 
436 	case VBLK:
437 		if ((u_int)maj >= (u_int)nblkdev) {
438 			return ENXIO;
439 		}
440 		/*
441 		 * When running in very secure mode, do not allow
442 		 * opens for writing of any disk block devices.
443 		 */
444 		if (securelevel >= 2 && cred != FSCRED &&
445 		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) {
446 			return EPERM;
447 		}
448 		/*
449 		 * Do not allow opens of block devices that are
450 		 * currently mounted.
451 		 */
452 		if ((error = vfs_mountedon(vp))) {
453 			return error;
454 		}
455 
456 		devsw_lock(dev, S_IFBLK);
457 		error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
458 		if (!error) {
459 			vp->v_specinfo->si_opencount++;
460 		}
461 		devsw_unlock(dev, S_IFBLK);
462 
463 		if (!error) {
464 			u_int64_t blkcnt;
465 			u_int32_t blksize;
466 			int setsize = 0;
467 			u_int32_t size512 = 512;
468 
469 			if (bdevsw[maj].d_type == D_DISK && !vp->v_un.vu_specinfo->si_initted) {
470 				SPEC_INIT_BSDUNIT(vp, ap->a_context);
471 			}
472 
473 			if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) {
474 				/* Switch to 512 byte sectors (temporarily) */
475 
476 				if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) {
477 					/* Get the number of 512 byte physical blocks. */
478 					if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) {
479 						setsize = 1;
480 					}
481 				}
482 				/* If it doesn't set back, we can't recover */
483 				if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context)) {
484 					error = ENXIO;
485 				}
486 			}
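			/*
			 * Net effect of the probe above: the device is briefly switched to
			 * 512-byte logical blocks so that DKIOCGETBLOCKCOUNT reports a count
			 * in 512-byte units, then the original block size is restored.  If
			 * the count was obtained (setsize), v_specdevsize below becomes
			 * blkcnt * 512 bytes; otherwise it stays 0, meaning "unknown".
			 */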
487 
488 
489 			vnode_lock(vp);
490 			set_blocksize(vp, dev);
491 
492 			/*
493 			 * Cache the size in bytes of the block device for later
494 			 * use by spec_write().
495 			 */
496 			if (setsize) {
497 				vp->v_specdevsize = blkcnt * (u_int64_t)size512;
498 			} else {
499 				vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */
500 			}
501 			vnode_unlock(vp);
502 		}
503 		return error;
504 	default:
505 		panic("spec_open type");
506 	}
507 	return 0;
508 }
509 
510 /*
511  * Vnode op for read
512  */
513 int
514 spec_read(struct vnop_read_args *ap)
515 {
516 	struct vnode *vp = ap->a_vp;
517 	struct uio *uio = ap->a_uio;
518 	struct buf *bp;
519 	daddr64_t bn, nextbn;
520 	long bscale;
521 	int devBlockSize = 0;
522 	size_t bsize, n, on;
523 	int error = 0;
524 	dev_t dev;
525 
526 #if DIAGNOSTIC
527 	if (uio->uio_rw != UIO_READ) {
528 		panic("spec_read mode");
529 	}
530 	if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
531 		panic("spec_read proc");
532 	}
533 #endif
534 	if (uio_resid(uio) == 0) {
535 		return 0;
536 	}
537 
538 	switch (vp->v_type) {
539 	case VCHR:
540 	{
541 		struct _throttle_io_info_t *throttle_info = NULL;
542 		int thread_throttle_level;
543 		uint64_t blkno = 0;
544 		uint32_t iolen = 0;
545 		int ddisk = 0;
546 		int ktrace_code = DKIO_READ;
547 		devBlockSize = vp->v_specsize;
548 		uintptr_t our_id = 0;
549 
550 		if (cdevsw[major(vp->v_rdev)].d_type == D_DISK) {
551 			ddisk = 1;
552 		}
553 
554 		if (ddisk && vp->v_un.vu_specinfo->si_throttleable) {
555 			throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
556 			thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL);
557 		}
558 
559 		if (kdebug_enable && ddisk) {
560 			if (devBlockSize == 0) {
561 				devBlockSize = 512;  // default sector size
562 			}
563 
564 			if (uio_offset(uio) && devBlockSize) {
565 				blkno = ((uint64_t) uio_offset(uio) / ((uint64_t)devBlockSize));
566 			}
567 			iolen = (int) uio_resid(uio);
568 			our_id = (uintptr_t)thread_tid(current_thread());
569 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
570 			    (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id,
571 			    vp->v_rdev, blkno, iolen, 0);
572 		}
573 
574 		error = (*cdevsw[major(vp->v_rdev)].d_read)
575 		    (vp->v_rdev, uio, ap->a_ioflag);
576 
577 
578 		if (kdebug_enable && ddisk) {
579 			uint32_t residual = (uint32_t)uio_resid(uio);
580 			ktrace_code |= DKIO_DONE;
581 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
582 			    (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id,
583 			    (uintptr_t)VM_KERNEL_ADDRPERM(vp), residual, error, 0);
584 		}
585 
586 		if (throttle_info) {
587 			throttle_info_end_io_internal(throttle_info, thread_throttle_level);
588 		}
589 
590 		return error;
591 	}
592 
593 	case VBLK:
594 		if (uio->uio_offset < 0) {
595 			return EINVAL;
596 		}
597 
598 		dev = vp->v_rdev;
599 
600 		devBlockSize = vp->v_specsize;
601 
602 		if (devBlockSize > PAGE_SIZE) {
603 			return EINVAL;
604 		}
605 
606 		bscale = PAGE_SIZE / devBlockSize;
607 		bsize = bscale * devBlockSize;
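		/*
		 * Worked example of the arithmetic below (assuming a 512-byte device
		 * block size and 4K pages): bscale = 8 and bsize = 4096, so a read at
		 * uio_offset 6144 computes on = 2048 and bn = (12 & ~7) = 8, i.e. the
		 * page-sized buffer starting at device block 8 (byte 4096), copied out
		 * from offset 2048 within that buffer.
		 */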
608 
609 		do {
610 			on = uio->uio_offset % bsize;
611 
612 			bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~(bscale - 1));
613 
614 			if (vp->v_speclastr + bscale == bn) {
615 				nextbn = bn + bscale;
616 				error = buf_breadn(vp, bn, (int)bsize, &nextbn,
617 				    (int *)&bsize, 1, NOCRED, &bp);
618 			} else {
619 				error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp);
620 			}
621 
622 			vnode_lock(vp);
623 			vp->v_speclastr = bn;
624 			vnode_unlock(vp);
625 
626 			n = bsize - buf_resid(bp);
627 			if ((on > n) || error) {
628 				if (!error) {
629 					error = EINVAL;
630 				}
631 				buf_brelse(bp);
632 				return error;
633 			}
634 			n = MIN((n  - on), (size_t)uio_resid(uio));
635 
636 			error = uiomove((char *)buf_dataptr(bp) + on, (int)n, uio);
637 			if (n + on == bsize) {
638 				buf_markaged(bp);
639 			}
640 			buf_brelse(bp);
641 		} while (error == 0 && uio_resid(uio) > 0 && n != 0);
642 		return error;
643 
644 	default:
645 		panic("spec_read type");
646 	}
647 	/* NOTREACHED */
648 
649 	return 0;
650 }
651 
652 /*
653  * Vnode op for write
654  */
655 int
656 spec_write(struct vnop_write_args *ap)
657 {
658 	struct vnode *vp = ap->a_vp;
659 	struct uio *uio = ap->a_uio;
660 	struct buf *bp;
661 	daddr64_t bn;
662 	int blkmask, bscale;
663 	int io_sync;
664 	int devBlockSize = 0;
665 	size_t bsize, n, on;
666 	int error = 0;
667 	dev_t dev;
668 
669 #if DIAGNOSTIC
670 	if (uio->uio_rw != UIO_WRITE) {
671 		panic("spec_write mode");
672 	}
673 	if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
674 		panic("spec_write proc");
675 	}
676 #endif
677 
678 	switch (vp->v_type) {
679 	case VCHR:
680 	{
681 		struct _throttle_io_info_t *throttle_info = NULL;
682 		int thread_throttle_level;
683 		dev = vp->v_rdev;
684 		devBlockSize = vp->v_specsize;
685 		uint32_t iolen = 0;
686 		uint64_t blkno = 0;
687 		int ddisk = 0;
688 		int ktrace_code = 0;  // write is implied; read must be OR'd in.
689 		uintptr_t our_id = 0;
690 
691 		if (cdevsw[major(dev)].d_type == D_DISK) {
692 			ddisk = 1;
693 		}
694 
695 		if (ddisk && vp->v_un.vu_specinfo->si_throttleable) {
696 			throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
697 
698 			thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL);
699 
700 			microuptime(&throttle_info->throttle_last_write_timestamp);
701 		}
702 
703 		if (kdebug_enable && ddisk) {
704 			if (devBlockSize == 0) {
705 				devBlockSize = 512; // default sector size
706 			}
707 			if ((uio_offset(uio) != 0) && devBlockSize) {
708 				blkno = ((uint64_t)uio_offset(uio)) / ((uint64_t)devBlockSize);
709 			}
710 			iolen = (int)uio_resid(uio);
711 			our_id = (uintptr_t)thread_tid(current_thread());
712 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
713 			    (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id,
714 			    vp->v_rdev, blkno, iolen, 0);
715 		}
716 		error = (*cdevsw[major(vp->v_rdev)].d_write)
717 		    (vp->v_rdev, uio, ap->a_ioflag);
718 
719 		if (kdebug_enable && ddisk) {
720 			//emit the I/O completion
721 			uint32_t residual = (uint32_t)uio_resid(uio);
722 			ktrace_code |= DKIO_DONE;
723 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
724 			    (FSDBG_CODE(DBG_DKRW, ktrace_code)) | DBG_FUNC_NONE, our_id,
725 			    (uintptr_t)VM_KERNEL_ADDRPERM(vp), residual, error, 0);
726 		}
727 
728 		if (throttle_info) {
729 			throttle_info_end_io_internal(throttle_info, thread_throttle_level);
730 		}
731 
732 		return error;
733 	}
734 
735 	case VBLK:
736 		if (uio_resid(uio) == 0) {
737 			return 0;
738 		}
739 		if (uio->uio_offset < 0) {
740 			return EINVAL;
741 		}
742 
743 		io_sync = (ap->a_ioflag & IO_SYNC);
744 
745 		dev = (vp->v_rdev);
746 
747 		devBlockSize = vp->v_specsize;
748 		if (devBlockSize > PAGE_SIZE) {
749 			return EINVAL;
750 		}
751 
752 		bscale = PAGE_SIZE / devBlockSize;
753 		blkmask = bscale - 1;
754 		bsize = bscale * devBlockSize;
755 
756 
757 		do {
758 			bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~blkmask);
759 			on = uio->uio_offset % bsize;
760 
761 			n = MIN((bsize - on), (size_t)uio_resid(uio));
762 
763 			/*
764 			 * Use buf_getblk() as an optimization IFF:
765 			 *
766 			 * 1)	We are writing exactly a block on a block
767 			 *	aligned boundary
768 			 * 2)	We know the size of the device from spec_open
769 			 * 3)	The write doesn't span the end of the device
770 			 *
771 			 * Otherwise, we fall back on buf_bread().
772 			 */
773 			if (n == bsize &&
774 			    vp->v_specdevsize != (u_int64_t)0 &&
775 			    (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
776 				/* reduce the size of the read to what is there */
777 				n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
778 			}
779 
780 			if (n == bsize) {
781 				bp = buf_getblk(vp, bn, (int)bsize, 0, 0, BLK_WRITE);
782 			} else {
783 				error = (int)buf_bread(vp, bn, (int)bsize, NOCRED, &bp);
784 			}
785 
786 			/* Translate downstream error for upstream, if needed */
787 			if (!error) {
788 				error = (int)buf_error(bp);
789 			}
790 			if (error) {
791 				buf_brelse(bp);
792 				return error;
793 			}
794 			n = MIN(n, bsize - buf_resid(bp));
795 
796 			error = uiomove((char *)buf_dataptr(bp) + on, (int)n, uio);
797 			if (error) {
798 				buf_brelse(bp);
799 				return error;
800 			}
801 			buf_markaged(bp);
802 
803 			if (io_sync) {
804 				error = buf_bwrite(bp);
805 			} else {
806 				if ((n + on) == bsize) {
807 					error = buf_bawrite(bp);
808 				} else {
809 					error = buf_bdwrite(bp);
810 				}
811 			}
812 		} while (error == 0 && uio_resid(uio) > 0 && n != 0);
813 		return error;
814 
815 	default:
816 		panic("spec_write type");
817 	}
818 	/* NOTREACHED */
819 
820 	return 0;
821 }
822 
823 /*
824  * Device ioctl operation.
825  */
826 int
827 spec_ioctl(struct vnop_ioctl_args *ap)
828 {
829 	proc_t p = vfs_context_proc(ap->a_context);
830 	dev_t dev = ap->a_vp->v_rdev;
831 	int     retval = 0;
832 
833 	KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_START,
834 	    dev, ap->a_command, ap->a_fflag, ap->a_vp->v_type, 0);
835 
836 	switch (ap->a_vp->v_type) {
837 	case VCHR:
838 		retval = (*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
839 		    ap->a_fflag, p);
840 		break;
841 
842 	case VBLK:
843 		retval = (*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p);
844 		if (!retval && ap->a_command == DKIOCSETBLOCKSIZE) {
845 			ap->a_vp->v_specsize = *(uint32_t *)ap->a_data;
846 		}
847 		break;
848 
849 	default:
850 		panic("spec_ioctl");
851 		/* NOTREACHED */
852 	}
853 	KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_END,
854 	    dev, ap->a_command, ap->a_fflag, retval, 0);
855 
856 	return retval;
857 }
858 
859 int
860 spec_select(struct vnop_select_args *ap)
861 {
862 	proc_t p = vfs_context_proc(ap->a_context);
863 	dev_t dev;
864 
865 	switch (ap->a_vp->v_type) {
866 	default:
867 		return 1;             /* XXX */
868 
869 	case VCHR:
870 		dev = ap->a_vp->v_rdev;
871 		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, p);
872 	}
873 }
874 
875 int
876 spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_qos_s *kev)
877 {
878 	dev_t dev;
879 
880 	assert(vnode_ischr(vp));
881 
882 	dev = vnode_specrdev(vp);
883 
884 #if NETWORKING
885 	/*
886 	 * Try a bpf device, as defined in bsd/net/bpf.c
887 	 * If it doesn't error out the attach, then it
888 	 * claimed it. Otherwise, fall through and try
889 	 * other attaches.
890 	 */
891 	int32_t tmp_flags = kn->kn_flags;
892 	int64_t tmp_sdata = kn->kn_sdata;
893 	int res;
894 
895 	res = bpfkqfilter(dev, kn);
896 	if ((kn->kn_flags & EV_ERROR) == 0) {
897 		return res;
898 	}
899 	kn->kn_flags = tmp_flags;
900 	kn->kn_sdata = tmp_sdata;
901 #endif
902 
903 	if (major(dev) >= nchrdev) {
904 		knote_set_error(kn, ENXIO);
905 		return 0;
906 	}
907 
908 	kn->kn_vnode_kqok = !!(cdevsw_flags[major(dev)] & CDEVSW_SELECT_KQUEUE);
909 	kn->kn_vnode_use_ofst = !!(cdevsw_flags[major(dev)] & CDEVSW_USE_OFFSET);
910 
911 	if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTS) {
912 		kn->kn_filtid = EVFILTID_PTSD;
913 		return ptsd_kqfilter(dev, kn);
914 	} else if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTC) {
915 		kn->kn_filtid = EVFILTID_PTMX;
916 		return ptmx_kqfilter(dev, kn);
917 	} else if (cdevsw[major(dev)].d_type == D_TTY && kn->kn_vnode_kqok) {
918 		/*
919 		 * TTYs from drivers that use struct ttys use their own filter
920 		 * routines.  The PTC driver doesn't use the tty for character
921 		 * counts, so it must go through the select fallback.
922 		 */
923 		kn->kn_filtid = EVFILTID_TTY;
924 	} else {
925 		/* Try to attach to other char special devices */
926 		kn->kn_filtid = EVFILTID_SPEC;
927 	}
928 
929 	return knote_fops(kn)->f_attach(kn, kev);
930 }
931 
932 /*
933  * Synch buffers associated with a block device
934  */
935 int
936 spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context)
937 {
938 	if (vp->v_type == VCHR) {
939 		return 0;
940 	}
941 	/*
942 	 * Flush all dirty buffers associated with a block device.
943 	 */
944 	buf_flushdirtyblks(vp, (waitfor == MNT_WAIT || waitfor == MNT_DWAIT), 0, "spec_fsync");
945 
946 	return 0;
947 }
948 
949 int
950 spec_fsync(struct vnop_fsync_args *ap)
951 {
952 	return spec_fsync_internal(ap->a_vp, ap->a_waitfor, ap->a_context);
953 }
954 
955 
956 /*
957  * Just call the device strategy routine
958  */
959 void throttle_init(void);
960 
961 
962 #if 0
963 #define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...)  \
964 	do {                                                    \
965 	       if ((debug_info)->alloc)                           \
966 	       printf("%s: "format, __FUNCTION__, ## args);     \
967        } while(0)
968 
969 #else
970 #define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...)
971 #endif
972 
973 
974 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier1_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_windows_msecs[THROTTLE_LEVEL_TIER1], 0, "");
975 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier2_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_windows_msecs[THROTTLE_LEVEL_TIER2], 0, "");
976 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier3_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_windows_msecs[THROTTLE_LEVEL_TIER3], 0, "");
977 
978 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier1_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_msecs[THROTTLE_LEVEL_TIER1], 0, "");
979 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier2_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_msecs[THROTTLE_LEVEL_TIER2], 0, "");
980 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier3_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_msecs[THROTTLE_LEVEL_TIER3], 0, "");
981 
982 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier1_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_ssd_msecs[THROTTLE_LEVEL_TIER1], 0, "");
983 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier2_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_ssd_msecs[THROTTLE_LEVEL_TIER2], 0, "");
984 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier3_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_ssd_msecs[THROTTLE_LEVEL_TIER3], 0, "");
985 
986 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_throttle_enabled, 0, "");
987 
988 
989 static LCK_GRP_DECLARE(throttle_lock_grp, "throttle I/O");
990 
991 
992 /*
993  * throttled I/O helper function
994  * convert the index of the lowest set bit to a device index
995  */
996 int
997 num_trailing_0(uint64_t n)
998 {
999 	/*
1000 	 * since in most cases the number of trailing 0s is very small,
1001 	 * we simply count sequentially from the lowest bit
1002 	 */
1003 	if (n == 0) {
1004 		return sizeof(n) * 8;
1005 	}
1006 	int count = 0;
1007 	while (!ISSET(n, 1)) {
1008 		n >>= 1;
1009 		++count;
1010 	}
1011 	return count;
1012 }
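/*
 * Examples: num_trailing_0(0x1) == 0, num_trailing_0(0x8) == 3, and
 * num_trailing_0(0) == 64 (all 64 bits clear).  The result of calling this on
 * a non-zero DKIOCGETTHROTTLEMASK value becomes si_devbsdunit (see
 * spec_init_bsdunit) and is also used by throttle_info_ref_by_mask() as an
 * index into _throttle_io_info[].
 */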
1013 
1014 
1015 /*
1016  * Release the reference and if the item was allocated and this is the last
1017  * reference then free it.
1018  *
1019  * This routine always returns the old value.
1020  */
1021 static int
1022 throttle_info_rel(struct _throttle_io_info_t *info)
1023 {
1024 	SInt32 oldValue = OSDecrementAtomic(&info->throttle_refcnt);
1025 
1026 	DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n",
1027 	    info, (int)(oldValue - 1), info );
1028 
1029 	/* The reference count just went negative, very bad */
1030 	if (oldValue == 0) {
1031 		panic("throttle info ref cnt went negative!");
1032 	}
1033 
1034 	/*
1035 	 * Once reference count is zero, no one else should be able to take a
1036 	 * reference
1037 	 */
1038 	if ((oldValue == 1) && (info->throttle_alloc)) {
1039 		DEBUG_ALLOC_THROTTLE_INFO("Freeing info = %p\n", info);
1040 
1041 		lck_mtx_destroy(&info->throttle_lock, &throttle_lock_grp);
1042 		kfree_type(struct _throttle_io_info_t, info);
1043 	}
1044 	return oldValue;
1045 }
1046 
1047 
1048 /*
1049  * Just take a reference on the throttle info structure.
1050  *
1051  * This routine always returns the old value.
1052  */
1053 static SInt32
1054 throttle_info_ref(struct _throttle_io_info_t *info)
1055 {
1056 	SInt32 oldValue = OSIncrementAtomic(&info->throttle_refcnt);
1057 
1058 	DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n",
1059 	    info, (int)(oldValue - 1), info );
1060 	/* Allocated items should never have a reference of zero */
1061 	if (info->throttle_alloc && (oldValue == 0)) {
1062 		panic("Taking a reference without calling create throttle info!");
1063 	}
1064 
1065 	return oldValue;
1066 }
1067 
1068 /*
1069  * on entry the throttle_lock is held...
1070  * this function is responsible for taking
1071  * and dropping the reference on the info
1072  * structure which will keep it from going
1073  * away while the timer is running if it
1074  * happens to have been dynamically allocated by
1075  * a network filesystem kext which is now trying
1076  * to free it
1077  */
1078 static uint32_t
1079 throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count, int wakelevel)
1080 {
1081 	struct timeval  elapsed;
1082 	struct timeval  now;
1083 	struct timeval  period;
1084 	uint64_t        elapsed_msecs;
1085 	int             throttle_level;
1086 	int             level;
1087 	int             msecs;
1088 	boolean_t       throttled = FALSE;
1089 	boolean_t       need_timer = FALSE;
1090 
1091 	microuptime(&now);
1092 
1093 	if (update_io_count == TRUE) {
1094 		info->throttle_io_count_begin = info->throttle_io_count;
1095 		info->throttle_io_period_num++;
1096 
1097 		while (wakelevel >= THROTTLE_LEVEL_THROTTLED) {
1098 			info->throttle_start_IO_period_timestamp[wakelevel--] = now;
1099 		}
1100 
1101 		info->throttle_min_timer_deadline = now;
1102 
1103 		msecs = info->throttle_io_periods[THROTTLE_LEVEL_THROTTLED];
1104 		period.tv_sec = msecs / 1000;
1105 		period.tv_usec = (msecs % 1000) * 1000;
1106 
1107 		timevaladd(&info->throttle_min_timer_deadline, &period);
1108 	}
1109 	for (throttle_level = THROTTLE_LEVEL_START; throttle_level < THROTTLE_LEVEL_END; throttle_level++) {
1110 		elapsed = now;
1111 		timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]);
1112 		elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
1113 
1114 		for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) {
1115 			if (!TAILQ_EMPTY(&info->throttle_uthlist[level])) {
1116 				if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[throttle_level]) {
1117 					/*
1118 					 * we had an I/O occur at a higher priority tier within
1119 					 * this tier's throttle window
1120 					 */
1121 					throttled = TRUE;
1122 				}
1123 				/*
1124 				 * we assume that the windows are the same or longer
1125 				 * as we drop through the throttling tiers...  thus
1126 				 * we can stop looking once we run into a tier with
1127 				 * threads to schedule regardless of whether it's
1128 				 * still in its throttling window or not
1129 				 */
1130 				break;
1131 			}
1132 		}
1133 		if (throttled == TRUE) {
1134 			break;
1135 		}
1136 	}
1137 	if (throttled == TRUE) {
1138 		uint64_t        deadline = 0;
1139 		struct timeval  target;
1140 		struct timeval  min_target;
1141 
1142 		/*
1143 		 * we've got at least one tier still in a throttled window
1144 		 * so we need a timer running... compute the next deadline
1145 		 * and schedule it
1146 		 */
1147 		for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) {
1148 			if (TAILQ_EMPTY(&info->throttle_uthlist[level])) {
1149 				continue;
1150 			}
1151 
1152 			target = info->throttle_start_IO_period_timestamp[level];
1153 
1154 			msecs = info->throttle_io_periods[level];
1155 			period.tv_sec = msecs / 1000;
1156 			period.tv_usec = (msecs % 1000) * 1000;
1157 
1158 			timevaladd(&target, &period);
1159 
1160 			if (need_timer == FALSE || timevalcmp(&target, &min_target, <)) {
1161 				min_target = target;
1162 				need_timer = TRUE;
1163 			}
1164 		}
1165 		if (timevalcmp(&info->throttle_min_timer_deadline, &now, >)) {
1166 			if (timevalcmp(&info->throttle_min_timer_deadline, &min_target, >)) {
1167 				min_target = info->throttle_min_timer_deadline;
1168 			}
1169 		}
1170 
1171 		if (info->throttle_timer_active) {
1172 			if (thread_call_cancel(info->throttle_timer_call) == FALSE) {
1173 				/*
1174 				 * couldn't kill the timer because it's already
1175 				 * been dispatched, so don't try to start a new
1176 				 * one... once we drop the lock, the timer will
1177 				 * proceed and eventually re-run this function
1178 				 */
1179 				need_timer = FALSE;
1180 			} else {
1181 				info->throttle_timer_active = 0;
1182 			}
1183 		}
1184 		if (need_timer == TRUE) {
1185 			/*
1186 			 * This is defined as an int (32-bit) rather than a 64-bit
1187 			 * value because it would need a really big period in the
1188 			 * order of ~500 days to overflow this. So, we let this be
1189 			 * 32-bit which allows us to use the clock_interval_to_deadline()
1190 			 * routine.
1191 			 */
1192 			int     target_msecs;
1193 
1194 			if (info->throttle_timer_ref == 0) {
1195 				/*
1196 				 * take a reference for the timer
1197 				 */
1198 				throttle_info_ref(info);
1199 
1200 				info->throttle_timer_ref = 1;
1201 			}
1202 			elapsed = min_target;
1203 			timevalsub(&elapsed, &now);
1204 			target_msecs = (int)(elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000);
1205 
1206 			if (target_msecs <= 0) {
1207 				/*
1208 				 * we may have computed a deadline slightly in the past
1209 				 * due to various factors... if so, just set the timer
1210 				 * to go off in the near future (we don't need to be precise)
1211 				 */
1212 				target_msecs = 1;
1213 			}
1214 			clock_interval_to_deadline(target_msecs, 1000000, &deadline);
1215 
1216 			thread_call_enter_delayed(info->throttle_timer_call, deadline);
1217 			info->throttle_timer_active = 1;
1218 		}
1219 	}
1220 	return throttle_level;
1221 }
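/*
 * Informally, the value returned above is the tier at which the window scan
 * stopped: the highest-priority tier whose recent or in-flight I/O is still
 * holding lower-tier waiters, or THROTTLE_LEVEL_END when no queued thread
 * remains inside an active throttle window.  throttle_timer() uses it to
 * decide which queued uthreads can be woken immediately, and
 * throttle_add_to_list() treats THROTTLE_LEVEL_END as "no need to wait".
 */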
1222 
1223 
1224 static void
1225 throttle_timer(struct _throttle_io_info_t *info, __unused thread_call_param_t p)
1226 {
1227 	uthread_t       ut, utlist;
1228 	struct timeval  elapsed;
1229 	struct timeval  now;
1230 	uint64_t        elapsed_msecs;
1231 	int             throttle_level;
1232 	int             level;
1233 	int             wake_level;
1234 	caddr_t         wake_address = NULL;
1235 	boolean_t       update_io_count = FALSE;
1236 	boolean_t       need_wakeup = FALSE;
1237 	boolean_t       need_release = FALSE;
1238 
1239 	ut = NULL;
1240 	lck_mtx_lock(&info->throttle_lock);
1241 
1242 	info->throttle_timer_active = 0;
1243 	microuptime(&now);
1244 
1245 	elapsed = now;
1246 	timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp[THROTTLE_LEVEL_THROTTLED]);
1247 	elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
1248 
1249 	if (elapsed_msecs >= (uint64_t)info->throttle_io_periods[THROTTLE_LEVEL_THROTTLED]) {
1250 		wake_level = info->throttle_next_wake_level;
1251 
1252 		for (level = THROTTLE_LEVEL_START; level < THROTTLE_LEVEL_END; level++) {
1253 			elapsed = now;
1254 			timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp[wake_level]);
1255 			elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
1256 
1257 			if (elapsed_msecs >= (uint64_t)info->throttle_io_periods[wake_level] && !TAILQ_EMPTY(&info->throttle_uthlist[wake_level])) {
1258 				/*
1259 				 * we're closing out the current IO period...
1260 				 * if we have a waiting thread, wake it up
1261 				 * after we have reset the I/O window info
1262 				 */
1263 				need_wakeup = TRUE;
1264 				update_io_count = TRUE;
1265 
1266 				info->throttle_next_wake_level = wake_level - 1;
1267 
1268 				if (info->throttle_next_wake_level == THROTTLE_LEVEL_START) {
1269 					info->throttle_next_wake_level = THROTTLE_LEVEL_END;
1270 				}
1271 
1272 				break;
1273 			}
1274 			wake_level--;
1275 
1276 			if (wake_level == THROTTLE_LEVEL_START) {
1277 				wake_level = THROTTLE_LEVEL_END;
1278 			}
1279 		}
1280 	}
1281 	if (need_wakeup == TRUE) {
1282 		if (!TAILQ_EMPTY(&info->throttle_uthlist[wake_level])) {
1283 			ut = (uthread_t)TAILQ_FIRST(&info->throttle_uthlist[wake_level]);
1284 			TAILQ_REMOVE(&info->throttle_uthlist[wake_level], ut, uu_throttlelist);
1285 			ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
1286 			ut->uu_is_throttled = false;
1287 
1288 			wake_address = (caddr_t)&ut->uu_on_throttlelist;
1289 		}
1290 	} else {
1291 		wake_level = THROTTLE_LEVEL_START;
1292 	}
1293 
1294 	throttle_level = throttle_timer_start(info, update_io_count, wake_level);
1295 
1296 	if (wake_address != NULL) {
1297 		wakeup(wake_address);
1298 	}
1299 
1300 	for (level = THROTTLE_LEVEL_THROTTLED; level <= throttle_level; level++) {
1301 		TAILQ_FOREACH_SAFE(ut, &info->throttle_uthlist[level], uu_throttlelist, utlist) {
1302 			TAILQ_REMOVE(&info->throttle_uthlist[level], ut, uu_throttlelist);
1303 			ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
1304 			ut->uu_is_throttled = false;
1305 
1306 			wakeup(&ut->uu_on_throttlelist);
1307 		}
1308 	}
1309 	if (info->throttle_timer_active == 0 && info->throttle_timer_ref) {
1310 		info->throttle_timer_ref = 0;
1311 		need_release = TRUE;
1312 	}
1313 	lck_mtx_unlock(&info->throttle_lock);
1314 
1315 	if (need_release == TRUE) {
1316 		throttle_info_rel(info);
1317 	}
1318 }
1319 
1320 
1321 static int
1322 throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel, boolean_t insert_tail)
1323 {
1324 	boolean_t start_timer = FALSE;
1325 	int level = THROTTLE_LEVEL_START;
1326 
1327 	if (TAILQ_EMPTY(&info->throttle_uthlist[mylevel])) {
1328 		info->throttle_start_IO_period_timestamp[mylevel] = info->throttle_last_IO_timestamp[mylevel];
1329 		start_timer = TRUE;
1330 	}
1331 
1332 	if (insert_tail == TRUE) {
1333 		TAILQ_INSERT_TAIL(&info->throttle_uthlist[mylevel], ut, uu_throttlelist);
1334 	} else {
1335 		TAILQ_INSERT_HEAD(&info->throttle_uthlist[mylevel], ut, uu_throttlelist);
1336 	}
1337 
1338 	ut->uu_on_throttlelist = (int8_t)mylevel;
1339 
1340 	if (start_timer == TRUE) {
1341 		/* we may need to start or rearm the timer */
1342 		level = throttle_timer_start(info, FALSE, THROTTLE_LEVEL_START);
1343 
1344 		if (level == THROTTLE_LEVEL_END) {
1345 			if (ut->uu_on_throttlelist >= THROTTLE_LEVEL_THROTTLED) {
1346 				TAILQ_REMOVE(&info->throttle_uthlist[ut->uu_on_throttlelist], ut, uu_throttlelist);
1347 
1348 				ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
1349 			}
1350 		}
1351 	}
1352 	return level;
1353 }
1354 
1355 static void
1356 throttle_init_throttle_window(void)
1357 {
1358 	int throttle_window_size;
1359 
1360 	/*
1361 	 * The hierarchy of throttle window values is as follows:
1362 	 * - Global defaults
1363 	 * - Device tree properties
1364 	 * - Boot-args
1365 	 * All values are specified in msecs.
1366 	 */
1367 
1368 #if (XNU_TARGET_OS_OSX && __arm64__)
1369 	/*
1370 	 * IO Tier EDT overrides are meant for
1371 	 * some arm platforms but not for
1372 	 * macs.
1373 	 */
1374 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
1375 	/* Override global values with device-tree properties */
1376 	if (PE_get_default("kern.io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) {
1377 		throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size;
1378 	}
1379 
1380 	if (PE_get_default("kern.io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) {
1381 		throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size;
1382 	}
1383 
1384 	if (PE_get_default("kern.io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) {
1385 		throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size;
1386 	}
1387 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
1388 
1389 	/* Override with boot-args */
1390 	if (PE_parse_boot_argn("io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) {
1391 		throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size;
1392 	}
1393 
1394 	if (PE_parse_boot_argn("io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) {
1395 		throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size;
1396 	}
1397 
1398 	if (PE_parse_boot_argn("io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) {
1399 		throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size;
1400 	}
1401 }
1402 
1403 static void
1404 throttle_init_throttle_period(struct _throttle_io_info_t *info, boolean_t isssd)
1405 {
1406 	int throttle_period_size;
1407 
1408 	/*
1409 	 * The hierarchy of throttle period values is as follows:
1410 	 * - Global defaults
1411 	 * - Device tree properties
1412 	 * - Boot-args
1413 	 * All values are specified in msecs.
1414 	 */
1415 
1416 	/* Assign global defaults */
1417 	if ((isssd == TRUE) && (info->throttle_is_fusion_with_priority == 0)) {
1418 		info->throttle_io_periods = &throttle_io_period_ssd_msecs[0];
1419 	} else {
1420 		info->throttle_io_periods = &throttle_io_period_msecs[0];
1421 	}
1422 
1423 #if (XNU_TARGET_OS_OSX && __arm64__)
1424 	/*
1425 	 * IO Tier EDT overrides are meant for
1426 	 * some arm platforms but not for
1427 	 * macs.
1428 	 */
1429 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
1430 	/* Override global values with device-tree properties */
1431 	if (PE_get_default("kern.io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) {
1432 		info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size;
1433 	}
1434 
1435 	if (PE_get_default("kern.io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) {
1436 		info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size;
1437 	}
1438 
1439 	if (PE_get_default("kern.io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) {
1440 		info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size;
1441 	}
1442 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
1443 
1444 	/* Override with boot-args */
1445 	if (PE_parse_boot_argn("io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) {
1446 		info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size;
1447 	}
1448 
1449 	if (PE_parse_boot_argn("io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) {
1450 		info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size;
1451 	}
1452 
1453 	if (PE_parse_boot_argn("io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) {
1454 		info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size;
1455 	}
1456 }
1457 
1458 #if CONFIG_IOSCHED
1459 extern  void vm_io_reprioritize_init(void);
1460 int     iosched_enabled = 1;
1461 #endif
1462 
1463 void
1464 throttle_init(void)
1465 {
1466 	struct _throttle_io_info_t *info;
1467 	int     i;
1468 	int     level;
1469 #if CONFIG_IOSCHED
1470 	int     iosched;
1471 #endif
1472 
1473 	/* Update throttle parameters based on device tree configuration */
1474 	throttle_init_throttle_window();
1475 
1476 	for (i = 0; i < LOWPRI_MAX_NUM_DEV; i++) {
1477 		info = &_throttle_io_info[i];
1478 
1479 		lck_mtx_init(&info->throttle_lock, &throttle_lock_grp, LCK_ATTR_NULL);
1480 		info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info);
1481 
1482 		for (level = 0; level <= THROTTLE_LEVEL_END; level++) {
1483 			TAILQ_INIT(&info->throttle_uthlist[level]);
1484 			info->throttle_last_IO_pid[level] = 0;
1485 			info->throttle_inflight_count[level] = 0;
1486 		}
1487 		info->throttle_next_wake_level = THROTTLE_LEVEL_END;
1488 		info->throttle_disabled = 0;
1489 		info->throttle_is_fusion_with_priority = 0;
1490 	}
1491 #if CONFIG_IOSCHED
1492 	if (PE_parse_boot_argn("iosched", &iosched, sizeof(iosched))) {
1493 		iosched_enabled = iosched;
1494 	}
1495 	if (iosched_enabled) {
1496 		/* Initialize I/O Reprioritization mechanism */
1497 		vm_io_reprioritize_init();
1498 	}
1499 #endif
1500 }
1501 
1502 void
1503 sys_override_io_throttle(boolean_t enable_override)
1504 {
1505 	if (enable_override) {
1506 		lowpri_throttle_enabled = 0;
1507 	} else {
1508 		lowpri_throttle_enabled = 1;
1509 	}
1510 }
1511 
1512 int rethrottle_wakeups = 0;
1513 
1514 /*
1515  * the uu_rethrottle_lock is used to synchronize this function
1516  * with "throttle_lowpri_io" which is where a throttled thread
1517  * will block... that function will grab this lock before beginning
1518  * its decision-making process concerning the need to block, and
1519  * hold it through the assert_wait.  When that thread is awakened
1520  * for any reason (timer or rethrottle), it will reacquire the
1521  * uu_rethrottle_lock before determining if it really is ok for
1522  * it to now run.  This is the point at which the thread could
1523  * enter a different throttling queue and reblock or return from
1524  * the throttle w/o having waited out its entire throttle if
1525  * the rethrottle has now moved it out of any currently
1526  * active throttle window.
1527  *
1528  *
1529  * NOTES:
1530  * 1 - This may be called with the task lock held.
1531  * 2 - This may be called with preemption and interrupts disabled
1532  *     in the kqueue wakeup path so we can't take the throttle_lock which is a mutex
1533  * 3 - This cannot safely dereference uu_throttle_info, as it may
1534  *     get deallocated out from under us
1535  */
1536 
1537 void
1538 rethrottle_thread(uthread_t ut)
1539 {
1540 	/*
1541 	 * If uthread doesn't have throttle state, then there's no chance
1542 	 * of it needing a rethrottle.
1543 	 */
1544 	if (ut->uu_throttle_info == NULL) {
1545 		return;
1546 	}
1547 
1548 	boolean_t s = ml_set_interrupts_enabled(FALSE);
1549 	lck_spin_lock(&ut->uu_rethrottle_lock);
1550 
1551 	if (!ut->uu_is_throttled) {
1552 		ut->uu_was_rethrottled = true;
1553 	} else {
1554 		int my_new_level = throttle_get_thread_throttle_level(ut);
1555 
1556 		if (my_new_level != ut->uu_on_throttlelist) {
1557 			/*
1558 			 * ut is currently blocked (as indicated by
1559 			 * ut->uu_is_throttled == true)
1560 			 * and we're changing its throttle level, so
1561 			 * we need to wake it up.
1562 			 */
1563 			ut->uu_is_throttled = false;
1564 			wakeup(&ut->uu_on_throttlelist);
1565 
1566 			rethrottle_wakeups++;
1567 			KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 102)),
1568 			    uthread_tid(ut), ut->uu_on_throttlelist, my_new_level, 0, 0);
1569 		}
1570 	}
1571 	lck_spin_unlock(&ut->uu_rethrottle_lock);
1572 	ml_set_interrupts_enabled(s);
1573 }
1574 
1575 
1576 /*
1577  * KPI routine
1578  *
1579  * Create and take a reference on a throttle info structure and return a
1580  * pointer for the file system to use when calling throttle_info_update.
1581  * Calling file system must have a matching release for every create.
1582  */
1583 void *
1584 throttle_info_create(void)
1585 {
1586 	struct _throttle_io_info_t *info;
1587 	int     level;
1588 
1589 	info = kalloc_type(struct _throttle_io_info_t,
1590 	    Z_ZERO | Z_WAITOK | Z_NOFAIL);
1591 	/* Mark that this one was allocated and needs to be freed */
1592 	DEBUG_ALLOC_THROTTLE_INFO("Creating info = %p\n", info, info );
1593 	info->throttle_alloc = TRUE;
1594 
1595 	lck_mtx_init(&info->throttle_lock, &throttle_lock_grp, LCK_ATTR_NULL);
1596 	info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info);
1597 
1598 	for (level = 0; level <= THROTTLE_LEVEL_END; level++) {
1599 		TAILQ_INIT(&info->throttle_uthlist[level]);
1600 	}
1601 	info->throttle_next_wake_level = THROTTLE_LEVEL_END;
1602 
1603 	/* Take a reference */
1604 	OSIncrementAtomic(&info->throttle_refcnt);
1605 	return info;
1606 }
1607 
1608 /*
1609  * KPI routine
1610  *
1611  * Release the throttle info pointer if all the references are gone. Should be
1612  * called to release the reference taken by throttle_info_create
1613  */
1614 void
1615 throttle_info_release(void *throttle_info)
1616 {
1617 	DEBUG_ALLOC_THROTTLE_INFO("Releasing info = %p\n",
1618 	    (struct _throttle_io_info_t *)throttle_info,
1619 	    (struct _throttle_io_info_t *)throttle_info);
1620 	if (throttle_info) { /* Just to be careful */
1621 		throttle_info_rel(throttle_info);
1622 	}
1623 }
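/*
 * Example (illustrative sketch only, not part of this file): a caller of
 * these KPIs pairs every throttle_info_create() with a throttle_info_release(),
 * and typically calls throttle_info_update() before each I/O it issues.
 * The B_PASSIVE variant shown below marks I/O that should not open a
 * throttle window; the surrounding control flow is hypothetical.
 *
 *	void *ti = throttle_info_create();
 *	...
 *	throttle_info_update(ti, 0);		// ordinary throttleable I/O
 *	throttle_info_update(ti, B_PASSIVE);	// I/O that should not open a window
 *	...
 *	throttle_info_release(ti);		// matches the create above
 */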
1624 
1625 /*
1626  * KPI routine
1627  *
1628  * File systems that create an info structure need to call this routine in
1629  * their mount routine (used by cluster code). File systems that call this in
1630  * their mount routines must call throttle_info_mount_rel in their unmount
1631  * routines.
1632  */
1633 void
1634 throttle_info_mount_ref(mount_t mp, void *throttle_info)
1635 {
1636 	if ((throttle_info == NULL) || (mp == NULL)) {
1637 		return;
1638 	}
1639 	throttle_info_ref(throttle_info);
1640 
1641 	/*
1642 	 * If we already have a reference, release it before adding the new one
1643 	 */
1644 	if (mp->mnt_throttle_info) {
1645 		throttle_info_rel(mp->mnt_throttle_info);
1646 	}
1647 	mp->mnt_throttle_info = throttle_info;
1648 }
1649 
1650 /*
1651  * Private KPI routine
1652  *
1653  * return a handle for accessing throttle_info given a throttle_mask.  The
1654  * handle must be released by throttle_info_rel_by_mask
1655  */
1656 int
1657 throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle)
1658 {
1659 	int     dev_index;
1660 	struct _throttle_io_info_t *info;
1661 
1662 	/*
1663 	 * The 'throttle_mask' is not expected to be 0; otherwise num_trailing_0()
1664 	 * would return 64, which would cause the '_throttle_io_info' access to
1665 	 * go out of bounds, as '_throttle_io_info' is only LOWPRI_MAX_NUM_DEV (64)
1666 	 * elements long.
1667 	 */
1668 	if (throttle_info_handle == NULL || throttle_mask == 0) {
1669 		return EINVAL;
1670 	}
1671 
1672 	dev_index = num_trailing_0(throttle_mask);
1673 	info = &_throttle_io_info[dev_index];
1674 	throttle_info_ref(info);
1675 	*(struct _throttle_io_info_t**)throttle_info_handle = info;
1676 
1677 	return 0;
1678 }
1679 
1680 /*
1681  * Private KPI routine
1682  *
1683  * release the handle obtained by throttle_info_ref_by_mask
1684  */
1685 void
1686 throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle)
1687 {
1688 	/*
1689 	 * for now the handle is just a pointer to _throttle_io_info_t
1690 	 */
1691 	throttle_info_rel((struct _throttle_io_info_t*)throttle_info_handle);
1692 }
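/*
 * Example (illustrative sketch only): a typical user of the mask-based
 * handle obtains it from a mount's throttle mask and releases it when done.
 * The error handling and surrounding context here are hypothetical.
 *
 *	throttle_info_handle_t handle;
 *
 *	if (throttle_info_ref_by_mask(mp->mnt_throttle_mask, &handle) == 0) {
 *		throttle_info_update_by_mask(handle, 0);
 *		throttle_info_rel_by_mask(handle);
 *	}
 */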
1693 
1694 /*
1695  * KPI routine
1696  *
1697  * File systems that call throttle_info_mount_ref must call this routine in
1698  * their unmount routines.
1699  */
1700 void
1701 throttle_info_mount_rel(mount_t mp)
1702 {
1703 	if (mp->mnt_throttle_info) {
1704 		throttle_info_rel(mp->mnt_throttle_info);
1705 	}
1706 	mp->mnt_throttle_info = NULL;
1707 }
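/*
 * Example (illustrative sketch only): a file system that creates its own
 * throttle info wires it to the mount roughly as follows.  The
 * example_mount()/example_unmount() names and the fs-private "ti" pointer
 * are hypothetical.
 *
 *	// in example_mount():
 *	ti = throttle_info_create();
 *	throttle_info_mount_ref(mp, ti);
 *
 *	// in example_unmount():
 *	throttle_info_mount_rel(mp);
 *	throttle_info_release(ti);	// drop the create reference
 */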
1708 
1709 /*
1710  * Reset throttling periods for the given mount point
1711  *
1712  * private interface used by disk conditioner to reset
1713  * throttling periods when 'is_ssd' status changes
1714  */
1715 void
1716 throttle_info_mount_reset_period(mount_t mp, int isssd)
1717 {
1718 	struct _throttle_io_info_t *info;
1719 
1720 	if (mp == NULL) {
1721 		info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
1722 	} else if (mp->mnt_throttle_info == NULL) {
1723 		info = &_throttle_io_info[mp->mnt_devbsdunit];
1724 	} else {
1725 		info = mp->mnt_throttle_info;
1726 	}
1727 
1728 	throttle_init_throttle_period(info, isssd);
1729 }
1730 
1731 void
1732 throttle_info_get_last_io_time(mount_t mp, struct timeval *tv)
1733 {
1734 	struct _throttle_io_info_t *info;
1735 
1736 	if (mp == NULL) {
1737 		info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
1738 	} else if (mp->mnt_throttle_info == NULL) {
1739 		info = &_throttle_io_info[mp->mnt_devbsdunit];
1740 	} else {
1741 		info = mp->mnt_throttle_info;
1742 	}
1743 
1744 	*tv = info->throttle_last_write_timestamp;
1745 }
1746 
1747 void
1748 update_last_io_time(mount_t mp)
1749 {
1750 	struct _throttle_io_info_t *info;
1751 
1752 	if (mp == NULL) {
1753 		info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
1754 	} else if (mp->mnt_throttle_info == NULL) {
1755 		info = &_throttle_io_info[mp->mnt_devbsdunit];
1756 	} else {
1757 		info = mp->mnt_throttle_info;
1758 	}
1759 
1760 	microuptime(&info->throttle_last_write_timestamp);
1761 	if (mp != NULL) {
1762 		mp->mnt_last_write_completed_timestamp = info->throttle_last_write_timestamp;
1763 	}
1764 }
1765 
1766 int
1767 throttle_get_io_policy(uthread_t *ut)
1768 {
1769 	if (ut != NULL) {
1770 		*ut = current_uthread();
1771 	}
1772 
1773 	return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
1774 }
1775 
1776 int
1777 throttle_get_passive_io_policy(uthread_t *ut)
1778 {
1779 	if (ut != NULL) {
1780 		*ut = current_uthread();
1781 	}
1782 
1783 	return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_PASSIVE_IO);
1784 }
1785 
1786 
1787 static int
1788 throttle_get_thread_throttle_level(uthread_t ut)
1789 {
1790 	uthread_t *ut_p = (ut == NULL) ? &ut : NULL;
1791 	int io_tier = throttle_get_io_policy(ut_p);
1792 
1793 	return throttle_get_thread_throttle_level_internal(ut, io_tier);
1794 }
1795 
1796 /*
1797  * Return a throttle level given an existing I/O tier (such as returned by throttle_get_io_policy)
1798  */
1799 static int
1800 throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier)
1801 {
1802 	int thread_throttle_level = io_tier;
1803 	int user_idle_level;
1804 
1805 	assert(ut != NULL);
1806 
1807 	/* Bootcache misses should always be throttled */
1808 	if (ut->uu_throttle_bc) {
1809 		thread_throttle_level = THROTTLE_LEVEL_TIER3;
1810 	}
1811 
1812 	/*
1813 	 * Issue tier3 I/O as tier2 when the user is idle
1814 	 * to allow maintenance tasks to make more progress.
1815 	 *
1816 	 * Assume any positive idle level is enough... for now it's
1817 	 * only ever 0 or 128 but this is not defined anywhere.
1818 	 */
1819 	if (thread_throttle_level >= THROTTLE_LEVEL_TIER3) {
1820 		user_idle_level = timer_get_user_idle_level();
1821 		if (user_idle_level > 0) {
1822 			thread_throttle_level--;
1823 		}
1824 	}
1825 
1826 	return thread_throttle_level;
1827 }
1828 
1829 /*
1830  * I/O will be throttled if either of the following are true:
1831  *   - Higher tiers have in-flight I/O
1832  *   - The time delta since the last start/completion of a higher tier is within the throttle window interval
1833  *
1834  * In-flight I/O is bookended by throttle_info_update_internal/throttle_info_end_io_internal
1835  */
1836 static int
1837 throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int * throttling_level)
1838 {
1839 	struct _throttle_io_info_t *info = throttle_info;
1840 	struct timeval elapsed;
1841 	struct timeval now;
1842 	uint64_t elapsed_msecs;
1843 	int     thread_throttle_level;
1844 	int     throttle_level;
1845 
1846 	if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL)) < THROTTLE_LEVEL_THROTTLED) {
1847 		return THROTTLE_DISENGAGED;
1848 	}
1849 
1850 	microuptime(&now);
1851 
1852 	for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) {
1853 		if (info->throttle_inflight_count[throttle_level]) {
1854 			break;
1855 		}
1856 		elapsed = now;
1857 		timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]);
1858 		elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
1859 
1860 		if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) {
1861 			break;
1862 		}
1863 	}
1864 	if (throttle_level >= thread_throttle_level) {
1865 		/*
1866 		 * we're beyond all of the throttle windows
1867 		 * that affect the throttle level of this thread,
1868 		 * so go ahead and treat as normal I/O
1869 		 */
1870 		return THROTTLE_DISENGAGED;
1871 	}
1872 	if (mylevel) {
1873 		*mylevel = thread_throttle_level;
1874 	}
1875 	if (throttling_level) {
1876 		*throttling_level = throttle_level;
1877 	}
1878 
1879 	if (info->throttle_io_count != info->throttle_io_count_begin) {
1880 		/*
1881 		 * we've already issued at least one throttleable I/O
1882 		 * in the current I/O window, so avoid issuing another one
1883 		 */
1884 		return THROTTLE_NOW;
1885 	}
1886 	/*
1887 	 * we're in the throttle window, so
1888 	 * cut the I/O size back
1889 	 */
1890 	return THROTTLE_ENGAGED;
1891 }
1892 
1893 /*
1894  * If we have a mount point and it has a throttle info pointer then
1895  * use it to do the check, otherwise use the device unit number to find
1896  * the correct throttle info array element.
1897  */
1898 int
1899 throttle_io_will_be_throttled(__unused int lowpri_window_msecs, mount_t mp)
1900 {
1901 	struct _throttle_io_info_t      *info;
1902 
1903 	/*
1904 	 * Should we just return zero if no mount point
1905 	 */
1906 	if (mp == NULL) {
1907 		info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
1908 	} else if (mp->mnt_throttle_info == NULL) {
1909 		info = &_throttle_io_info[mp->mnt_devbsdunit];
1910 	} else {
1911 		info = mp->mnt_throttle_info;
1912 	}
1913 
1914 	if (info->throttle_is_fusion_with_priority) {
1915 		uthread_t ut = current_uthread();
1916 		if (ut->uu_lowpri_window == 0) {
1917 			return THROTTLE_DISENGAGED;
1918 		}
1919 	}
1920 
1921 	if (info->throttle_disabled) {
1922 		return THROTTLE_DISENGAGED;
1923 	} else {
1924 		return throttle_io_will_be_throttled_internal(info, NULL, NULL);
1925 	}
1926 }
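/*
 * Example (illustrative sketch only): a caller in the I/O issue path can
 * use this check to trim the size of a throttleable request before sending
 * it down.  The io_size/MAX_THROTTLED_IO_SIZE names are hypothetical; the
 * first argument is unused by this implementation.
 *
 *	if (throttle_io_will_be_throttled(-1, mp) != THROTTLE_DISENGAGED) {
 *		io_size = MIN(io_size, MAX_THROTTLED_IO_SIZE);
 *	}
 */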
1927 
1928 /*
1929  * Routine to increment I/O throttling counters maintained in the proc
1930  */
1931 
1932 static void
1933 throttle_update_proc_stats(pid_t throttling_pid, int count)
1934 {
1935 	proc_t throttling_proc;
1936 	proc_t throttled_proc = current_proc();
1937 
1938 	/* The throttled_proc is always the current proc; so we are not concerned with refs */
1939 	OSAddAtomic64(count, &(throttled_proc->was_throttled));
1940 
1941 	/* The throttling pid might have exited by now */
1942 	throttling_proc = proc_find(throttling_pid);
1943 	if (throttling_proc != PROC_NULL) {
1944 		OSAddAtomic64(count, &(throttling_proc->did_throttle));
1945 		proc_rele(throttling_proc);
1946 	}
1947 }
1948 
1949 /*
1950  * Block until woken up by the throttle timer or by a rethrottle call.
1951  * As long as we hold the throttle_lock while querying the throttle tier, we're
1952  * safe against seeing an old throttle tier after a rethrottle.
1953  */
1954 uint32_t
1955 throttle_lowpri_io(int sleep_amount)
1956 {
1957 	uthread_t ut;
1958 	struct _throttle_io_info_t *info;
1959 	int     throttle_type = 0;
1960 	int     mylevel = 0;
1961 	int     throttling_level = THROTTLE_LEVEL_NONE;
1962 	int     sleep_cnt = 0;
1963 	uint32_t  throttle_io_period_num = 0;
1964 	boolean_t insert_tail = TRUE;
1965 	boolean_t s;
1966 
1967 	ut = current_uthread();
1968 
1969 	if (ut->uu_lowpri_window == 0) {
1970 		return 0;
1971 	}
1972 
1973 	info = ut->uu_throttle_info;
1974 
1975 	if (info == NULL) {
1976 		ut->uu_throttle_bc = false;
1977 		ut->uu_lowpri_window = 0;
1978 		return 0;
1979 	}
1980 	lck_mtx_lock(&info->throttle_lock);
1981 	assert(ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED);
1982 
1983 	if (sleep_amount == 0) {
1984 		goto done;
1985 	}
1986 
1987 	if (sleep_amount == 1 && !ut->uu_throttle_bc) {
1988 		sleep_amount = 0;
1989 	}
1990 
1991 	throttle_io_period_num = info->throttle_io_period_num;
1992 
1993 	ut->uu_was_rethrottled = false;
1994 
1995 	while ((throttle_type = throttle_io_will_be_throttled_internal(info, &mylevel, &throttling_level))) {
1996 		if (throttle_type == THROTTLE_ENGAGED) {
1997 			if (sleep_amount == 0) {
1998 				break;
1999 			}
2000 			if (info->throttle_io_period_num < throttle_io_period_num) {
2001 				break;
2002 			}
2003 			if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) {
2004 				break;
2005 			}
2006 		}
2007 		/*
2008 		 * keep the same position in the list if "rethrottle_thread" changes our throttle level and
2009 		 * then puts us back to the original level before we get a chance to run
2010 		 */
2011 		if (ut->uu_on_throttlelist >= THROTTLE_LEVEL_THROTTLED && ut->uu_on_throttlelist != mylevel) {
2012 			/*
2013 			 * must have been awakened via "rethrottle_thread" (the timer pulls us off the list)
2014 			 * and we've changed our throttling level, so pull ourselves off of the appropriate list
2015 			 * and make sure we get put on the tail of the new list since we're starting anew w/r to
2016 			 * the throttling engine
2017 			 */
2018 			TAILQ_REMOVE(&info->throttle_uthlist[ut->uu_on_throttlelist], ut, uu_throttlelist);
2019 			ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
2020 			insert_tail = TRUE;
2021 		}
2022 		if (ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED) {
2023 			if (throttle_add_to_list(info, ut, mylevel, insert_tail) == THROTTLE_LEVEL_END) {
2024 				goto done;
2025 			}
2026 		}
2027 		assert(throttling_level >= THROTTLE_LEVEL_START && throttling_level <= THROTTLE_LEVEL_END);
2028 
2029 		s = ml_set_interrupts_enabled(FALSE);
2030 		lck_spin_lock(&ut->uu_rethrottle_lock);
2031 
2032 		/*
2033 		 * this is the critical section w/r to our interaction
2034 		 * with "rethrottle_thread"
2035 		 */
2036 		if (ut->uu_was_rethrottled) {
2037 			lck_spin_unlock(&ut->uu_rethrottle_lock);
2038 			ml_set_interrupts_enabled(s);
2039 			lck_mtx_yield(&info->throttle_lock);
2040 
2041 			KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 103)),
2042 			    uthread_tid(ut), ut->uu_on_throttlelist, 0, 0, 0);
2043 
2044 			ut->uu_was_rethrottled = false;
2045 			continue;
2046 		}
2047 		KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, PROCESS_THROTTLED)) | DBG_FUNC_NONE,
2048 		    info->throttle_last_IO_pid[throttling_level], throttling_level, proc_selfpid(), mylevel, 0);
2049 
2050 		if (sleep_cnt == 0) {
2051 			KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START,
2052 			    throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0);
2053 			throttled_count[mylevel]++;
2054 		}
2055 		ut->uu_wmesg = "throttle_lowpri_io";
2056 
2057 		assert_wait((caddr_t)&ut->uu_on_throttlelist, THREAD_UNINT);
2058 
2059 		ut->uu_is_throttled = true;
2060 		lck_spin_unlock(&ut->uu_rethrottle_lock);
2061 		ml_set_interrupts_enabled(s);
2062 
2063 		lck_mtx_unlock(&info->throttle_lock);
2064 
2065 		thread_block(THREAD_CONTINUE_NULL);
2066 
2067 		ut->uu_wmesg = NULL;
2068 
2069 		ut->uu_is_throttled = false;
2070 		ut->uu_was_rethrottled = false;
2071 
2072 		lck_mtx_lock(&info->throttle_lock);
2073 
2074 		sleep_cnt++;
2075 
2076 		if (sleep_amount == 0) {
2077 			insert_tail = FALSE;
2078 		} else if (info->throttle_io_period_num < throttle_io_period_num ||
2079 		    (info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) {
2080 			insert_tail = FALSE;
2081 			sleep_amount = 0;
2082 		}
2083 	}
2084 done:
2085 	if (ut->uu_on_throttlelist >= THROTTLE_LEVEL_THROTTLED) {
2086 		TAILQ_REMOVE(&info->throttle_uthlist[ut->uu_on_throttlelist], ut, uu_throttlelist);
2087 		ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
2088 	}
2089 	lck_mtx_unlock(&info->throttle_lock);
2090 
2091 	if (sleep_cnt) {
2092 		KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END,
2093 		    throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0);
2094 		/*
2095 		 * We update the stats for the last pid which opened a throttle window for the throttled thread.
2096 		 * This might not be completely accurate since the multiple throttles seen by the lower tier pid
2097 		 * might have been caused by various higher prio pids. However, updating these stats accurately
2098 		 * means doing a proc_find while holding the throttle lock which leads to deadlock.
2099 		 */
2100 		throttle_update_proc_stats(info->throttle_last_IO_pid[throttling_level], sleep_cnt);
2101 	}
2102 
2103 	ut->uu_throttle_info = NULL;
2104 	ut->uu_throttle_bc = false;
2105 	ut->uu_lowpri_window = 0;
2106 
2107 	throttle_info_rel(info);
2108 
2109 	return sleep_cnt;
2110 }
2111 
2112 /*
2113  *  Returns TRUE if throttle_lowpri_io called with the same sleep_amount would have slept.
2114  *  This function mimics most of the throttle_lowpri_io checks but without actually sleeping.
2115  */
2116 int
2117 throttle_lowpri_io_will_be_throttled(int sleep_amount)
2118 {
2119 	if (sleep_amount == 0) {
2120 		return FALSE;
2121 	}
2122 
2123 	uthread_t ut = current_uthread();
2124 	if (ut->uu_lowpri_window == 0) {
2125 		return FALSE;
2126 	}
2127 
2128 	struct _throttle_io_info_t *info = ut->uu_throttle_info;
2129 	if (info == NULL) {
2130 		return FALSE;
2131 	}
2132 
2133 	lck_mtx_lock(&info->throttle_lock);
2134 	assert(ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED);
2135 
2136 	if (sleep_amount == 1 && !ut->uu_throttle_bc) {
2137 		sleep_amount = 0;
2138 	}
2139 
2140 	int result = FALSE;
2141 
2142 	int throttle_type = throttle_io_will_be_throttled_internal(info, NULL, NULL);
2143 	if (throttle_type > THROTTLE_DISENGAGED) {
2144 		result = TRUE;
2145 		if ((throttle_type == THROTTLE_ENGAGED) && (sleep_amount == 0)) {
2146 			result = FALSE;
2147 		}
2148 	}
2149 
2150 	lck_mtx_unlock(&info->throttle_lock);
2151 
2152 	return result;
2153 }
2154 
2155 
2156 /*
2157  * KPI routine
2158  *
2159  * set a kernel thread's IO policy.  policy can be:
2160  * IOPOL_NORMAL, IOPOL_THROTTLE, IOPOL_PASSIVE, IOPOL_UTILITY, IOPOL_STANDARD
2161  *
2162  * Explanations of these policies are in the setiopolicy_np man page.
2163  */
2164 void
2165 throttle_set_thread_io_policy(int policy)
2166 {
2167 	proc_set_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, policy);
2168 }
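/*
 * Example (illustrative sketch only): a kernel thread doing background work
 * can demote its own I/O before issuing it and restore the policy afterwards.
 * The particular IOPOL_THROTTLE/IOPOL_NORMAL pairing is just for illustration.
 *
 *	throttle_set_thread_io_policy(IOPOL_THROTTLE);
 *	// ... issue low-priority I/O ...
 *	throttle_set_thread_io_policy(IOPOL_NORMAL);
 */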
2169 
2170 int
2171 throttle_get_thread_effective_io_policy()
2172 {
2173 	return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
2174 }
2175 
2176 int
2177 throttle_thread_io_tier_above_metadata(void)
2178 {
2179 	return throttle_get_thread_effective_io_policy() < IOSCHED_METADATA_TIER;
2180 }
2181 
2182 void
2183 throttle_info_reset_window(uthread_t ut)
2184 {
2185 	struct _throttle_io_info_t *info;
2186 
2187 	if (ut == NULL) {
2188 		ut = current_uthread();
2189 	}
2190 
2191 	if ((info = ut->uu_throttle_info)) {
2192 		throttle_info_rel(info);
2193 
2194 		ut->uu_throttle_info = NULL;
2195 		ut->uu_lowpri_window = 0;
2196 		ut->uu_throttle_bc = false;
2197 	}
2198 }
2199 
2200 static
2201 void
2202 throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle, boolean_t isssd)
2203 {
2204 	if (lowpri_throttle_enabled == 0 || info->throttle_disabled) {
2205 		return;
2206 	}
2207 
2208 	if (info->throttle_io_periods == 0) {
2209 		throttle_init_throttle_period(info, isssd);
2210 	}
2211 	if (ut->uu_throttle_info == NULL) {
2212 		ut->uu_throttle_info = info;
2213 		throttle_info_ref(info);
2214 		DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );
2215 
2216 		ut->uu_lowpri_window = 1;
2217 		ut->uu_throttle_bc = BC_throttle;
2218 	}
2219 }
2220 
2221 /*
2222  * Update inflight IO count and throttling window
2223  * Should be called when an IO is done
2224  *
2225  * Only affects IO that was sent through spec_strategy
2226  */
2227 void
2228 throttle_info_end_io(buf_t bp)
2229 {
2230 	vnode_t vp;
2231 	mount_t mp;
2232 	struct bufattr *bap;
2233 	struct _throttle_io_info_t *info;
2234 	int io_tier;
2235 
2236 	bap = &bp->b_attr;
2237 	if (!ISSET(bap->ba_flags, BA_STRATEGY_TRACKED_IO)) {
2238 		return;
2239 	}
2240 	CLR(bap->ba_flags, BA_STRATEGY_TRACKED_IO);
2241 
2242 	vp = buf_vnode(bp);
2243 	mp = vp->v_mount;
2244 
2245 	if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
2246 		info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
2247 	} else if (mp != NULL) {
2248 		info = &_throttle_io_info[mp->mnt_devbsdunit];
2249 	} else {
2250 		info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
2251 	}
2252 
2253 	io_tier = GET_BUFATTR_IO_TIER(bap);
2254 	if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) {
2255 		io_tier--;
2256 	}
2257 
2258 	throttle_info_end_io_internal(info, io_tier);
2259 }
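/*
 * Example (illustrative sketch only): a completion path for a buf that was
 * issued through spec_strategy would account for the finished I/O before
 * completing the buf.  The example_biodone() name is hypothetical.
 *
 *	void
 *	example_biodone(buf_t bp)
 *	{
 *		throttle_info_end_io(bp);
 *		buf_biodone(bp);
 *	}
 */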
2260 
2261 /*
2262  * Decrement inflight count initially incremented by throttle_info_update_internal
2263  */
2264 static
2265 void
2266 throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level)
2267 {
2268 	if (throttle_level == THROTTLE_LEVEL_NONE) {
2269 		return;
2270 	}
2271 
2272 	microuptime(&info->throttle_window_start_timestamp[throttle_level]);
2273 	OSDecrementAtomic(&info->throttle_inflight_count[throttle_level]);
2274 	assert(info->throttle_inflight_count[throttle_level] >= 0);
2275 }
2276 
2277 /*
2278  * If inflight is TRUE and bap is NULL then the caller is responsible for calling
2279  * throttle_info_end_io_internal to avoid leaking in-flight I/O.
2280  */
2281 static
2282 int
2283 throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap)
2284 {
2285 	int     thread_throttle_level;
2286 
2287 	if (lowpri_throttle_enabled == 0 || info->throttle_disabled) {
2288 		return THROTTLE_LEVEL_NONE;
2289 	}
2290 
2291 	if (ut == NULL) {
2292 		ut = current_uthread();
2293 	}
2294 
2295 	if (bap && inflight && !ut->uu_throttle_bc) {
2296 		thread_throttle_level = GET_BUFATTR_IO_TIER(bap);
2297 		if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) {
2298 			thread_throttle_level--;
2299 		}
2300 	} else {
2301 		thread_throttle_level = throttle_get_thread_throttle_level(ut);
2302 	}
2303 
2304 	if (thread_throttle_level != THROTTLE_LEVEL_NONE) {
2305 		if (!ISSET(flags, B_PASSIVE)) {
2306 			info->throttle_last_IO_pid[thread_throttle_level] = proc_selfpid();
2307 			if (inflight && !ut->uu_throttle_bc) {
2308 				if (NULL != bap) {
2309 					SET(bap->ba_flags, BA_STRATEGY_TRACKED_IO);
2310 				}
2311 				OSIncrementAtomic(&info->throttle_inflight_count[thread_throttle_level]);
2312 			} else {
2313 				microuptime(&info->throttle_window_start_timestamp[thread_throttle_level]);
2314 			}
2315 			KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, OPEN_THROTTLE_WINDOW)) | DBG_FUNC_NONE,
2316 			    proc_getpid(current_proc()), thread_throttle_level, 0, 0, 0);
2317 		}
2318 		microuptime(&info->throttle_last_IO_timestamp[thread_throttle_level]);
2319 	}
2320 
2321 
2322 	if (thread_throttle_level >= THROTTLE_LEVEL_THROTTLED) {
2323 		/*
2324 		 * I'd really like to do the IOSleep here, but
2325 		 * we may be holding all kinds of filesystem related locks
2326 		 * and the pages for this I/O marked 'busy'...
2327 		 * we don't want to cause a normal task to block on
2328 		 * one of these locks while we're throttling a task marked
2329 		 * for low priority I/O... we'll mark the uthread and
2330 		 * do the delay just before we return from the system
2331 		 * call that triggered this I/O or from vnode_pagein
2332 		 */
2333 		OSAddAtomic(1, &info->throttle_io_count);
2334 
2335 		throttle_info_set_initial_window(ut, info, FALSE, isssd);
2336 	}
2337 
2338 	return thread_throttle_level;
2339 }
2340 
2341 void *
2342 throttle_info_update_by_mount(mount_t mp)
2343 {
2344 	struct _throttle_io_info_t *info;
2345 	uthread_t ut;
2346 	boolean_t isssd = FALSE;
2347 
2348 	ut = current_uthread();
2349 
2350 	if (mp != NULL) {
2351 		if (disk_conditioner_mount_is_ssd(mp)) {
2352 			isssd = TRUE;
2353 		}
2354 		info = &_throttle_io_info[mp->mnt_devbsdunit];
2355 	} else {
2356 		info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
2357 	}
2358 
2359 	if (!ut->uu_lowpri_window) {
2360 		throttle_info_set_initial_window(ut, info, FALSE, isssd);
2361 	}
2362 
2363 	return info;
2364 }
2365 
2366 
2367 /*
2368  * KPI routine
2369  *
2370  * this is usually called before every I/O, used for throttled I/O
2371  * bookkeeping.  This routine has low overhead and does not sleep
2372  */
2373 void
2374 throttle_info_update(void *throttle_info, int flags)
2375 {
2376 	if (throttle_info) {
2377 		throttle_info_update_internal(throttle_info, NULL, flags, FALSE, FALSE, NULL);
2378 	}
2379 }
2380 
2381 /*
2382  * KPI routine
2383  *
2384  * this is usually called before every I/O, used for throttled I/O
2385  * bookkeeping.  This routine has low overhead and does not sleep
2386  */
2387 void
2388 throttle_info_update_by_mask(void *throttle_info_handle, int flags)
2389 {
2390 	void *throttle_info = throttle_info_handle;
2391 
2392 	/*
2393 	 * for now we only use the lowest bit of the throttle mask, so the
2394 	 * handle is the same as the throttle_info.  Later if we store a
2395 	 * set of throttle infos in the handle, we will want to loop through
2396 	 * them and call throttle_info_update in a loop
2397 	 */
2398 	throttle_info_update(throttle_info, flags);
2399 }
2400 /*
2401  * KPI routine
2402  *
2403  * This routine marks the throttle info as disabled. Used for mount points which
2404  * support I/O scheduling.
2405  */
2406 
2407 void
2408 throttle_info_disable_throttle(int devno, boolean_t isfusion)
2409 {
2410 	struct _throttle_io_info_t *info;
2411 
2412 	if (devno < 0 || devno >= LOWPRI_MAX_NUM_DEV) {
2413 		panic("Illegal devno (%d) passed into throttle_info_disable_throttle()", devno);
2414 	}
2415 
2416 	info = &_throttle_io_info[devno];
2417 	// don't disable software throttling on devices that are part of a fusion device
2418 	// and override the software throttle periods to use HDD periods
2419 	if (isfusion) {
2420 		info->throttle_is_fusion_with_priority = isfusion;
2421 		throttle_init_throttle_period(info, FALSE);
2422 	}
2423 	info->throttle_disabled = !info->throttle_is_fusion_with_priority;
2424 	return;
2425 }
2426 
2427 
2428 /*
2429  * KPI routine (private)
2430  * Called to determine if this IO is being throttled to this level so that it can be treated specially
2431  */
2432 int
2433 throttle_info_io_will_be_throttled(void * throttle_info, int policy)
2434 {
2435 	struct _throttle_io_info_t *info = throttle_info;
2436 	struct timeval elapsed;
2437 	uint64_t elapsed_msecs;
2438 	int     throttle_level;
2439 	int     thread_throttle_level;
2440 
2441 	switch (policy) {
2442 	case IOPOL_THROTTLE:
2443 		thread_throttle_level = THROTTLE_LEVEL_TIER3;
2444 		break;
2445 	case IOPOL_UTILITY:
2446 		thread_throttle_level = THROTTLE_LEVEL_TIER2;
2447 		break;
2448 	case IOPOL_STANDARD:
2449 		thread_throttle_level = THROTTLE_LEVEL_TIER1;
2450 		break;
2451 	default:
2452 		thread_throttle_level = THROTTLE_LEVEL_TIER0;
2453 		break;
2454 	}
2455 	for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) {
2456 		if (info->throttle_inflight_count[throttle_level]) {
2457 			break;
2458 		}
2459 
2460 		microuptime(&elapsed);
2461 		timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]);
2462 		elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
2463 
2464 		if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) {
2465 			break;
2466 		}
2467 	}
2468 	if (throttle_level >= thread_throttle_level) {
2469 		/*
2470 		 * we're beyond all of the throttle windows
2471 		 * so go ahead and treat as normal I/O
2472 		 */
2473 		return THROTTLE_DISENGAGED;
2474 	}
2475 	/*
2476 	 * we're in the throttle window
2477 	 */
2478 	return THROTTLE_ENGAGED;
2479 }
2480 
2481 int
2482 throttle_lowpri_window(void)
2483 {
2484 	return current_uthread()->uu_lowpri_window;
2485 }
2486 
2487 
2488 #if CONFIG_IOSCHED
2489 int upl_get_cached_tier(void *);
2490 #endif
2491 
2492 #if CONFIG_PHYS_WRITE_ACCT
2493 extern thread_t pm_sync_thread;
2494 #endif /* CONFIG_PHYS_WRITE_ACCT */
2495 
2496 int
2497 spec_strategy(struct vnop_strategy_args *ap)
2498 {
2499 	buf_t   bp;
2500 	int     bflags;
2501 	int     io_tier;
2502 	int     passive;
2503 	dev_t   bdev;
2504 	uthread_t ut;
2505 	vnode_t vp;
2506 	mount_t mp;
2507 	struct  bufattr *bap;
2508 	int     strategy_ret;
2509 	struct _throttle_io_info_t *throttle_info;
2510 	boolean_t isssd = FALSE;
2511 	boolean_t inflight = FALSE;
2512 	boolean_t upgrade = FALSE;
2513 	int code = 0;
2514 
2515 #if CONFIG_DELAY_IDLE_SLEEP
2516 	proc_t curproc = current_proc();
2517 #endif /* CONFIG_DELAY_IDLE_SLEEP */
2518 
2519 	bp = ap->a_bp;
2520 	bdev = buf_device(bp);
2521 	vp = buf_vnode(bp);
2522 	mp = vp ? vp->v_mount : NULL;
2523 	bap = &bp->b_attr;
2524 
2525 #if CONFIG_PHYS_WRITE_ACCT
2526 	if (current_thread() == pm_sync_thread) {
2527 		OSAddAtomic64(buf_count(bp), (SInt64 *)&(kernel_pm_writes));
2528 	}
2529 #endif /* CONFIG_PHYS_WRITE_ACCT */
2530 
2531 #if CONFIG_IOSCHED
2532 	if (bp->b_flags & B_CLUSTER) {
2533 		io_tier = upl_get_cached_tier(bp->b_upl);
2534 
2535 		if (io_tier == -1) {
2536 			io_tier = throttle_get_io_policy(&ut);
2537 		}
2538 #if DEVELOPMENT || DEBUG
2539 		else {
2540 			int my_io_tier = throttle_get_io_policy(&ut);
2541 
2542 			if (io_tier != my_io_tier) {
2543 				KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, IO_TIER_UPL_MISMATCH)) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), my_io_tier, io_tier, 0, 0);
2544 			}
2545 		}
2546 #endif
2547 	} else {
2548 		io_tier = throttle_get_io_policy(&ut);
2549 	}
2550 #else
2551 	io_tier = throttle_get_io_policy(&ut);
2552 #endif
2553 	passive = throttle_get_passive_io_policy(&ut);
2554 
2555 	/*
2556 	 * Mark if the I/O was upgraded by throttle_get_thread_throttle_level
2557 	 * while preserving the original issued tier (throttle_get_io_policy
2558 	 * does not return upgraded tiers)
2559 	 */
2560 	if (mp && io_tier > throttle_get_thread_throttle_level_internal(ut, io_tier)) {
2561 #if CONFIG_IOSCHED
2562 		if (!(mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) {
2563 			upgrade = TRUE;
2564 		}
2565 #else /* CONFIG_IOSCHED */
2566 		upgrade = TRUE;
2567 #endif /* CONFIG_IOSCHED */
2568 	}
2569 
2570 	if (bp->b_flags & B_META) {
2571 		bap->ba_flags |= BA_META;
2572 	}
2573 
2574 #if CONFIG_IOSCHED
2575 	/*
2576 	 * For metadata reads, ceil the I/O tier to IOSCHED_METADATA_EXPEDITED_TIER if they are expedited, otherwise
2577 	 * ceil it to IOSCHED_METADATA_TIER. Mark them passive if the I/O tier was upgraded.
2578 	 * For metadata writes, set the I/O tier to IOSCHED_METADATA_EXPEDITED_TIER if they are expedited. Otherwise
2579 	 * set it to IOSCHED_METADATA_TIER. In addition, mark them as passive.
2580 	 */
2581 	if (bap->ba_flags & BA_META) {
2582 		if ((mp && (mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) || (bap->ba_flags & BA_IO_SCHEDULED)) {
2583 			if (bp->b_flags & B_READ) {
2584 				if ((bap->ba_flags & BA_EXPEDITED_META_IO) && (io_tier > IOSCHED_METADATA_EXPEDITED_TIER)) {
2585 					io_tier = IOSCHED_METADATA_EXPEDITED_TIER;
2586 					passive = 1;
2587 				} else if (io_tier > IOSCHED_METADATA_TIER) {
2588 					io_tier = IOSCHED_METADATA_TIER;
2589 					passive = 1;
2590 				}
2591 			} else {
2592 				if (bap->ba_flags & BA_EXPEDITED_META_IO) {
2593 					io_tier = IOSCHED_METADATA_EXPEDITED_TIER;
2594 				} else {
2595 					io_tier = IOSCHED_METADATA_TIER;
2596 				}
2597 				passive = 1;
2598 			}
2599 		}
2600 	}
2601 #endif /* CONFIG_IOSCHED */
2602 
2603 	SET_BUFATTR_IO_TIER(bap, io_tier);
2604 
2605 	if (passive) {
2606 		bp->b_flags |= B_PASSIVE;
2607 		bap->ba_flags |= BA_PASSIVE;
2608 	}
2609 
2610 #if CONFIG_DELAY_IDLE_SLEEP
2611 	if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP)) {
2612 		bap->ba_flags |= BA_DELAYIDLESLEEP;
2613 	}
2614 #endif /* CONFIG_DELAY_IDLE_SLEEP */
2615 
2616 	bflags = bp->b_flags;
2617 
2618 	if (((bflags & B_READ) == 0) && ((bflags & B_ASYNC) == 0)) {
2619 		bufattr_markquickcomplete(bap);
2620 	}
2621 
2622 	if (bflags & B_READ) {
2623 		code |= DKIO_READ;
2624 	}
2625 	if (bflags & B_ASYNC) {
2626 		code |= DKIO_ASYNC;
2627 	}
2628 
2629 	if (bap->ba_flags & BA_META) {
2630 		code |= DKIO_META;
2631 	} else if (bflags & B_PAGEIO) {
2632 		code |= DKIO_PAGING;
2633 	}
2634 
2635 	if (io_tier != 0) {
2636 		code |= DKIO_THROTTLE;
2637 	}
2638 
2639 	code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
2640 
2641 	if (bflags & B_PASSIVE) {
2642 		code |= DKIO_PASSIVE;
2643 	}
2644 
2645 	if (bap->ba_flags & BA_NOCACHE) {
2646 		code |= DKIO_NOCACHE;
2647 	}
2648 
2649 	if (upgrade) {
2650 		code |= DKIO_TIER_UPGRADE;
2651 		SET(bap->ba_flags, BA_IO_TIER_UPGRADE);
2652 	}
2653 
2654 	if (kdebug_enable) {
2655 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
2656 		    buf_kernel_addrperm_addr(bp), bdev, buf_blkno(bp), buf_count(bp), 0);
2657 	}
2658 
2659 #if CONFIG_IO_COMPRESSION_STATS
2660 	// Do not run IO Compression Stats when a privileged thread is active
2661 	if (!is_vm_privileged() && !is_external_pageout_thread()) {
2662 		io_compression_stats(bp);
2663 	}
2664 #endif /* CONFIG_IO_COMPRESSION_STATS */
2665 	thread_update_io_stats(current_thread(), buf_count(bp), code);
2666 
2667 	if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
2668 		if (!vp->v_un.vu_specinfo->si_initted) {
2669 			SPEC_INIT_BSDUNIT(vp, vfs_context_current());
2670 		}
2671 		if (vp->v_un.vu_specinfo->si_devbsdunit > (LOWPRI_MAX_NUM_DEV - 1)) {
2672 			panic("Invalid value (%d) for si_devbsdunit for vnode %p",
2673 			    vp->v_un.vu_specinfo->si_devbsdunit, vp);
2674 		}
2675 		if (vp->v_un.vu_specinfo->si_isssd > 1) {
2676 			panic("Invalid value (%d) for si_isssd for vnode %p",
2677 			    vp->v_un.vu_specinfo->si_isssd, vp);
2678 		}
2679 		throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
2680 		isssd = vp->v_un.vu_specinfo->si_isssd;
2681 	} else if (mp != NULL) {
2682 		if (disk_conditioner_mount_is_ssd(mp)) {
2683 			isssd = TRUE;
2684 		}
2685 		/*
2686 		 * Partially initialized mounts don't have a final devbsdunit and should not be tracked.
2687 		 * Verify that devbsdunit is initialized (non-zero) or that 0 is the correct initialized value
2688 		 * (mnt_throttle_mask is initialized and num_trailing_0 would be 0)
2689 		 */
2690 		if (mp->mnt_devbsdunit || (mp->mnt_throttle_mask != LOWPRI_MAX_NUM_DEV - 1 && mp->mnt_throttle_mask & 0x1)) {
2691 			inflight = TRUE;
2692 		}
2693 		throttle_info = &_throttle_io_info[mp->mnt_devbsdunit];
2694 	} else {
2695 		throttle_info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
2696 	}
2697 
2698 	throttle_info_update_internal(throttle_info, ut, bflags, isssd, inflight, bap);
2699 
2700 	if ((bflags & B_READ) == 0) {
2701 		microuptime(&throttle_info->throttle_last_write_timestamp);
2702 
2703 		if (!(vp && (vp->v_type == VBLK || vp->v_type == VCHR)) && mp) {
2704 			mp->mnt_last_write_issued_timestamp = throttle_info->throttle_last_write_timestamp;
2705 			INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_write_size);
2706 		}
2707 	} else if (!(vp && (vp->v_type == VBLK || vp->v_type == VCHR)) && mp) {
2708 		INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_read_size);
2709 	}
2710 	/*
2711 	 * The BootCache may give us special information about
2712 	 * the IO, so it returns special values that we check
2713 	 * for here.
2714 	 *
2715 	 * IO_SATISFIED_BY_CACHE
2716 	 * The read has been satisfied by the boot cache. Don't
2717 	 * throttle the thread unnecessarily.
2718 	 *
2719 	 * IO_SHOULD_BE_THROTTLED
2720 	 * The boot cache is playing back a playlist and this IO
2721 	 * cut through. Throttle it so we're not cutting through
2722 	 * the boot cache too often.
2723 	 *
2724 	 * Note that typical strategy routines are defined with
2725 	 * a void return so we'll get garbage here. In the
2726 	 * unlikely case the garbage matches our special return
2727 	 * value, it's not a big deal since we're only adjusting
2728 	 * the throttling delay.
2729 	 */
2730 #define IO_SATISFIED_BY_CACHE  ((int)0xcafefeed)
2731 #define IO_SHOULD_BE_THROTTLED ((int)0xcafebeef)
2732 #pragma clang diagnostic push
2733 #pragma clang diagnostic ignored "-Wcast-function-type"
2734 
2735 	typedef int strategy_fcn_ret_t(struct buf *bp);
2736 
2737 	strategy_ret = (*(strategy_fcn_ret_t*)bdevsw[major(bdev)].d_strategy)(bp);
2738 
2739 #pragma clang diagnostic pop
2740 
2741 	// disk conditioner needs to track when this I/O actually starts
2742 	// which means track it after `strategy` which may include delays
2743 	// from inflight I/Os
2744 	microuptime(&bp->b_timestamp_tv);
2745 
2746 	if (IO_SATISFIED_BY_CACHE == strategy_ret) {
2747 		/*
2748 		 * If this was a throttled IO satisfied by the boot cache,
2749 		 * don't delay the thread.
2750 		 */
2751 		throttle_info_reset_window(ut);
2752 	} else if (IO_SHOULD_BE_THROTTLED == strategy_ret) {
2753 		/*
2754 		 * If the boot cache indicates this IO should be throttled,
2755 		 * delay the thread.
2756 		 */
2757 		throttle_info_set_initial_window(ut, throttle_info, TRUE, isssd);
2758 	}
2759 	return 0;
2760 }
2761 
2762 
2763 /*
2764  * This is a noop, simply returning what one has been given.
2765  */
2766 int
2767 spec_blockmap(__unused struct vnop_blockmap_args *ap)
2768 {
2769 	return ENOTSUP;
2770 }
2771 
2772 
2773 /*
2774  * Device close routine
2775  */
2776 int
2777 spec_close(struct vnop_close_args *ap)
2778 {
2779 	struct vnode *vp = ap->a_vp;
2780 	dev_t dev = vp->v_rdev;
2781 	int error = 0;
2782 	int flags = ap->a_fflag;
2783 	struct proc *p = vfs_context_proc(ap->a_context);
2784 	struct session *sessp;
2785 	struct pgrp *pg;
2786 
2787 	switch (vp->v_type) {
2788 	case VCHR:
2789 		/*
2790 		 * Hack: a tty device that is a controlling terminal
2791 		 * has a reference from the session structure.
2792 		 * We cannot easily tell that a character device is
2793 		 * a controlling terminal, unless it is the closing
2794 		 * process' controlling terminal.  In that case,
2795 		 * if the reference count is 1 (this is the very
2796 		 * last close), we clear the session's reference to the tty.
2797 		 */
2798 		pg = proc_pgrp(p, &sessp);
2799 		devsw_lock(dev, S_IFCHR);
2800 		if (sessp != SESSION_NULL) {
2801 			if (vp == sessp->s_ttyvp && vcount(vp) == 1) {
2802 				struct tty *tp = TTY_NULL;
2803 
2804 				devsw_unlock(dev, S_IFCHR);
2805 				session_lock(sessp);
2806 				if (vp == sessp->s_ttyvp) {
2807 					tp = session_clear_tty_locked(sessp);
2808 				}
2809 				session_unlock(sessp);
2810 
2811 				if (tp != TTY_NULL) {
2812 					ttyfree(tp);
2813 				}
2814 				devsw_lock(dev, S_IFCHR);
2815 			}
2816 		}
2817 		pgrp_rele(pg);
2818 
2819 		if (--vp->v_specinfo->si_opencount < 0) {
2820 			panic("negative open count (c, %u, %u)", major(dev), minor(dev));
2821 		}
2822 
2823 		/*
2824 		 * close on last reference or on vnode revoke call
2825 		 */
2826 		if (vcount(vp) == 0 || (flags & IO_REVOKE) != 0) {
2827 			error = cdevsw[major(dev)].d_close(dev, flags, S_IFCHR, p);
2828 		}
2829 
2830 		devsw_unlock(dev, S_IFCHR);
2831 		break;
2832 
2833 	case VBLK:
2834 		/*
2835 		 * If there is more than one outstanding open, don't
2836 		 * send the close to the device.
2837 		 */
2838 		devsw_lock(dev, S_IFBLK);
2839 		if (vcount(vp) > 1) {
2840 			vp->v_specinfo->si_opencount--;
2841 			devsw_unlock(dev, S_IFBLK);
2842 			return 0;
2843 		}
2844 		devsw_unlock(dev, S_IFBLK);
2845 
2846 		/*
2847 		 * On last close of a block device (that isn't mounted)
2848 		 * we must invalidate any in core blocks, so that
2849 		 * we can, for instance, change floppy disks.
2850 		 */
2851 		if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context))) {
2852 			return error;
2853 		}
2854 
2855 		error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
2856 		if (error) {
2857 			return error;
2858 		}
2859 
2860 		devsw_lock(dev, S_IFBLK);
2861 
2862 		if (--vp->v_specinfo->si_opencount < 0) {
2863 			panic("negative open count (b, %u, %u)", major(dev), minor(dev));
2864 		}
2865 
2866 		if (vcount(vp) == 0) {
2867 			error = bdevsw[major(dev)].d_close(dev, flags, S_IFBLK, p);
2868 		}
2869 
2870 		devsw_unlock(dev, S_IFBLK);
2871 		break;
2872 
2873 	default:
2874 		panic("spec_close: not special");
2875 		return EBADF;
2876 	}
2877 
2878 	return error;
2879 }
2880 
2881 /*
2882  * Return POSIX pathconf information applicable to special devices.
2883  */
2884 int
2885 spec_pathconf(struct vnop_pathconf_args *ap)
2886 {
2887 	switch (ap->a_name) {
2888 	case _PC_LINK_MAX:
2889 		*ap->a_retval = LINK_MAX;
2890 		return 0;
2891 	case _PC_MAX_CANON:
2892 		*ap->a_retval = MAX_CANON;
2893 		return 0;
2894 	case _PC_MAX_INPUT:
2895 		*ap->a_retval = MAX_INPUT;
2896 		return 0;
2897 	case _PC_PIPE_BUF:
2898 		*ap->a_retval = PIPE_BUF;
2899 		return 0;
2900 	case _PC_CHOWN_RESTRICTED:
2901 		*ap->a_retval = 200112;         /* _POSIX_CHOWN_RESTRICTED */
2902 		return 0;
2903 	case _PC_VDISABLE:
2904 		*ap->a_retval = _POSIX_VDISABLE;
2905 		return 0;
2906 	default:
2907 		return EINVAL;
2908 	}
2909 	/* NOTREACHED */
2910 }
2911 
2912 /*
2913  * Special device failed operation
2914  */
2915 int
2916 spec_ebadf(__unused void *dummy)
2917 {
2918 	return EBADF;
2919 }
2920 
2921 /* Blktooff derives file offset from logical block number */
2922 int
2923 spec_blktooff(struct vnop_blktooff_args *ap)
2924 {
2925 	struct vnode *vp = ap->a_vp;
2926 
2927 	switch (vp->v_type) {
2928 	case VCHR:
2929 		*ap->a_offset = (off_t)-1; /* failure */
2930 		return ENOTSUP;
2931 
2932 	case VBLK:
2933 		printf("spec_blktooff: not implemented for VBLK\n");
2934 		*ap->a_offset = (off_t)-1; /* failure */
2935 		return ENOTSUP;
2936 
2937 	default:
2938 		panic("spec_blktooff type");
2939 	}
2940 	/* NOTREACHED */
2941 
2942 	return 0;
2943 }
2944 
2945 /* Offtoblk derives logical block number from file offset */
2946 int
2947 spec_offtoblk(struct vnop_offtoblk_args *ap)
2948 {
2949 	struct vnode *vp = ap->a_vp;
2950 
2951 	switch (vp->v_type) {
2952 	case VCHR:
2953 		*ap->a_lblkno = (daddr64_t)-1; /* failure */
2954 		return ENOTSUP;
2955 
2956 	case VBLK:
2957 		printf("spec_offtoblk: not implemented for VBLK\n");
2958 		*ap->a_lblkno = (daddr64_t)-1; /* failure */
2959 		return ENOTSUP;
2960 
2961 	default:
2962 		panic("spec_offtoblk type");
2963 	}
2964 	/* NOTREACHED */
2965 
2966 	return 0;
2967 }
2968 
2969 static int filt_specattach(struct knote *kn, struct kevent_qos_s *kev);
2970 static void filt_specdetach(struct knote *kn);
2971 static int filt_specevent(struct knote *kn, long hint);
2972 static int filt_spectouch(struct knote *kn, struct kevent_qos_s *kev);
2973 static int filt_specprocess(struct knote *kn, struct kevent_qos_s *kev);
2974 
2975 SECURITY_READ_ONLY_EARLY(struct filterops) spec_filtops = {
2976 	.f_isfd    = 1,
2977 	.f_attach  = filt_specattach,
2978 	.f_detach  = filt_specdetach,
2979 	.f_event   = filt_specevent,
2980 	.f_touch   = filt_spectouch,
2981 	.f_process = filt_specprocess,
2982 };
2983 
2984 static void
2985 filt_spec_make_eof(struct knote *kn)
2986 {
2987 	/*
2988 	 * The spec filter might touch kn_flags from f_event
2989 	 * without holding "the primitive lock", so make it atomic.
2990 	 */
2991 	os_atomic_or(&kn->kn_flags, EV_EOF | EV_ONESHOT, relaxed);
2992 }
2993 
2994 static int
2995 filt_spec_common(struct knote *kn, struct kevent_qos_s *kev, bool attach)
2996 {
2997 	uthread_t uth = current_uthread();
2998 	vfs_context_t ctx = vfs_context_current();
2999 	vnode_t vp = (vnode_t)fp_get_data(kn->kn_fp);
3000 	__block bool selrecorded = false;
3001 	struct select_set *old_wqs;
3002 	int64_t data = 0;
3003 	int ret, selret;
3004 
3005 	if (kn->kn_flags & EV_EOF) {
3006 		ret = FILTER_ACTIVE;
3007 		goto out;
3008 	}
3009 
3010 	if (!attach && vnode_getwithvid(vp, vnode_vid(vp)) != 0) {
3011 		filt_spec_make_eof(kn);
3012 		ret = FILTER_ACTIVE;
3013 		goto out;
3014 	}
3015 
3016 	selspec_record_hook_t cb = ^(struct selinfo *si) {
3017 		selspec_attach(kn, si);
3018 		selrecorded = true;
3019 	};
3020 
3021 	old_wqs = uth->uu_selset;
3022 	uth->uu_selset = SELSPEC_RECORD_MARKER;
3023 	selret = VNOP_SELECT(vp, knote_get_seltype(kn), 0, cb, ctx);
3024 	uth->uu_selset = old_wqs;
3025 
3026 	if (!attach) {
3027 		vnode_put(vp);
3028 	}
3029 
3030 	if (!selrecorded && selret == 0) {
3031 		/*
3032 		 * The device indicated that there's no data to read,
3033 		 * but didn't call `selrecord`.
3034 		 *
3035 		 * Nothing will be notified of changes to this vnode,
3036 		 * so return an error back to user space on attach,
3037 		 * or pretend the knote disappeared for other cases,
3038 		 * to make it clear that the knote is not attached.
3039 		 */
3040 		if (attach) {
3041 			knote_set_error(kn, ENODEV);
3042 			return 0;
3043 		}
3044 
3045 		filt_spec_make_eof(kn);
3046 		ret = FILTER_ACTIVE;
3047 		goto out;
3048 	}
3049 
3050 	if (kn->kn_vnode_use_ofst) {
3051 		if (kn->kn_fp->fp_glob->fg_offset >= (uint32_t)selret) {
3052 			data = 0;
3053 		} else {
3054 			data = ((uint32_t)selret) - kn->kn_fp->fp_glob->fg_offset;
3055 		}
3056 	} else {
3057 		data = selret;
3058 	}
3059 
3060 	if (data >= knote_low_watermark(kn)) {
3061 		ret = FILTER_ACTIVE;
3062 	} else {
3063 		ret = 0;
3064 	}
3065 out:
3066 	if (ret) {
3067 		knote_fill_kevent(kn, kev, data);
3068 	}
3069 	return ret;
3070 }
3071 
3072 static int
3073 filt_specattach(struct knote *kn, __unused struct kevent_qos_s *kev)
3074 {
3075 	vnode_t vp = (vnode_t)fp_get_data(kn->kn_fp); /* Already have iocount, and vnode is alive */
3076 	dev_t dev;
3077 
3078 	assert(vnode_ischr(vp));
3079 
3080 	dev = vnode_specrdev(vp);
3081 
3082 	/*
3083 	 * For a few special kinds of devices, we can attach knotes with
3084 	 * no restrictions because their "select" vectors return the amount
3085 	 * of data available.  Others require an explicit NOTE_LOWAT with
3086 	 * data of 1, indicating that the caller doesn't care about actual
3087 	 * data counts, just an indication that the device has data.
3088 	 */
3089 	if (!kn->kn_vnode_kqok &&
3090 	    ((kn->kn_sfflags & NOTE_LOWAT) == 0 || kn->kn_sdata != 1)) {
3091 		knote_set_error(kn, EINVAL);
3092 		return 0;
3093 	}
3094 
3095 	return filt_spec_common(kn, kev, true);
3096 }
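/*
 * Example (illustrative user-space sketch, not part of this file): for
 * character devices whose select vector does not report a byte count, a
 * read knote can still be attached by asking only for a "has data"
 * indication via NOTE_LOWAT with data of 1.  The kq/fd variables are
 * hypothetical.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 1, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */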
3097 
3098 static void
3099 filt_specdetach(struct knote *kn)
3100 {
3101 	selspec_detach(kn);
3102 }
3103 
3104 static int
3105 filt_specevent(struct knote *kn, long hint)
3106 {
3107 	/* Due to selwakeup_internal() on SI_SELSPEC */
3108 	assert(KNOTE_IS_AUTODETACHED(kn));
3109 	knote_kn_hook_set_raw(kn, NULL);
3110 
3111 	/* called by selwakeup with the selspec_lock lock held */
3112 	if (hint & NOTE_REVOKE) {
3113 		filt_spec_make_eof(kn);
3114 	}
3115 	return FILTER_ACTIVE;
3116 }
3117 
3118 static int
3119 filt_spectouch(struct knote *kn, struct kevent_qos_s *kev)
3120 {
3121 	kn->kn_sdata = kev->data;
3122 	kn->kn_sfflags = kev->fflags;
3123 
3124 	return filt_spec_common(kn, kev, false);
3125 }
3126 
3127 static int
3128 filt_specprocess(struct knote *kn, struct kevent_qos_s *kev)
3129 {
3130 	return filt_spec_common(kn, kev, false);
3131 }
3132