xref: /xnu-11215.1.10/bsd/kern/sys_generic.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88 
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99 
100 #include <machine/smp.h>
101 #include <mach/mach_types.h>
102 #include <kern/kern_types.h>
103 #include <kern/assert.h>
104 #include <kern/kalloc.h>
105 #include <kern/thread.h>
106 #include <kern/clock.h>
107 #include <kern/ledger.h>
108 #include <kern/monotonic.h>
109 #include <kern/task.h>
110 #include <kern/telemetry.h>
111 #include <kern/waitq.h>
112 #include <kern/sched_hygiene.h>
113 #include <kern/sched_prim.h>
114 #include <kern/mpsc_queue.h>
115 #include <kern/debug.h>
116 
117 #include <sys/mbuf.h>
118 #include <sys/domain.h>
119 #include <sys/socket.h>
120 #include <sys/socketvar.h>
121 #include <sys/errno.h>
122 #include <sys/syscall.h>
123 #include <sys/pipe.h>
124 
125 #include <security/audit/audit.h>
126 
127 #include <net/if.h>
128 #include <net/route.h>
129 
130 #include <netinet/in.h>
131 #include <netinet/in_systm.h>
132 #include <netinet/ip.h>
133 #include <netinet/in_pcb.h>
134 #include <netinet/ip_var.h>
135 #include <netinet/ip6.h>
136 #include <netinet/tcp.h>
137 #include <netinet/tcp_fsm.h>
138 #include <netinet/tcp_seq.h>
139 #include <netinet/tcp_timer.h>
140 #include <netinet/tcp_var.h>
141 #include <netinet/tcpip.h>
142 /* for wait queue based select */
143 #include <kern/waitq.h>
144 #include <sys/vnode_internal.h>
145 /* for remote time api*/
146 #include <kern/remote_time.h>
147 #include <os/log.h>
148 #include <sys/log_data.h>
149 
150 #include <machine/monotonic.h>
151 
152 #if CONFIG_MACF
153 #include <security/mac_framework.h>
154 #endif
155 
156 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
157 #include <mach_debug/mach_debug_types.h>
158 #endif
159 
160 /* for entitlement check */
161 #include <IOKit/IOBSD.h>
162 
163 /* XXX should be in a header file somewhere */
164 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
165 
166 int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
167 __private_extern__ int  dofileread(vfs_context_t ctx, struct fileproc *fp,
168     user_addr_t bufp, user_size_t nbyte,
169     off_t offset, int flags, user_ssize_t *retval);
170 __private_extern__ int  dofilewrite(vfs_context_t ctx, struct fileproc *fp,
171     user_addr_t bufp, user_size_t nbyte,
172     off_t offset, int flags, user_ssize_t *retval);
173 static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
174 
175 /* needed by guarded_writev, etc. */
176 int write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
177     off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval);
178 int writev_uio(struct proc *p, int fd, user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
179     guardid_t *puguard, user_ssize_t *retval);
180 
181 #define f_flag fp_glob->fg_flag
182 #define f_type fp_glob->fg_ops->fo_type
183 #define f_cred fp_glob->fg_cred
184 #define f_ops fp_glob->fg_ops
185 
186 /*
187  * Validate if the file can be used for random access (pread, pwrite, etc).
188  *
189  * Conditions:
190  *		proc_fdlock is held
191  *
192  * Returns:    0                       Success
193  *             ESPIPE
194  *             ENXIO
195  */
static int
valid_for_random_access(struct fileproc *fp)
{
	/* Only vnode-backed files can honor an explicit offset. */
	if (__improbable(fp->f_type != DTYPE_VNODE)) {
		return ESPIPE;
	}

	vnode_t vp = (struct vnode *)fp_get_data(fp);
	/* FIFOs have no file position, so positional I/O is meaningless. */
	if (__improbable(vnode_isfifo(vp))) {
		return ESPIPE;
	}

	/* TTY devices do not support seeking either. */
	if (__improbable(vp->v_flag & VISTTY)) {
		return ENXIO;
	}

	return 0;
}
214 
215 /*
216  * Returns:	0			Success
217  *		EBADF
218  *		ESPIPE
219  *		ENXIO
220  *	fp_lookup:EBADF
221  *  valid_for_random_access:ESPIPE
222  *  valid_for_random_access:ENXIO
223  */
static int
preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
{
	int     error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	proc_fdlock_spin(p);

	/*
	 * Look up the descriptor under the fd lock; on success the caller
	 * owes a matching fp_drop() (done here on error paths, or by our
	 * caller on success).
	 */
	error = fp_lookup(p, fd, &fp, 1);

	if (error) {
		proc_fdunlock(p);
		return error;
	}
	/* The descriptor must have been opened for reading. */
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	if (check_for_pread) {
		/* pread(2)/preadv(2) additionally require positional I/O support. */
		if ((error = valid_for_random_access(fp))) {
			goto out;
		}
	}

	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
260 
/*
 * Perform the read described by uio against fp.
 *
 * *retval is set to the number of bytes actually transferred.  A read
 * interrupted by ERESTART/EINTR/EWOULDBLOCK after partial progress is
 * reported as success with the partial count.
 */
static int
fp_readv(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
	int error;
	user_ssize_t count;

	/* Validate the iovecs and compute the total residual byte count. */
	if ((error = uio_calculateresid_user(uio))) {
		*retval = 0;
		return error;
	}

	count = uio_resid(uio);
	error = fo_read(fp, uio, flags, ctx);

	switch (error) {
	case ERESTART:
	case EINTR:
	case EWOULDBLOCK:
		/* Partial transfer: suppress the error, report the bytes moved. */
		if (uio_resid(uio) != count) {
			error = 0;
		}
		break;

	default:
		break;
	}

	/* Bytes transferred = requested total minus what is left over. */
	*retval = count - uio_resid(uio);
	return error;
}
292 
293 /*
294  * Returns:	0			Success
295  *		EINVAL
296  *	fo_read:???
297  */
298 __private_extern__ int
dofileread(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)299 dofileread(vfs_context_t ctx, struct fileproc *fp,
300     user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
301     user_ssize_t *retval)
302 {
303 	UIO_STACKBUF(uio_buf, 1);
304 	uio_t uio;
305 	int spacetype;
306 
307 	if (nbyte > INT_MAX) {
308 		*retval = 0;
309 		return EINVAL;
310 	}
311 
312 	spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
313 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ, &uio_buf[0],
314 	    sizeof(uio_buf));
315 
316 	if (uio_addiov(uio, bufp, nbyte) != 0) {
317 		*retval = 0;
318 		return EINVAL;
319 	}
320 
321 	return fp_readv(ctx, fp, uio, flags, retval);
322 }
323 
/*
 * Common read path: look up and validate fd, then issue the uio read
 * using the credential the file was opened with.
 */
static int
readv_internal(struct proc *p, int fd, uio_t uio, int flags,
    user_ssize_t *retval)
{
	struct fileproc *fp = NULL;
	struct vfs_context context;
	int error;

	/* FOF_OFFSET in flags selects positional I/O (pread semantics). */
	if ((error = preparefileread(p, &fp, fd, flags & FOF_OFFSET))) {
		*retval = 0;
		return error;
	}

	/* Issue the I/O with the file's credential, not the caller's. */
	context = *(vfs_context_current());
	context.vc_ucred = fp->fp_glob->fg_cred;

	error = fp_readv(&context, fp, uio, flags, retval);

	fp_drop(p, fd, fp, 0);
	return error;
}
345 
346 static int
read_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)347 read_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
348     off_t offset, int flags, user_ssize_t *retval)
349 {
350 	UIO_STACKBUF(uio_buf, 1);
351 	uio_t uio;
352 	int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
353 
354 	if (nbyte > INT_MAX) {
355 		*retval = 0;
356 		return EINVAL;
357 	}
358 
359 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ,
360 	    &uio_buf[0], sizeof(uio_buf));
361 
362 	if (uio_addiov(uio, buf, nbyte) != 0) {
363 		*retval = 0;
364 		return EINVAL;
365 	}
366 
367 	return readv_internal(p, fd, uio, flags, retval);
368 }
369 
/*
 * Non-cancellable read(2): read at the current file offset
 * (offset -1, FOF_OFFSET not set).
 */
int
read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
{
	return read_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
	           retval);
}
376 
377 /*
378  * Read system call.
379  *
380  * Returns:	0			Success
381  *	preparefileread:EBADF
382  *	preparefileread:ESPIPE
383  *	preparefileread:ENXIO
384  *	preparefileread:EBADF
385  *	dofileread:???
386  */
int
read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return read_nocancel(p, (struct read_nocancel_args *)uap, retval);
}
393 
/*
 * Non-cancellable pread(2): positional read at uap->offset.
 */
int
pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace fd, size, and the 64-bit offset split into two 32-bit halves. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	return read_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, retval);
}
403 
404 /*
405  * Pread system call
406  *
407  * Returns:	0			Success
408  *	preparefileread:EBADF
409  *	preparefileread:ESPIPE
410  *	preparefileread:ENXIO
411  *	preparefileread:EBADF
412  *	dofileread:???
413  */
int
pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval);
}
420 
421 /*
422  * Vector read.
423  *
424  * Returns:    0                       Success
425  *             EINVAL
426  *             ENOMEM
427  *     preparefileread:EBADF
428  *     preparefileread:ESPIPE
429  *     preparefileread:ENXIO
430  *     preparefileread:EBADF
431  *     copyin:EFAULT
432  *     rd_uio:???
433  */
static int
readv_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    user_ssize_t *retval)
{
	uio_t uio = NULL;
	int error;
	struct user_iovec *iovp;

	/* Bound the iovec count as required by readv(2). */
	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) {
		error = EINVAL;
		goto out;
	}

	uio = uio_create(iovcnt, offset,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_READ);

	/* Backing storage for the iovec array inside the uio. */
	iovp = uio_iovsaddr_user(uio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Copy the caller's iovec array into the uio's storage. */
	error = copyin_user_iovec_array(user_iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    iovcnt, iovp);

	if (error) {
		goto out;
	}

	error = readv_internal(p, fd, uio, flags, retval);

out:
	if (uio != NULL) {
		uio_free(uio);
	}

	return error;
}
475 
/*
 * Non-cancellable readv(2): scatter read at the current file offset.
 */
int
readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
{
	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval);
}
481 
482 /*
483  * Scatter read system call.
484  */
int
readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval);
}
491 
/*
 * Non-cancellable preadv(2): scatter read at uap->offset.
 */
int
sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval)
{
	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
	           FOF_OFFSET, retval);
}
498 
499 /*
500  * Preadv system call
501  */
int
sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval);
}
508 
509 /*
510  * Returns:	0			Success
511  *		EBADF
512  *		ESPIPE
513  *		ENXIO
514  *	fp_lookup:EBADF
515  *	fp_guard_exception:???
516  *  valid_for_random_access:ESPIPE
517  *  valid_for_random_access:ENXIO
518  */
static int
preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite,
    guardid_t *puguard)
{
	int error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	proc_fdlock_spin(p);

	if (puguard) {
		/* Guarded variant: the caller supplied the expected guard id. */
		error = fp_lookup_guarded(p, fd, *puguard, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}
	} else {
		error = fp_lookup(p, fd, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		/* Allow EBADF first. */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}

		/* An unguarded write on a write-guarded fd raises a guard exception. */
		if (fp_isguarded(fp, GUARD_WRITE)) {
			error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
			goto out;
		}
	}

	if (check_for_pwrite) {
		/* pwrite(2)/pwritev(2) additionally require positional I/O support. */
		if ((error = valid_for_random_access(fp))) {
			goto out;
		}
	}

	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
576 
/*
 * Perform the write described by uio against fp.
 *
 * *retval is set to the number of bytes actually transferred; a write
 * interrupted by ERESTART/EINTR/EWOULDBLOCK after partial progress is
 * reported as success with the partial count.  EPIPE additionally
 * posts SIGPIPE unless the file is a socket or has FG_NOSIGPIPE set.
 */
static int
fp_writev(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
	int error;
	user_ssize_t count;

	/* Validate the iovecs and compute the total residual byte count. */
	if ((error = uio_calculateresid_user(uio))) {
		*retval = 0;
		return error;
	}

	count = uio_resid(uio);
	error = fo_write(fp, uio, flags, ctx);

	switch (error) {
	case ERESTART:
	case EINTR:
	case EWOULDBLOCK:
		/* Partial transfer: suppress the error, report the bytes moved. */
		if (uio_resid(uio) != count) {
			error = 0;
		}
		break;

	case EPIPE:
		if (fp->f_type != DTYPE_SOCKET &&
		    (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) {
			/* XXX Raise the signal on the thread? */
			psignal(vfs_context_proc(ctx), SIGPIPE);
		}
		break;

	default:
		break;
	}

	/* If any bytes moved, mark the fileglob as having been written to. */
	if ((*retval = count - uio_resid(uio))) {
		os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed);
	}

	return error;
}
619 
620 /*
621  * Returns:	0			Success
622  *		EINVAL
623  *	<fo_write>:EPIPE
624  *	<fo_write>:???			[indirect through struct fileops]
625  */
626 __private_extern__ int
dofilewrite(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)627 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
628     user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
629     user_ssize_t *retval)
630 {
631 	UIO_STACKBUF(uio_buf, 1);
632 	uio_t uio;
633 	int spacetype;
634 
635 	if (nbyte > INT_MAX) {
636 		*retval = 0;
637 		return EINVAL;
638 	}
639 
640 	spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
641 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE, &uio_buf[0],
642 	    sizeof(uio_buf));
643 
644 	if (uio_addiov(uio, bufp, nbyte) != 0) {
645 		*retval = 0;
646 		return EINVAL;
647 	}
648 
649 	return fp_writev(ctx, fp, uio, flags, retval);
650 }
651 
/*
 * Common write path: look up and validate fd (optionally against a
 * guard), then issue the uio write using the credential the file was
 * opened with.
 */
static int
writev_internal(struct proc *p, int fd, uio_t uio, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	struct fileproc *fp = NULL;
	struct vfs_context context;
	int error;

	/* FOF_OFFSET in flags selects positional I/O (pwrite semantics). */
	if ((error = preparefilewrite(p, &fp, fd, flags & FOF_OFFSET, puguard))) {
		*retval = 0;
		return error;
	}

	/* Issue the I/O with the file's credential, not the caller's. */
	context = *(vfs_context_current());
	context.vc_ucred = fp->fp_glob->fg_cred;

	error = fp_writev(&context, fp, uio, flags, retval);

	fp_drop(p, fd, fp, 0);
	return error;
}
673 
674 int
write_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,guardid_t * puguard,user_ssize_t * retval)675 write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
676     off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval)
677 {
678 	UIO_STACKBUF(uio_buf, 1);
679 	uio_t uio;
680 	int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
681 
682 	if (nbyte > INT_MAX) {
683 		*retval = 0;
684 		return EINVAL;
685 	}
686 
687 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE,
688 	    &uio_buf[0], sizeof(uio_buf));
689 
690 	if (uio_addiov(uio, buf, nbyte) != 0) {
691 		*retval = 0;
692 		return EINVAL;
693 	}
694 
695 	return writev_internal(p, fd, uio, flags, puguard, retval);
696 }
697 
/*
 * Non-cancellable write(2): unguarded write at the current file offset
 * (offset -1, FOF_OFFSET not set).
 */
int
write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
{
	return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
	           NULL, retval);
}
704 
705 /*
706  * Write system call
707  *
708  * Returns:	0			Success
709  *		EBADF
710  *	fp_lookup:EBADF
711  *	dofilewrite:???
712  */
int
write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return write_nocancel(p, (struct write_nocancel_args *)uap, retval);
}
719 
/*
 * Non-cancellable pwrite(2): positional write at uap->offset.
 */
int
pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace fd, size, and the 64-bit offset split into two 32-bit halves. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	/* XXX: Should be < 0 instead? (See man page + pwritev) */
	if (uap->offset == (off_t)-1) {
		return EINVAL;
	}

	return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
734 
735 /*
736  * pwrite system call
737  *
738  * Returns:	0			Success
739  *		EBADF
740  *		ESPIPE
741  *		ENXIO
742  *		EINVAL
743  *	fp_lookup:EBADF
744  *	dofilewrite:???
745  */
int
pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval);
}
752 
/*
 * Gather-write common path: validate iovcnt and offset, copy in the
 * caller's iovec array, and issue the write via writev_internal().
 */
int
writev_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	uio_t uio = NULL;
	int error;
	struct user_iovec *iovp;

	/* Bound the iovec count and reject negative offsets outright. */
	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) {
		error = EINVAL;
		goto out;
	}

	uio = uio_create(iovcnt, offset,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);

	/* Backing storage for the iovec array inside the uio. */
	iovp = uio_iovsaddr_user(uio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Copy the caller's iovec array into the uio's storage. */
	error = copyin_user_iovec_array(user_iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    iovcnt, iovp);

	if (error) {
		goto out;
	}

	error = writev_internal(p, fd, uio, flags, puguard, retval);

out:
	if (uio != NULL) {
		uio_free(uio);
	}

	return error;
}
794 
/*
 * Non-cancellable writev(2): unguarded gather write at the current
 * file offset.
 */
int
writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
{
	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, NULL, retval);
}
800 
801 /*
802  * Gather write system call
803  */
int
writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval);
}
810 
/*
 * Non-cancellable pwritev(2): unguarded gather write at uap->offset.
 */
int
sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval)
{
	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
817 
818 /*
819  * Pwritev system call
820  */
int
sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval)
{
	/* Mark a pthread cancellation point before entering the common path. */
	__pthread_testcancel(1);
	return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval);
}
827 
828 /*
829  * Ioctl system call
830  *
831  * Returns:	0			Success
832  *		EBADF
833  *		ENOTTY
834  *		ENOMEM
835  *		ESRCH
836  *	copyin:EFAULT
 *	copyout:EFAULT
838  *	fp_lookup:EBADF			Bad file descriptor
839  *	fo_ioctl:???
840  */
int
ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
{
	struct fileproc *fp = NULL;
	int error = 0;
	u_int size = 0;
	caddr_t datap = NULL, memp = NULL;
	boolean_t is64bit = FALSE;
	int tmp = 0;
#define STK_PARAMS      128
	char stkbuf[STK_PARAMS] = {};
	int fd = uap->fd;
	u_long com = uap->com;
	struct vfs_context context = *vfs_context_current();

	AUDIT_ARG(fd, uap->fd);
	AUDIT_ARG(addr, uap->data);

	is64bit = proc_is64bit(p);
#if CONFIG_AUDIT
	if (is64bit) {
		AUDIT_ARG(value64, com);
	} else {
		AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
	}
#endif /* CONFIG_AUDIT */

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		return ENOTTY;
	}
	/* Use the stack buffer when it is big enough; otherwise heap-allocate. */
	if (size > sizeof(stkbuf)) {
		memp = (caddr_t)kalloc_data(size, Z_WAITOK);
		if (memp == 0) {
			return ENOMEM;
		}
		datap = memp;
	} else {
		datap = &stkbuf[0];
	}
	if (com & IOC_IN) {
		if (size) {
			/* Copy the input parameter block in from user space. */
			error = copyin(uap->data, datap, size);
			if (error) {
				goto out_nofp;
			}
		} else {
			/* XXX - IOC_IN and no size?  we should probably return an error here!! */
			if (is64bit) {
				*(user_addr_t *)datap = uap->data;
			} else {
				*(uint32_t *)datap = (uint32_t)uap->data;
			}
		}
	} else if ((com & IOC_OUT) && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(datap, size);
	} else if (com & IOC_VOID) {
		/* XXX - this is odd since IOC_VOID means no parameters */
		if (is64bit) {
			*(user_addr_t *)datap = uap->data;
		} else {
			*(uint32_t *)datap = (uint32_t)uap->data;
		}
	}

	proc_fdlock(p);
	error = fp_lookup(p, fd, &fp, 1);
	if (error) {
		proc_fdunlock(p);
		goto out_nofp;
	}

	AUDIT_ARG(file, p, fp);

	/* ioctl requires the descriptor to be open for reading or writing. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	/* Act with the credential the file was opened with. */
	context.vc_ucred = fp->fp_glob->fg_cred;

#if CONFIG_MACF
	error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com);
	if (error) {
		goto out;
	}
#endif

	switch (com) {
	case FIONCLEX:
		/* Clear close-on-exec on this descriptor. */
		fp->fp_flags &= ~FP_CLOEXEC;
		break;

	case FIOCLEX:
		/* Set close-on-exec on this descriptor. */
		fp->fp_flags |= FP_CLOEXEC;
		break;

	case FIONBIO:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fcntl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
		}
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
		break;

	case FIOASYNC:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fcntl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FASYNC, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FASYNC, relaxed);
		}
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
		break;

	case FIOSETOWN:
		tmp = *(int *)datap;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the pgid/pid value directly. */
			((struct socket *)fp_get_data(fp))->so_pgid = tmp;
			break;
		}
		if (fp->f_type == DTYPE_PIPE) {
			error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
			break;
		}
		if (tmp <= 0) {
			/* Non-positive value encodes a process group id. */
			tmp = -tmp;
		} else {
			/* Positive value is a pid: translate it to its pgrp id. */
			struct proc *p1 = proc_find(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrpid;
			proc_rele(p1);
		}
		error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			*(int *)datap = ((struct socket *)fp_get_data(fp))->so_pgid;
			break;
		}
		error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
		/* TIOCGPGRP yields a pgrp; FIOGETOWN reports it negated. */
		*(int *)datap = -*(int *)datap;
		break;

	default:
		error = fo_ioctl(fp, com, datap, &context);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) && size) {
			error = copyout(datap, uap->data, (u_int)size);
		}
		break;
	}
out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

out_nofp:
	if (memp) {
		kfree_data(memp, size);
	}
	return error;
}
1026 
1027 int     selwait;
1028 #define SEL_FIRSTPASS 1
1029 #define SEL_SECONDPASS 2
1030 static int selprocess(struct proc *p, int error, int sel_pass);
1031 static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
1032     int nfd, int32_t *retval, int sel_pass, struct select_set *selset);
1033 static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
1034 static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup);
1035 static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim);
1036 static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);
1037 
1038 /*
1039  * This is used for the special device nodes that do not implement
1040  * a proper kevent filter (see filt_specattach).
1041  *
1042  * In order to enable kevents on those, the spec_filtops will pretend
1043  * to call select, and try to sniff the selrecord(), if it observes one,
1044  * the knote is attached, which pairs with selwakeup() or selthreadclear().
1045  *
1046  * The last issue remaining, is that we need to serialize filt_specdetach()
1047  * with this, but it really can't know the "selinfo" or any locking domain.
1048  * To make up for this, We protect knote list operations with a global lock,
 * which gives us a safe shared locking domain.
1050  *
1051  * Note: It is a little distasteful, but we really have very few of those.
1052  *       The big problem here is that sharing a lock domain without
1053  *       any kind of shared knowledge is a little complicated.
1054  *
1055  *       1. filters can really implement their own kqueue integration
1056  *          to side step this,
1057  *
1058  *       2. There's an opportunity to pick a private lock in selspec_attach()
1059  *          because both the selinfo and the knote are locked at that time.
1060  *          The cleanup story is however a little complicated.
1061  */
1062 static LCK_GRP_DECLARE(selspec_grp, "spec_filtops");
1063 static LCK_SPIN_DECLARE(selspec_lock, &selspec_grp);
1064 
1065 /*
1066  * The "primitive" lock is held.
1067  * The knote lock is held.
1068  */
1069 void
selspec_attach(struct knote * kn,struct selinfo * si)1070 selspec_attach(struct knote *kn, struct selinfo *si)
1071 {
1072 	struct selinfo *cur = knote_kn_hook_get_raw(kn);
1073 
1074 	if (cur == NULL) {
1075 		si->si_flags |= SI_SELSPEC;
1076 		lck_spin_lock(&selspec_lock);
1077 		knote_kn_hook_set_raw(kn, (void *) si);
1078 		KNOTE_ATTACH(&si->si_note, kn);
1079 		lck_spin_unlock(&selspec_lock);
1080 	} else {
1081 		/*
1082 		 * selspec_attach() can be called from e.g. filt_spectouch()
1083 		 * which might be called before any event was dequeued.
1084 		 *
1085 		 * It is hence not impossible for the knote already be hooked.
1086 		 *
1087 		 * Note that selwakeup_internal() could possibly
1088 		 * already have cleared this pointer. This is a race
1089 		 * that filt_specprocess will debounce.
1090 		 */
1091 		assert(si->si_flags & SI_SELSPEC);
1092 		assert(cur == si);
1093 	}
1094 }
1095 
1096 /*
1097  * The "primitive" lock is _not_ held.
1098  *
1099  * knote "lock" is held
1100  */
1101 void
selspec_detach(struct knote * kn)1102 selspec_detach(struct knote *kn)
1103 {
1104 	lck_spin_lock(&selspec_lock);
1105 
1106 	if (!KNOTE_IS_AUTODETACHED(kn)) {
1107 		struct selinfo *sip = knote_kn_hook_get_raw(kn);
1108 		if (sip) {
1109 			KNOTE_DETACH(&sip->si_note, kn);
1110 		}
1111 	}
1112 
1113 	knote_kn_hook_set_raw(kn, NULL);
1114 
1115 	lck_spin_unlock(&selspec_lock);
1116 }
1117 
1118 /*
1119  * Select system call.
1120  *
1121  * Returns:	0			Success
1122  *		EINVAL			Invalid argument
1123  *		EAGAIN			Nonconformant error if allocation fails
1124  */
1125 int
select(struct proc * p,struct select_args * uap,int32_t * retval)1126 select(struct proc *p, struct select_args *uap, int32_t *retval)
1127 {
1128 	__pthread_testcancel(1);
1129 	return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
1130 }
1131 
1132 int
select_nocancel(struct proc * p,struct select_nocancel_args * uap,int32_t * retval)1133 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
1134 {
1135 	uint64_t timeout = 0;
1136 
1137 	if (uap->tv) {
1138 		int err;
1139 		struct timeval atv;
1140 		if (IS_64BIT_PROCESS(p)) {
1141 			struct user64_timeval atv64;
1142 			err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1143 			/* Loses resolution - assume timeout < 68 years */
1144 			atv.tv_sec = (__darwin_time_t)atv64.tv_sec;
1145 			atv.tv_usec = atv64.tv_usec;
1146 		} else {
1147 			struct user32_timeval atv32;
1148 			err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1149 			atv.tv_sec = atv32.tv_sec;
1150 			atv.tv_usec = atv32.tv_usec;
1151 		}
1152 		if (err) {
1153 			return err;
1154 		}
1155 
1156 		if (itimerfix(&atv)) {
1157 			err = EINVAL;
1158 			return err;
1159 		}
1160 
1161 		clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
1162 	}
1163 
1164 	return select_internal(p, uap, timeout, retval);
1165 }
1166 
int
pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
{
	/* cancellation point, then defer to the non-cancellable variant */
	__pthread_testcancel(1);
	return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
}
1173 
/*
 * Non-cancellable pselect(): like select_nocancel(), but the timeout is a
 * timespec and an optional signal mask is installed for the duration of
 * the wait, then restored on the direct-return path.
 */
int
pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
{
	int err;
	struct uthread *ut;
	uint64_t timeout = 0;

	if (uap->ts) {
		struct timespec ts;

		/* copy in the user timespec at the caller's word size */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
			ts.tv_sec = (__darwin_time_t)ts64.tv_sec;
			ts.tv_nsec = (long)ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}
		if (err) {
			return err;
		}

		if (!timespec_is_valid(&ts)) {
			return EINVAL;
		}
		/* 0 means "no deadline"; a valid ts always yields non-zero */
		clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
	}

	ut = current_uthread();

	if (uap->mask != USER_ADDR_NULL) {
		/* save current mask, then copyin and set new mask */
		sigset_t newset;
		err = copyin(uap->mask, &newset, sizeof(sigset_t));
		if (err) {
			return err;
		}
		ut->uu_oldmask = ut->uu_sigmask;
		ut->uu_flag |= UT_SAS_OLDMASK;
		ut->uu_sigmask = (newset & ~sigcantmask);
	}

	/* uap layouts are prefix-compatible; see select_internal() comment */
	err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);

	if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
		/*
		 * Restore old mask (direct return case). NOTE: EINTR can also be returned
		 * if the thread is cancelled. In that case, we don't reset the signal
		 * mask to its original value (which usually happens in the signal
		 * delivery path). This behavior is permitted by POSIX.
		 */
		ut->uu_sigmask = ut->uu_oldmask;
		ut->uu_oldmask = 0;
		ut->uu_flag &= ~UT_SAS_OLDMASK;
	}

	return err;
}
1235 
/*
 * Release the per-uthread select bit-vector cache.
 * ibits and obits share a single allocation of 2 * nbytes bytes
 * (see select_grow_uthread_cache()).
 */
void
select_cleanup_uthread(struct _select *sel)
{
	kfree_data(sel->ibits, 2 * sel->nbytes);
	sel->ibits = sel->obits = NULL;
	sel->nbytes = 0;
}
1243 
1244 static int
select_grow_uthread_cache(struct _select * sel,uint32_t nbytes)1245 select_grow_uthread_cache(struct _select *sel, uint32_t nbytes)
1246 {
1247 	uint32_t *buf;
1248 
1249 	buf = kalloc_data(2 * nbytes, Z_WAITOK | Z_ZERO);
1250 	if (buf) {
1251 		select_cleanup_uthread(sel);
1252 		sel->ibits = buf;
1253 		sel->obits = buf + nbytes / sizeof(uint32_t);
1254 		sel->nbytes = nbytes;
1255 		return true;
1256 	}
1257 	return false;
1258 }
1259 
/* Zero both halves (ibits and obits) of the cached bit-vector buffer. */
static void
select_bzero_uthread_cache(struct _select *sel)
{
	bzero(sel->ibits, sel->nbytes * 2);
}
1265 
1266 /*
1267  * Generic implementation of {,p}select. Care: we type-pun uap across the two
1268  * syscalls, which differ slightly. The first 4 arguments (nfds and the fd sets)
1269  * are identical. The 5th (timeout) argument points to different types, so we
1270  * unpack in the syscall-specific code, but the generic code still does a null
1271  * check on this argument to determine if a timeout was specified.
1272  */
1273 static int
select_internal(struct proc * p,struct select_nocancel_args * uap,uint64_t timeout,int32_t * retval)1274 select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
1275 {
1276 	struct uthread *uth = current_uthread();
1277 	struct _select *sel = &uth->uu_select;
1278 	struct _select_data *seldata = &uth->uu_save.uus_select_data;
1279 	int error = 0;
1280 	u_int ni, nw;
1281 
1282 	*retval = 0;
1283 
1284 	seldata->abstime = timeout;
1285 	seldata->args = uap;
1286 	seldata->retval = retval;
1287 	seldata->count = 0;
1288 
1289 	if (uap->nd < 0) {
1290 		return EINVAL;
1291 	}
1292 
1293 	if (uap->nd > p->p_fd.fd_nfiles) {
1294 		uap->nd = p->p_fd.fd_nfiles; /* forgiving; slightly wrong */
1295 	}
1296 	nw = howmany(uap->nd, NFDBITS);
1297 	ni = nw * sizeof(fd_mask);
1298 
1299 	/*
1300 	 * if the previously allocated space for the bits is smaller than
1301 	 * what is requested or no space has yet been allocated for this
1302 	 * thread, allocate enough space now.
1303 	 *
1304 	 * Note: If this process fails, select() will return EAGAIN; this
1305 	 * is the same thing pool() returns in a no-memory situation, but
1306 	 * it is not a POSIX compliant error code for select().
1307 	 */
1308 	if (sel->nbytes >= (3 * ni)) {
1309 		select_bzero_uthread_cache(sel);
1310 	} else if (!select_grow_uthread_cache(sel, 3 * ni)) {
1311 		return EAGAIN;
1312 	}
1313 
1314 	/*
1315 	 * get the bits from the user address space
1316 	 */
1317 #define getbits(name, x) \
1318 	(uap->name ? copyin(uap->name, &sel->ibits[(x) * nw], ni) : 0)
1319 
1320 	if ((error = getbits(in, 0))) {
1321 		return error;
1322 	}
1323 	if ((error = getbits(ou, 1))) {
1324 		return error;
1325 	}
1326 	if ((error = getbits(ex, 2))) {
1327 		return error;
1328 	}
1329 #undef  getbits
1330 
1331 	if ((error = selcount(p, sel->ibits, uap->nd, &seldata->count))) {
1332 		return error;
1333 	}
1334 
1335 	if (uth->uu_selset == NULL) {
1336 		uth->uu_selset = select_set_alloc();
1337 	}
1338 	return selprocess(p, 0, SEL_FIRSTPASS);
1339 }
1340 
/*
 * Continuation entry point: tsleep1() resumes a blocked select here,
 * re-entering the state machine on its second pass.
 */
static int
selcontinue(int error)
{
	return selprocess(current_proc(), error, SEL_SECONDPASS);
}
1346 
1347 
1348 /*
1349  * selprocess
1350  *
1351  * Parameters:	error			The error code from our caller
1352  *		sel_pass		The pass we are on
1353  */
1354 int
selprocess(struct proc * p,int error,int sel_pass)1355 selprocess(struct proc *p, int error, int sel_pass)
1356 {
1357 	struct uthread *uth = current_uthread();
1358 	struct _select *sel = &uth->uu_select;
1359 	struct _select_data *seldata = &uth->uu_save.uus_select_data;
1360 	struct select_nocancel_args *uap = seldata->args;
1361 	int *retval = seldata->retval;
1362 
1363 	int unwind = 1;
1364 	int prepost = 0;
1365 	int somewakeup = 0;
1366 	int doretry = 0;
1367 	wait_result_t wait_result;
1368 
1369 	if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) {
1370 		unwind = 0;
1371 	}
1372 	if (seldata->count == 0) {
1373 		unwind = 0;
1374 	}
1375 retry:
1376 	if (error != 0) {
1377 		goto done;
1378 	}
1379 
1380 	OSBitOrAtomic(P_SELECT, &p->p_flag);
1381 
1382 	/* skip scans if the select is just for timeouts */
1383 	if (seldata->count) {
1384 		error = selscan(p, sel, seldata, uap->nd, retval, sel_pass,
1385 		    uth->uu_selset);
1386 		if (error || *retval) {
1387 			goto done;
1388 		}
1389 		if (prepost || somewakeup) {
1390 			/*
1391 			 * if the select of log, then we can wakeup and
1392 			 * discover some one else already read the data;
1393 			 * go to select again if time permits
1394 			 */
1395 			prepost = 0;
1396 			somewakeup = 0;
1397 			doretry = 1;
1398 		}
1399 	}
1400 
1401 	if (uap->tv) {
1402 		uint64_t        now;
1403 
1404 		clock_get_uptime(&now);
1405 		if (now >= seldata->abstime) {
1406 			goto done;
1407 		}
1408 	}
1409 
1410 	if (doretry) {
1411 		/* cleanup obits and try again */
1412 		doretry = 0;
1413 		sel_pass = SEL_FIRSTPASS;
1414 		goto retry;
1415 	}
1416 
1417 	/*
1418 	 * To effect a poll, the timeout argument should be
1419 	 * non-nil, pointing to a zero-valued timeval structure.
1420 	 */
1421 	if (uap->tv && seldata->abstime == 0) {
1422 		goto done;
1423 	}
1424 
1425 	/* No spurious wakeups due to colls,no need to check for them */
1426 	if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
1427 		sel_pass = SEL_FIRSTPASS;
1428 		goto retry;
1429 	}
1430 
1431 	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1432 
1433 	/* if the select is just for timeout skip check */
1434 	if (seldata->count && (sel_pass == SEL_SECONDPASS)) {
1435 		panic("selprocess: 2nd pass assertwaiting");
1436 	}
1437 
1438 	wait_result = waitq_assert_wait64_leeway(uth->uu_selset,
1439 	    NO_EVENT64, THREAD_ABORTSAFE,
1440 	    TIMEOUT_URGENCY_USER_NORMAL,
1441 	    seldata->abstime,
1442 	    TIMEOUT_NO_LEEWAY);
1443 	if (wait_result != THREAD_AWAKENED) {
1444 		/* there are no preposted events */
1445 		error = tsleep1(NULL, PSOCK | PCATCH,
1446 		    "select", 0, selcontinue);
1447 	} else {
1448 		prepost = 1;
1449 		error = 0;
1450 	}
1451 
1452 	if (error == 0) {
1453 		sel_pass = SEL_SECONDPASS;
1454 		if (!prepost) {
1455 			somewakeup = 1;
1456 		}
1457 		goto retry;
1458 	}
1459 done:
1460 	if (unwind) {
1461 		seldrop(p, sel->ibits, uap->nd, seldata->count);
1462 		select_set_reset(uth->uu_selset);
1463 	}
1464 	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1465 	/* select is not restarted after signals... */
1466 	if (error == ERESTART) {
1467 		error = EINTR;
1468 	}
1469 	if (error == EWOULDBLOCK) {
1470 		error = 0;
1471 	}
1472 
1473 	if (error == 0) {
1474 		uint32_t nw = howmany(uap->nd, NFDBITS);
1475 		uint32_t ni = nw * sizeof(fd_mask);
1476 
1477 #define putbits(name, x) \
1478 	(uap->name ? copyout(&sel->obits[(x) * nw], uap->name, ni) : 0)
1479 		int e0 = putbits(in, 0);
1480 		int e1 = putbits(ou, 1);
1481 		int e2 = putbits(ex, 2);
1482 
1483 		error = e0 ?: e1 ?: e2;
1484 #undef putbits
1485 	}
1486 
1487 	if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
1488 		/* restore signal mask - continuation case */
1489 		uth->uu_sigmask = uth->uu_oldmask;
1490 		uth->uu_oldmask = 0;
1491 		uth->uu_flag &= ~UT_SAS_OLDMASK;
1492 	}
1493 
1494 	return error;
1495 }
1496 
1497 
1498 /**
1499  * remove the fileproc's underlying waitq from the supplied waitq set;
1500  * clear FP_INSELECT when appropriate
1501  *
1502  * Parameters:
1503  *		fp	File proc that is potentially currently in select
1504  *		selset	Waitq set to which the fileproc may belong
1505  *			(usually this is the thread's private waitq set)
1506  * Conditions:
1507  *		proc_fdlock is held
1508  */
1509 static void
selunlinkfp(struct fileproc * fp,struct select_set * selset)1510 selunlinkfp(struct fileproc *fp, struct select_set *selset)
1511 {
1512 	if (fp->fp_flags & FP_INSELECT) {
1513 		if (fp->fp_guard_attrs) {
1514 			if (fp->fp_guard->fpg_wset == selset) {
1515 				fp->fp_guard->fpg_wset = NULL;
1516 				fp->fp_flags &= ~FP_INSELECT;
1517 			}
1518 		} else {
1519 			if (fp->fp_wset == selset) {
1520 				fp->fp_wset = NULL;
1521 				fp->fp_flags &= ~FP_INSELECT;
1522 			}
1523 		}
1524 	}
1525 }
1526 
1527 /**
1528  * connect a fileproc to the given selset, potentially bridging to a waitq
1529  * pointed to indirectly by wq_data
1530  *
1531  * Parameters:
1532  *		fp	File proc potentially currently in select
1533  *		selset	Waitq set to which the fileproc should now belong
1534  *			(usually this is the thread's private waitq set)
1535  *
1536  * Conditions:
1537  *		proc_fdlock is held
1538  */
1539 static void
sellinkfp(struct fileproc * fp,struct select_set * selset,waitq_link_t * linkp)1540 sellinkfp(struct fileproc *fp, struct select_set *selset, waitq_link_t *linkp)
1541 {
1542 	if ((fp->fp_flags & FP_INSELECT) == 0) {
1543 		if (fp->fp_guard_attrs) {
1544 			fp->fp_guard->fpg_wset = selset;
1545 		} else {
1546 			fp->fp_wset = selset;
1547 		}
1548 		fp->fp_flags |= FP_INSELECT;
1549 	} else {
1550 		fp->fp_flags |= FP_SELCONFLICT;
1551 		if (linkp->wqlh == NULL) {
1552 			*linkp = waitq_link_alloc(WQT_SELECT_SET);
1553 		}
1554 		select_set_link(&select_conflict_queue, selset, linkp);
1555 	}
1556 }
1557 
1558 
1559 /*
1560  * selscan
1561  *
1562  * Parameters:	p			Process performing the select
1563  *		sel			The per-thread select context structure
1564  *		nfd			The number of file descriptors to scan
1565  *		retval			The per thread system call return area
1566  *		sel_pass		Which pass this is; allowed values are
1567  *						SEL_FIRSTPASS and SEL_SECONDPASS
1568  *		selset			The per thread wait queue set
1569  *
1570  * Returns:	0			Success
1571  *		EIO			Invalid p->p_fd field XXX Obsolete?
1572  *		EBADF			One of the files in the bit vector is
1573  *						invalid.
1574  */
1575 static int
selscan(struct proc * p,struct _select * sel,struct _select_data * seldata,int nfd,int32_t * retval,int sel_pass,struct select_set * selset)1576 selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
1577     int nfd, int32_t *retval, int sel_pass, struct select_set *selset)
1578 {
1579 	int msk, i, j, fd;
1580 	u_int32_t bits;
1581 	struct fileproc *fp;
1582 	int n = 0;              /* count of bits */
1583 	int nc = 0;             /* bit vector offset (nc'th bit) */
1584 	static int flag[3] = { FREAD, FWRITE, 0 };
1585 	u_int32_t *iptr, *optr;
1586 	u_int nw;
1587 	u_int32_t *ibits, *obits;
1588 	int count;
1589 	struct vfs_context context = {
1590 		.vc_thread = current_thread(),
1591 	};
1592 	waitq_link_t link = WQL_NULL;
1593 	void *s_data;
1594 
1595 	ibits = sel->ibits;
1596 	obits = sel->obits;
1597 
1598 	nw = howmany(nfd, NFDBITS);
1599 
1600 	count = seldata->count;
1601 
1602 	nc = 0;
1603 	if (!count) {
1604 		*retval = 0;
1605 		return 0;
1606 	}
1607 
1608 	if (sel_pass == SEL_FIRSTPASS) {
1609 		/*
1610 		 * Make sure the waitq-set is all clean:
1611 		 *
1612 		 * select loops until it finds at least one event, however it
1613 		 * doesn't mean that the event that woke up select is still
1614 		 * fired by the time the second pass runs, and then
1615 		 * select_internal will loop back to a first pass.
1616 		 */
1617 		select_set_reset(selset);
1618 		s_data = &link;
1619 	} else {
1620 		s_data = NULL;
1621 	}
1622 
1623 	proc_fdlock(p);
1624 	for (msk = 0; msk < 3; msk++) {
1625 		iptr = (u_int32_t *)&ibits[msk * nw];
1626 		optr = (u_int32_t *)&obits[msk * nw];
1627 
1628 		for (i = 0; i < nfd; i += NFDBITS) {
1629 			bits = iptr[i / NFDBITS];
1630 
1631 			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1632 				bits &= ~(1U << j);
1633 
1634 				fp = fp_get_noref_locked(p, fd);
1635 				if (fp == NULL) {
1636 					/*
1637 					 * If we abort because of a bad
1638 					 * fd, let the caller unwind...
1639 					 */
1640 					proc_fdunlock(p);
1641 					return EBADF;
1642 				}
1643 				if (sel_pass == SEL_SECONDPASS) {
1644 					selunlinkfp(fp, selset);
1645 				} else if (link.wqlh == NULL) {
1646 					link = waitq_link_alloc(WQT_SELECT_SET);
1647 				}
1648 
1649 				context.vc_ucred = fp->f_cred;
1650 
1651 				/* The select; set the bit, if true */
1652 				if (fo_select(fp, flag[msk], s_data, &context)) {
1653 					optr[fd / NFDBITS] |= (1U << (fd % NFDBITS));
1654 					n++;
1655 				}
1656 				if (sel_pass == SEL_FIRSTPASS) {
1657 					/*
1658 					 * Hook up the thread's waitq set either to
1659 					 * the fileproc structure, or to the global
1660 					 * conflict queue: but only on the first
1661 					 * select pass.
1662 					 */
1663 					sellinkfp(fp, selset, &link);
1664 				}
1665 				nc++;
1666 			}
1667 		}
1668 	}
1669 	proc_fdunlock(p);
1670 
1671 	if (link.wqlh) {
1672 		waitq_link_free(WQT_SELECT_SET, link);
1673 	}
1674 
1675 	*retval = n;
1676 	return 0;
1677 }
1678 
1679 static int poll_callback(struct kevent_qos_s *, kevent_ctx_t);
1680 
int
poll(struct proc *p, struct poll_args *uap, int32_t *retval)
{
	/* cancellation point, then defer to the non-cancellable variant */
	__pthread_testcancel(1);
	return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval);
}
1687 
1688 
/*
 * Non-cancellable poll(): implemented on top of kqueue.  Each pollfd is
 * translated into one or more one-shot kevents registered on a private
 * kqueue, which is then scanned (possibly blocking until the timeout);
 * poll_callback() translates fired kevents back into revents bits.
 */
int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
	struct pollfd *fds = NULL;
	struct kqueue *kq = NULL;
	int error = 0;
	u_int nfds = uap->nfds;
	u_int rfds = 0;         /* number of fds with a result so far */
	rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE);
	size_t ni = nfds * sizeof(struct pollfd);

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	if (nfds > OPEN_MAX ||
	    (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) {
		return EINVAL;
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		return EAGAIN;
	}

	if (nfds) {
		fds = (struct pollfd *)kalloc_data(ni, Z_WAITOK);
		if (NULL == fds) {
			error = EAGAIN;
			goto out;
		}

		error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
		if (error) {
			goto out;
		}
	}

	/* JMM - all this P_SELECT stuff is bogus */
	OSBitOrAtomic(P_SELECT, &p->p_flag);
	for (u_int i = 0; i < nfds; i++) {
		short events = fds[i].events;
		__assert_only int rc;

		/* per spec, ignore fd values below zero */
		if (fds[i].fd < 0) {
			fds[i].revents = 0;
			continue;
		}

		/* convert the poll event into a kqueue kevent */
		struct kevent_qos_s kev = {
			.ident = fds[i].fd,
			.flags = EV_ADD | EV_ONESHOT | EV_POLL,
			.udata = i, /* Index into pollfd array */
		};

		/* Handle input events */
		if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) {
			kev.filter = EVFILT_READ;
			if (events & (POLLPRI | POLLRDBAND)) {
				kev.flags |= EV_OOBAND;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle output events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) {
			kev.filter = EVFILT_WRITE;
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle BSD extension vnode events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) {
			kev.filter = EVFILT_VNODE;
			kev.fflags = 0;
			if (events & POLLEXTEND) {
				kev.fflags |= NOTE_EXTEND;
			}
			if (events & POLLATTRIB) {
				kev.fflags |= NOTE_ATTRIB;
			}
			if (events & POLLNLINK) {
				kev.fflags |= NOTE_LINK;
			}
			if (events & POLLWRITE) {
				kev.fflags |= NOTE_WRITE;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* a registration failure reports POLLNVAL for that entry */
		if (kev.flags & EV_ERROR) {
			fds[i].revents = POLLNVAL;
			rfds++;
		} else {
			fds[i].revents = 0;
		}
	}

	/*
	 * Did we have any trouble registering?
	 * If user space passed 0 FDs, then respect any timeout value passed.
	 * This is an extremely inefficient sleep. If user space passed one or
	 * more FDs, and we had trouble registering _all_ of them, then bail
	 * out. If a subset of the provided FDs failed to register, then we
	 * will still call the kqueue_scan function.
	 */
	if (nfds && (rfds == nfds)) {
		goto done;
	}

	/* scan for, and possibly wait for, the kevents to trigger */
	kevent_ctx_t kectx = kevent_get_context(current_thread());
	*kectx = (struct kevent_ctx_s){
		.kec_process_noutputs = rfds,
		.kec_process_flags    = KEVENT_FLAG_POLL,
		.kec_deadline         = 0, /* wait forever */
		.kec_poll_fds         = fds,
	};

	/*
	 * If any events have trouble registering, an event has fired and we
	 * shouldn't wait for events in kqueue_scan.
	 */
	if (rfds) {
		kectx->kec_process_flags |= KEVENT_FLAG_IMMEDIATE;
	} else if (uap->timeout != -1) {
		/* timeout is in milliseconds */
		clock_interval_to_deadline(uap->timeout, NSEC_PER_MSEC,
		    &kectx->kec_deadline);
	}

	error = kqueue_scan(kq, kectx->kec_process_flags, kectx, poll_callback);
	rfds = kectx->kec_process_noutputs;

done:
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* poll is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == 0) {
		error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
		*retval = rfds;
	}

out:
	kfree_data(fds, ni);

	kqueue_dealloc(kq);
	return error;
}
1848 
1849 static int
poll_callback(struct kevent_qos_s * kevp,kevent_ctx_t kectx)1850 poll_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
1851 {
1852 	assert(kectx->kec_process_flags & KEVENT_FLAG_POLL);
1853 	struct pollfd *fds = &kectx->kec_poll_fds[kevp->udata];
1854 
1855 	short prev_revents = fds->revents;
1856 	short mask = 0;
1857 
1858 	/* convert the results back into revents */
1859 	if (kevp->flags & EV_EOF) {
1860 		fds->revents |= POLLHUP;
1861 	}
1862 	if (kevp->flags & EV_ERROR) {
1863 		fds->revents |= POLLERR;
1864 	}
1865 
1866 	switch (kevp->filter) {
1867 	case EVFILT_READ:
1868 		if (fds->revents & POLLHUP) {
1869 			mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND);
1870 		} else {
1871 			mask = (POLLIN | POLLRDNORM);
1872 			if (kevp->flags & EV_OOBAND) {
1873 				mask |= (POLLPRI | POLLRDBAND);
1874 			}
1875 		}
1876 		fds->revents |= (fds->events & mask);
1877 		break;
1878 
1879 	case EVFILT_WRITE:
1880 		if (!(fds->revents & POLLHUP)) {
1881 			fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND));
1882 		}
1883 		break;
1884 
1885 	case EVFILT_VNODE:
1886 		if (kevp->fflags & NOTE_EXTEND) {
1887 			fds->revents |= (fds->events & POLLEXTEND);
1888 		}
1889 		if (kevp->fflags & NOTE_ATTRIB) {
1890 			fds->revents |= (fds->events & POLLATTRIB);
1891 		}
1892 		if (kevp->fflags & NOTE_LINK) {
1893 			fds->revents |= (fds->events & POLLNLINK);
1894 		}
1895 		if (kevp->fflags & NOTE_WRITE) {
1896 			fds->revents |= (fds->events & POLLWRITE);
1897 		}
1898 		break;
1899 	}
1900 
1901 	if (fds->revents != 0 && prev_revents == 0) {
1902 		kectx->kec_process_noutputs++;
1903 	}
1904 
1905 	return 0;
1906 }
1907 
/* Trivial device select routine: the device is always ready. */
int
seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
{
	return 1;
}
1913 
1914 /*
1915  * selcount
1916  *
1917  * Count the number of bits set in the input bit vector, and establish an
1918  * outstanding fp->fp_iocount for each of the descriptors which will be in
1919  * use in the select operation.
1920  *
1921  * Parameters:	p			The process doing the select
1922  *		ibits			The input bit vector
1923  *		nfd			The number of fd's in the vector
1924  *		countp			Pointer to where to store the bit count
1925  *
1926  * Returns:	0			Success
1927  *		EIO			Bad per process open file table
1928  *		EBADF			One of the bits in the input bit vector
1929  *						references an invalid fd
1930  *
1931  * Implicit:	*countp (modified)	Count of fd's
1932  *
1933  * Notes:	This function is the first pass under the proc_fdlock() that
1934  *		permits us to recognize invalid descriptors in the bit vector;
1935  *		the may, however, not remain valid through the drop and
1936  *		later reacquisition of the proc_fdlock().
1937  */
1938 static int
selcount(struct proc * p,u_int32_t * ibits,int nfd,int * countp)1939 selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
1940 {
1941 	int msk, i, j, fd;
1942 	u_int32_t bits;
1943 	struct fileproc *fp;
1944 	int n = 0;
1945 	u_int32_t *iptr;
1946 	u_int nw;
1947 	int error = 0;
1948 	int need_wakeup = 0;
1949 
1950 	nw = howmany(nfd, NFDBITS);
1951 
1952 	proc_fdlock(p);
1953 	for (msk = 0; msk < 3; msk++) {
1954 		iptr = (u_int32_t *)&ibits[msk * nw];
1955 		for (i = 0; i < nfd; i += NFDBITS) {
1956 			bits = iptr[i / NFDBITS];
1957 			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1958 				bits &= ~(1U << j);
1959 
1960 				fp = fp_get_noref_locked(p, fd);
1961 				if (fp == NULL) {
1962 					*countp = 0;
1963 					error = EBADF;
1964 					goto bad;
1965 				}
1966 				os_ref_retain_locked(&fp->fp_iocount);
1967 				n++;
1968 			}
1969 		}
1970 	}
1971 	proc_fdunlock(p);
1972 
1973 	*countp = n;
1974 	return 0;
1975 
1976 bad:
1977 	if (n == 0) {
1978 		goto out;
1979 	}
1980 	/* Ignore error return; it's already EBADF */
1981 	(void)seldrop_locked(p, ibits, nfd, n, &need_wakeup);
1982 
1983 out:
1984 	proc_fdunlock(p);
1985 	if (need_wakeup) {
1986 		wakeup(&p->p_fd.fd_fpdrainwait);
1987 	}
1988 	return error;
1989 }
1990 
1991 
1992 /*
1993  * seldrop_locked
1994  *
1995  * Drop outstanding wait queue references set up during selscan(); drop the
1996  * outstanding per fileproc fp_iocount picked up during the selcount().
1997  *
1998  * Parameters:	p			Process performing the select
1999  *		ibits			Input bit bector of fd's
2000  *		nfd			Number of fd's
2001  *		lim			Limit to number of vector entries to
2002  *						consider, or -1 for "all"
2003  *		inselect		True if
2004  *		need_wakeup		Pointer to flag to set to do a wakeup
2005  *					if f_iocont on any descriptor goes to 0
2006  *
2007  * Returns:	0			Success
2008  *		EBADF			One or more fds in the bit vector
2009  *						were invalid, but the rest
2010  *						were successfully dropped
2011  *
2012  * Notes:	An fd make become bad while the proc_fdlock() is not held,
2013  *		if a multithreaded application closes the fd out from under
2014  *		the in progress select.  In this case, we still have to
2015  *		clean up after the set up on the remaining fds.
2016  */
2017 static int
seldrop_locked(struct proc * p,u_int32_t * ibits,int nfd,int lim,int * need_wakeup)2018 seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup)
2019 {
2020 	int msk, i, j, nc, fd;
2021 	u_int32_t bits;
2022 	struct fileproc *fp;
2023 	u_int32_t *iptr;
2024 	u_int nw;
2025 	int error = 0;
2026 	uthread_t uth = current_uthread();
2027 	struct _select_data *seldata;
2028 
2029 	*need_wakeup = 0;
2030 
2031 	nw = howmany(nfd, NFDBITS);
2032 	seldata = &uth->uu_save.uus_select_data;
2033 
2034 	nc = 0;
2035 	for (msk = 0; msk < 3; msk++) {
2036 		iptr = (u_int32_t *)&ibits[msk * nw];
2037 		for (i = 0; i < nfd; i += NFDBITS) {
2038 			bits = iptr[i / NFDBITS];
2039 			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
2040 				bits &= ~(1U << j);
2041 				/*
2042 				 * If we've already dropped as many as were
2043 				 * counted/scanned, then we are done.
2044 				 */
2045 				if (nc >= lim) {
2046 					goto done;
2047 				}
2048 
2049 				/*
2050 				 * We took an I/O reference in selcount,
2051 				 * so the fp can't possibly be NULL.
2052 				 */
2053 				fp = fp_get_noref_locked_with_iocount(p, fd);
2054 				selunlinkfp(fp, uth->uu_selset);
2055 
2056 				nc++;
2057 
2058 				const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount);
2059 				if (0 == refc) {
2060 					panic("fp_iocount overdecrement!");
2061 				}
2062 
2063 				if (1 == refc) {
2064 					/*
2065 					 * The last iocount is responsible for clearing
2066 					 * selconfict flag - even if we didn't set it -
2067 					 * and is also responsible for waking up anyone
2068 					 * waiting on iocounts to drain.
2069 					 */
2070 					if (fp->fp_flags & FP_SELCONFLICT) {
2071 						fp->fp_flags &= ~FP_SELCONFLICT;
2072 					}
2073 					if (p->p_fd.fd_fpdrainwait) {
2074 						p->p_fd.fd_fpdrainwait = 0;
2075 						*need_wakeup = 1;
2076 					}
2077 				}
2078 			}
2079 		}
2080 	}
2081 done:
2082 	return error;
2083 }
2084 
2085 
2086 static int
seldrop(struct proc * p,u_int32_t * ibits,int nfd,int lim)2087 seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim)
2088 {
2089 	int error;
2090 	int need_wakeup = 0;
2091 
2092 	proc_fdlock(p);
2093 	error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup);
2094 	proc_fdunlock(p);
2095 	if (need_wakeup) {
2096 		wakeup(&p->p_fd.fd_fpdrainwait);
2097 	}
2098 	return error;
2099 }
2100 
2101 /*
2102  * Record a select request.
2103  */
2104 void
selrecord(__unused struct proc * selector,struct selinfo * sip,void * s_data)2105 selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
2106 {
2107 	struct select_set *selset = current_uthread()->uu_selset;
2108 
2109 	/* do not record if this is second pass of select */
2110 	if (!s_data) {
2111 		return;
2112 	}
2113 
2114 	if (selset == SELSPEC_RECORD_MARKER) {
2115 		/*
2116 		 * The kevent subsystem is trying to sniff
2117 		 * the selinfo::si_note to attach to.
2118 		 */
2119 		((selspec_record_hook_t)s_data)(sip);
2120 	} else {
2121 		waitq_link_t *linkp = s_data;
2122 
2123 		if (!waitq_is_valid(&sip->si_waitq)) {
2124 			waitq_init(&sip->si_waitq, WQT_SELECT, SYNC_POLICY_FIFO);
2125 		}
2126 
2127 		/* note: this checks for pre-existing linkage */
2128 		select_set_link(&sip->si_waitq, selset, linkp);
2129 	}
2130 }
2131 
/*
 * Common wakeup path for selwakeup() and selthreadclear(): notify any
 * kevent knotes attached via SI_SELSPEC, then wake all waiters and tear
 * down the selinfo's waitq in one combined operation.
 */
static void
selwakeup_internal(struct selinfo *sip, long hint, wait_result_t wr)
{
	if (sip->si_flags & SI_SELSPEC) {
		/*
		 * The "primitive" lock is held.
		 * The knote lock is not held.
		 *
		 * All knotes will transition their kn_hook to NULL and we will
		 * reinitialize the primitive's klist
		 */
		lck_spin_lock(&selspec_lock);
		knote(&sip->si_note, hint, /*autodetach=*/ true);
		lck_spin_unlock(&selspec_lock);
		sip->si_flags &= ~SI_SELSPEC;
	}

	/*
	 * After selrecord() has been called, selinfo owners must call
	 * at least one of selwakeup() or selthreadclear().
	 *
	 * Use this opportunity to deinit the waitq
	 * so that all linkages are garbage collected
	 * in a combined wakeup-all + unlink + deinit call.
	 */
	select_waitq_wakeup_and_deinit(&sip->si_waitq, NO_EVENT64, wr);
}
2159 
2160 
/* Wake everything waiting on this selinfo (normal event wakeup). */
void
selwakeup(struct selinfo *sip)
{
	selwakeup_internal(sip, 0, THREAD_AWAKENED);
}
2166 
/*
 * Revoke the selinfo: waiters are released with THREAD_RESTART and
 * knotes receive NOTE_REVOKE (the underlying object is going away).
 */
void
selthreadclear(struct selinfo *sip)
{
	selwakeup_internal(sip, NOTE_REVOKE, THREAD_RESTART);
}
2172 
2173 
2174 /*
2175  * gethostuuid
2176  *
2177  * Description:	Get the host UUID from IOKit and return it to user space.
2178  *
2179  * Parameters:	uuid_buf		Pointer to buffer to receive UUID
 *		timeout			Timespec for timeout
2181  *
2182  * Returns:	0			Success
2183  *		EWOULDBLOCK		Timeout is too short
2184  *		copyout:EFAULT		Bad user buffer
2185  *		mac_system_check_info:EPERM		Client not allowed to perform this operation
2186  *
2187  * Notes:	A timeout seems redundant, since if it's tolerable to not
2188  *		have a system UUID in hand, then why ask for one?
2189  */
int
gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
{
	kern_return_t kret;
	int error;
	mach_timespec_t mach_ts;        /* for IOKit call */
	__darwin_uuid_t uuid_kern = {}; /* for IOKit call */

	/* Check entitlement */
	if (!IOCurrentTaskHasEntitlement("com.apple.private.getprivatesysid")) {
#if !defined(XNU_TARGET_OS_OSX)
#if CONFIG_MACF
		/* Unentitled callers on non-macOS platforms must pass MACF. */
		if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
			/* EPERM invokes userspace upcall if present */
			return error;
		}
#endif
#endif
	}

	/* Convert the 32/64 bit timespec into a mach_timespec_t */
	if (proc_is64bit(p)) {
		struct user64_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		/* Deliberate narrowing: mach_timespec_t fields are 32-bit. */
		mach_ts.tv_sec = (unsigned int)ts.tv_sec;
		mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec;
	} else {
		struct user32_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		mach_ts.tv_sec = ts.tv_sec;
		mach_ts.tv_nsec = ts.tv_nsec;
	}

	/* Call IOKit with the stack buffer to get the UUID */
	kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);

	/*
	 * If we get it, copy out the data to the user buffer; note that a
	 * uuid_t is an array of characters, so this is size invariant for
	 * 32 vs. 64 bit.
	 */
	if (kret == KERN_SUCCESS) {
		error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
	} else {
		error = EWOULDBLOCK;
	}

	return error;
}
2245 
2246 /*
2247  * ledger
2248  *
2249  * Description:	Omnibus system call for ledger operations
2250  */
2251 int
ledger(struct proc * p,struct ledger_args * args,__unused int32_t * retval)2252 ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
2253 {
2254 #if !CONFIG_MACF
2255 #pragma unused(p)
2256 #endif
2257 	int rval, pid, len, error;
2258 #ifdef LEDGER_DEBUG
2259 	struct ledger_limit_args lla;
2260 #endif
2261 	task_t task;
2262 	proc_t proc;
2263 
2264 	/* Finish copying in the necessary args before taking the proc lock */
2265 	error = 0;
2266 	len = 0;
2267 	if (args->cmd == LEDGER_ENTRY_INFO) {
2268 		error = copyin(args->arg3, (char *)&len, sizeof(len));
2269 	} else if (args->cmd == LEDGER_TEMPLATE_INFO) {
2270 		error = copyin(args->arg2, (char *)&len, sizeof(len));
2271 	} else if (args->cmd == LEDGER_LIMIT)
2272 #ifdef LEDGER_DEBUG
2273 	{ error = copyin(args->arg2, (char *)&lla, sizeof(lla));}
2274 #else
2275 	{ return EINVAL; }
2276 #endif
2277 	else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) {
2278 		return EINVAL;
2279 	}
2280 
2281 	if (error) {
2282 		return error;
2283 	}
2284 	if (len < 0) {
2285 		return EINVAL;
2286 	}
2287 
2288 	rval = 0;
2289 	if (args->cmd != LEDGER_TEMPLATE_INFO) {
2290 		pid = (int)args->arg1;
2291 		proc = proc_find(pid);
2292 		if (proc == NULL) {
2293 			return ESRCH;
2294 		}
2295 
2296 #if CONFIG_MACF
2297 		error = mac_proc_check_ledger(p, proc, args->cmd);
2298 		if (error) {
2299 			proc_rele(proc);
2300 			return error;
2301 		}
2302 #endif
2303 
2304 		task = proc_task(proc);
2305 	}
2306 
2307 	switch (args->cmd) {
2308 #ifdef LEDGER_DEBUG
2309 	case LEDGER_LIMIT: {
2310 		if (!kauth_cred_issuser(kauth_cred_get())) {
2311 			rval = EPERM;
2312 		}
2313 		rval = ledger_limit(task, &lla);
2314 		proc_rele(proc);
2315 		break;
2316 	}
2317 #endif
2318 	case LEDGER_INFO: {
2319 		struct ledger_info info = {};
2320 
2321 		rval = ledger_info(task, &info);
2322 		proc_rele(proc);
2323 		if (rval == 0) {
2324 			rval = copyout(&info, args->arg2,
2325 			    sizeof(info));
2326 		}
2327 		break;
2328 	}
2329 
2330 	case LEDGER_ENTRY_INFO: {
2331 		void *buf;
2332 		int sz;
2333 
2334 #if CONFIG_MEMORYSTATUS
2335 		task_ledger_settle_dirty_time(task);
2336 #endif /* CONFIG_MEMORYSTATUS */
2337 
2338 		rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
2339 		proc_rele(proc);
2340 		if ((rval == 0) && (len >= 0)) {
2341 			sz = len * sizeof(struct ledger_entry_info);
2342 			rval = copyout(buf, args->arg2, sz);
2343 			kfree_data(buf, sz);
2344 		}
2345 		if (rval == 0) {
2346 			rval = copyout(&len, args->arg3, sizeof(len));
2347 		}
2348 		break;
2349 	}
2350 
2351 	case LEDGER_TEMPLATE_INFO: {
2352 		void *buf;
2353 		int sz;
2354 
2355 		rval = ledger_template_info(&buf, &len);
2356 		if ((rval == 0) && (len >= 0)) {
2357 			sz = len * sizeof(struct ledger_template_info);
2358 			rval = copyout(buf, args->arg1, sz);
2359 			kfree_data(buf, sz);
2360 		}
2361 		if (rval == 0) {
2362 			rval = copyout(&len, args->arg2, sizeof(len));
2363 		}
2364 		break;
2365 	}
2366 
2367 	default:
2368 		panic("ledger syscall logic error -- command type %d", args->cmd);
2369 		proc_rele(proc);
2370 		rval = EINVAL;
2371 	}
2372 
2373 	return rval;
2374 }
2375 
2376 int
telemetry(__unused struct proc * p,struct telemetry_args * args,__unused int32_t * retval)2377 telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
2378 {
2379 	int error = 0;
2380 
2381 	switch (args->cmd) {
2382 #if CONFIG_TELEMETRY
2383 	case TELEMETRY_CMD_TIMER_EVENT:
2384 		error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
2385 		break;
2386 	case TELEMETRY_CMD_PMI_SETUP:
2387 		error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval);
2388 		break;
2389 #endif /* CONFIG_TELEMETRY */
2390 	case TELEMETRY_CMD_VOUCHER_NAME:
2391 		if (thread_set_voucher_name((mach_port_name_t)args->deadline)) {
2392 			error = EINVAL;
2393 		}
2394 		break;
2395 
2396 	default:
2397 		error = EINVAL;
2398 		break;
2399 	}
2400 
2401 	return error;
2402 }
2403 
2404 /*
2405  * Logging
2406  *
2407  * Description: syscall to access kernel logging from userspace
2408  *
2409  * Args:
2410  *	tag - used for syncing with userspace on the version.
2411  *	flags - flags used by the syscall.
2412  *	buffer - userspace address of string to copy.
2413  *	size - size of buffer.
2414  */
2415 int
log_data(__unused struct proc * p,struct log_data_args * args,int * retval)2416 log_data(__unused struct proc *p, struct log_data_args *args, int *retval)
2417 {
2418 	unsigned int tag = args->tag;
2419 	unsigned int flags = args->flags;
2420 	user_addr_t buffer = args->buffer;
2421 	unsigned int size = args->size;
2422 	int ret = 0;
2423 	*retval = 0;
2424 
2425 	/* Only DEXTs are suppose to use this syscall. */
2426 	if (!task_is_driver(current_task())) {
2427 		return EPERM;
2428 	}
2429 
2430 	/*
2431 	 * Tag synchronize the syscall version with userspace.
2432 	 * Tag == 0 => flags == OS_LOG_TYPE
2433 	 */
2434 	if (tag != 0) {
2435 		return EINVAL;
2436 	}
2437 
2438 	/*
2439 	 * OS_LOG_TYPE are defined in libkern/os/log.h
2440 	 * In userspace they are defined in libtrace/os/log.h
2441 	 */
2442 	if (flags != OS_LOG_TYPE_DEFAULT &&
2443 	    flags != OS_LOG_TYPE_INFO &&
2444 	    flags != OS_LOG_TYPE_DEBUG &&
2445 	    flags != OS_LOG_TYPE_ERROR &&
2446 	    flags != OS_LOG_TYPE_FAULT) {
2447 		return EINVAL;
2448 	}
2449 
2450 	if (size == 0) {
2451 		return EINVAL;
2452 	}
2453 
2454 	/* truncate to OS_LOG_DATA_MAX_SIZE */
2455 	if (size > OS_LOG_DATA_MAX_SIZE) {
2456 		size = OS_LOG_DATA_MAX_SIZE;
2457 	}
2458 
2459 	char *log_msg = (char *)kalloc_data(size, Z_WAITOK);
2460 	if (!log_msg) {
2461 		return ENOMEM;
2462 	}
2463 
2464 	if (copyin(buffer, log_msg, size) != 0) {
2465 		ret = EFAULT;
2466 		goto out;
2467 	}
2468 	log_msg[size - 1] = '\0';
2469 
2470 	/*
2471 	 * This will log to dmesg and logd.
2472 	 * The call will fail if the current
2473 	 * process is not a driverKit process.
2474 	 */
2475 	os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg);
2476 
2477 out:
2478 	if (log_msg != NULL) {
2479 		kfree_data(log_msg, size);
2480 	}
2481 
2482 	return ret;
2483 }
2484 
2485 #if DEVELOPMENT || DEBUG
2486 
2487 static int
2488 sysctl_mpsc_test_pingpong SYSCTL_HANDLER_ARGS
2489 {
2490 #pragma unused(oidp, arg1, arg2)
2491 	uint64_t value = 0;
2492 	int error;
2493 
2494 	error = SYSCTL_IN(req, &value, sizeof(value));
2495 	if (error) {
2496 		return error;
2497 	}
2498 
2499 	if (error == 0 && req->newptr) {
2500 		error = mpsc_test_pingpong(value, &value);
2501 		if (error == 0) {
2502 			error = SYSCTL_OUT(req, &value, sizeof(value));
2503 		}
2504 	}
2505 
2506 	return error;
2507 }
2508 SYSCTL_PROC(_kern, OID_AUTO, mpsc_test_pingpong, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2509     0, 0, sysctl_mpsc_test_pingpong, "Q", "MPSC tests: pingpong");
2510 
2511 #endif /* DEVELOPMENT || DEBUG */
2512 
/* Telemetry, microstackshots */

/* Parent node for the microstackshot counters below. */
SYSCTL_NODE(_kern, OID_AUTO, microstackshot, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    "microstackshot info");

extern uint32_t telemetry_sample_rate;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, interrupt_sample_rate,
    CTLFLAG_RD | CTLFLAG_LOCKED, &telemetry_sample_rate, 0,
    "interrupt-based sampling rate in Hz");

/* PMI-driven sampling knobs exist only when both core PMC event macros are defined. */
#if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES)

extern uint64_t mt_microstackshot_period;
SYSCTL_QUAD(_kern_microstackshot, OID_AUTO, pmi_sample_period,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_period,
    "PMI sampling rate");
extern unsigned int mt_microstackshot_ctr;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, pmi_sample_counter,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_ctr, 0,
    "PMI counter");

#endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */
2535 
/* Remote Time API */
SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api");

#if DEVELOPMENT || DEBUG
#if CONFIG_MACH_BRIDGE_SEND_TIME
extern _Atomic uint32_t bt_init_flag;
extern uint32_t mach_bridge_timer_enable(uint32_t, int);

/* Read-only view of whether the bridge timer has been initialized. */
SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag,
    CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, "");
2546 
/*
 * machdep.remotetime.bridge_timer_enable: query or toggle the mach
 * bridge timer.  Writing any value other than 0 or 1 fails with EPERM.
 * Until bt_init_flag is set, the reported value stays 0 and writes are
 * silently ignored.
 */
static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t value = 0;
	int error = 0;
	/* User is querying buffer size */
	if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(value);
		return 0;
	}
	if (os_atomic_load(&bt_init_flag, acquire)) {
		if (req->newptr) {
			int new_value = 0;
			error = SYSCTL_IN(req, &new_value, sizeof(new_value));
			if (error) {
				return error;
			}
			if (new_value == 0 || new_value == 1) {
				/* Second argument 1 presumably means "apply the change" — confirm. */
				value = mach_bridge_timer_enable(new_value, 1);
			} else {
				return EPERM;
			}
		} else {
			/* Query-only path: second argument 0. */
			value = mach_bridge_timer_enable(0, 0);
		}
	}
	error = SYSCTL_OUT(req, &value, sizeof(value));
	return error;
}

SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_bridge_timer_enable, "I", "");
2580 
2581 #endif /* CONFIG_MACH_BRIDGE_SEND_TIME */
2582 
2583 static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS
2584 {
2585 #pragma unused(oidp, arg1, arg2)
2586 	uint64_t ltime = 0, rtime = 0;
2587 	if (req->oldptr == USER_ADDR_NULL) {
2588 		req->oldidx = sizeof(rtime);
2589 		return 0;
2590 	}
2591 	if (req->newptr) {
2592 		int error = SYSCTL_IN(req, &ltime, sizeof(ltime));
2593 		if (error) {
2594 			return error;
2595 		}
2596 	}
2597 	rtime = mach_bridge_remote_time(ltime);
2598 	return SYSCTL_OUT(req, &rtime, sizeof(rtime));
2599 }
2600 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time,
2601     CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2602     0, 0, sysctl_mach_bridge_remote_time, "Q", "");
2603 
2604 #endif /* DEVELOPMENT || DEBUG */
2605 
2606 #if CONFIG_MACH_BRIDGE_RECV_TIME
2607 extern struct bt_params bt_params_get_latest(void);
2608 
2609 static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS
2610 {
2611 #pragma unused(oidp, arg1, arg2)
2612 	struct bt_params params = {};
2613 	if (req->oldptr == USER_ADDR_NULL) {
2614 		req->oldidx = sizeof(struct bt_params);
2615 		return 0;
2616 	}
2617 	if (req->newptr) {
2618 		return EPERM;
2619 	}
2620 	params = bt_params_get_latest();
2621 	return SYSCTL_OUT(req, &params, MIN(sizeof(params), req->oldlen));
2622 }
2623 
2624 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params,
2625     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0,
2626     0, sysctl_mach_bridge_conversion_params, "S,bt_params", "");
2627 
2628 #endif /* CONFIG_MACH_BRIDGE_RECV_TIME */
2629 
2630 #if DEVELOPMENT || DEBUG
2631 
2632 #include <pexpert/pexpert.h>
extern int32_t sysctl_get_bound_cpuid(void);
extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
/*
 * kern.sched_thread_bind_cpu: reads return the calling thread's bound
 * cpuid; writes bind the calling thread to the given CPU.  Gated behind
 * the "enable_skstb" boot-arg; DEVELOPMENT/DEBUG only.
 */
static int
sysctl_kern_sched_thread_bind_cpu SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	/*
	 * DO NOT remove this bootarg guard or make this non-development.
	 * This kind of binding should only be used for tests and
	 * experiments in a custom configuration, never shipping code.
	 */

	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	int32_t cpuid = sysctl_get_bound_cpuid();

	int32_t new_value;
	int changed;
	int error = sysctl_io_number(req, cpuid, sizeof(cpuid), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		kern_return_t kr = sysctl_thread_bind_cpuid(new_value);

		/* Map Mach results onto errno values for the sysctl caller. */
		if (kr == KERN_NOT_SUPPORTED) {
			return ENOTSUP;
		}

		if (kr == KERN_INVALID_VALUE) {
			return ERANGE;
		}
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cpu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cpu, "I", "");
2676 
2677 #if __AMP__
2678 
extern char sysctl_get_bound_cluster_type(void);
/*
 * kern.sched_thread_bind_cluster_type: reads return the calling thread's
 * bound cluster type as a single character; writing 'P'/'p'/'E'/'e'
 * binds the thread to that cluster type.  Gated behind the
 * "enable_skstb" boot-arg.
 */
static int
sysctl_kern_sched_thread_bind_cluster_type SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	/*
	 * NOTE(review): on a pure read (req->newptr == NULL) SYSCTL_IN does
	 * not fill buff, so this reads an indeterminate byte; the value is
	 * discarded via the goto below, but initializing buff would be
	 * cleaner — confirm.
	 */
	char cluster_type = buff[0];

	if (!req->newptr) {
		goto out;
	}

	if (cluster_type != 'P' &&
	    cluster_type != 'p' &&
	    cluster_type != 'E' &&
	    cluster_type != 'e') {
		return EINVAL;
	}

	thread_bind_cluster_type(current_thread(), cluster_type, false);

out:
	/* Always report the (possibly just-updated) binding back. */
	buff[0] = sysctl_get_bound_cluster_type();

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cluster_type, "A", "");
2717 
extern char sysctl_get_task_cluster_type(void);
extern void sysctl_task_set_cluster_type(char cluster_type);
/*
 * kern.sched_task_set_cluster_type: reads return the current task's
 * cluster type as a single character; writing 'E'/'e'/'P'/'p' sets it.
 * Gated behind the "enable_skstsct" boot-arg.
 */
static int
sysctl_kern_sched_task_set_cluster_type SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	if (!PE_parse_boot_argn("enable_skstsct", NULL, 0)) {
		return ENOENT;
	}

	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	/*
	 * NOTE(review): on a pure read SYSCTL_IN leaves buff untouched, so
	 * this byte is indeterminate; it is unused in that case (goto out).
	 */
	char cluster_type = buff[0];

	if (!req->newptr) {
		goto out;
	}

	if (cluster_type != 'E' &&
	    cluster_type != 'e' &&
	    cluster_type != 'P' &&
	    cluster_type != 'p') {
		return EINVAL;
	}

	sysctl_task_set_cluster_type(cluster_type);
out:
	/* Report the resulting task cluster type. */
	cluster_type = sysctl_get_task_cluster_type();
	buff[0] = cluster_type;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_task_set_cluster_type, "A", "");
2757 
extern kern_return_t thread_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options);
extern uint32_t thread_bound_cluster_id(thread_t);
/*
 * kern.sched_thread_bind_cluster_id: reads return the calling thread's
 * bound cluster id; writes hard-bind the thread to the given cluster.
 * Gated behind the "enable_skstb" boot-arg.
 */
static int
sysctl_kern_sched_thread_bind_cluster_id SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	thread_t self = current_thread();
	int32_t cluster_id = thread_bound_cluster_id(self);
	int32_t new_value;
	int changed;
	int error = sysctl_io_number(req, cluster_id, sizeof(cluster_id), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		/*
		 * This sysctl binds the thread to the cluster without any flags, which
		 * means it will be hard bound and not check eligibility.
		 */
		kern_return_t kr = thread_bind_cluster_id(self, new_value, 0);
		if (kr == KERN_INVALID_VALUE) {
			return ERANGE;
		}

		/* Any other Mach failure is reported generically. */
		if (kr != KERN_SUCCESS) {
			return EINVAL;
		}
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_id, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cluster_id, "I", "");
2797 
2798 #if CONFIG_SCHED_EDGE
2799 
extern int sched_edge_migrate_ipi_immediate;
/* Tunable: allow the Edge scheduler to use immediate migration IPIs. */
SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency");
2802 
2803 #endif /* CONFIG_SCHED_EDGE */
2804 
2805 #endif /* __AMP__ */
2806 
2807 #if SCHED_HYGIENE_DEBUG
2808 
/*
 * Scheduler-hygiene instrumentation knobs: thresholds (in mach timebase
 * units) and debug modes (0: off, 1: trace, 2: panic) for interrupt
 * masking and preemption disablement.
 */
SYSCTL_QUAD(_kern, OID_AUTO, interrupt_masked_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_timeout,
    "Interrupt masked duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, interrupt_masked_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_debug_mode, 0,
    "Enable interrupt masked tracing or panic (0: off, 1: trace, 2: panic)");

SYSCTL_QUAD(_kern, OID_AUTO, sched_preemption_disable_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_threshold_mt,
    "Preemption disablement duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, sched_preemption_disable_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_debug_mode, 0,
    "Enable preemption disablement tracing or panic (0: off, 1: trace, 2: panic)");
2824 
/*
 * kern.sched_preemption_disable_stats: reads return the per-CPU maximum
 * preemption-disable durations; any write resets the recorded maxima.
 */
static int
sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern unsigned int preemption_disable_get_max_durations(uint64_t *durations, size_t count);
	extern void preemption_disable_reset_max_durations(void);

	uint64_t stats[MAX_CPUS]; // maximum per CPU

	unsigned int ncpus = preemption_disable_get_max_durations(stats, MAX_CPUS);
	if (req->newlen > 0) {
		/* Reset when attempting to write to the sysctl. */
		preemption_disable_reset_max_durations();
	}

	/* Only the first ncpus entries of stats are valid. */
	return sysctl_io_opaque(req, stats, ncpus * sizeof(uint64_t), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_preemption_disable_stats,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_sched_preemption_disable_stats, "I", "Preemption disablement statistics");
2845 
2846 #endif /* SCHED_HYGIENE_DEBUG */
2847 
/* Used for testing by exception_tests: read-only view of the IPC control-port options. */
extern uint32_t ipc_control_port_options;
SYSCTL_INT(_kern, OID_AUTO, ipc_control_port_options,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipc_control_port_options, 0, "");
2852 
2853 #endif /* DEVELOPMENT || DEBUG */
2854 
extern uint32_t task_exc_guard_default;

/* Read-only export of the default task_exc_guard value. */
SYSCTL_INT(_kern, OID_AUTO, task_exc_guard_default,
    CTLFLAG_RD | CTLFLAG_LOCKED, &task_exc_guard_default, 0, "");
2859 
2860 
2861 static int
2862 sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS
2863 {
2864 #pragma unused(oidp, arg1, arg2)
2865 	uint32_t value = machine_csv(CPUVN_CI) ? 1 : 0;
2866 
2867 	if (req->newptr) {
2868 		return EINVAL;
2869 	}
2870 
2871 	return SYSCTL_OUT(req, &value, sizeof(value));
2872 }
2873 SYSCTL_PROC(_kern, OID_AUTO, tcsm_available,
2874     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
2875     0, 0, sysctl_kern_tcsm_available, "I", "");
2876 
2877 
/*
 * kern.tcsm_enable: writing a non-zero value on a vulnerable CPU
 * disables SMT for the calling thread and applies machine_tecs().
 * Reads return whether no-SMT was already set.  There is no path here
 * to clear the flag again (one-way switch).
 */
static int
sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t soflags = 0;
	/* Snapshot the prior state before potentially changing it below. */
	uint32_t old_value = thread_get_no_smt() ? 1 : 0;

	int error = SYSCTL_IN(req, &soflags, sizeof(soflags));
	if (error) {
		return error;
	}

	if (soflags && machine_csv(CPUVN_CI)) {
		thread_set_no_smt(true);
		machine_tecs(current_thread());
	}

	return SYSCTL_OUT(req, &old_value, sizeof(old_value));
}
SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_tcsm_enable, "I", "");
2900 
/*
 * kern.preoslog: export the pre-OS (boot) log buffer.  DumpPanic writes
 * a non-zero byte first to request one-shot behaviour: only the first
 * such request succeeds, later ones get EPERM.  The buffer is freed
 * once copied out (or found empty).
 */
static int
sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	static bool oneshot_executed = false;
	size_t preoslog_size = 0;
	const char *preoslog = NULL;
	int ret = 0;

	// DumpPanic passes a non-zero write value when it needs oneshot behaviour
	if (req->newptr != USER_ADDR_NULL) {
		uint8_t oneshot = 0;
		int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot));
		if (error) {
			return error;
		}

		if (oneshot) {
			/* Atomically claim the single one-shot slot. */
			if (!os_atomic_cmpxchg(&oneshot_executed, false, true, acq_rel)) {
				return EPERM;
			}
		}
	}

	preoslog = sysctl_debug_get_preoslog(&preoslog_size);
	/* An allocated-but-empty buffer can be released immediately. */
	if (preoslog != NULL && preoslog_size == 0) {
		sysctl_debug_free_preoslog();
		return 0;
	}

	if (preoslog == NULL || preoslog_size == 0) {
		return 0;
	}

	/* Size probe: report how much space the caller needs. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = preoslog_size;
		return 0;
	}

	ret = SYSCTL_OUT(req, preoslog, preoslog_size);
	sysctl_debug_free_preoslog();
	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_debug_get_preoslog, "-", "");
2947 
2948 #if DEVELOPMENT || DEBUG
extern void sysctl_task_set_no_smt(char no_smt);
extern char sysctl_task_get_no_smt(void);

/*
 * kern.sched_task_set_no_smt: reads return the current task's no-SMT
 * setting as a character; writes set it for the task.
 */
static int
sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	/*
	 * NOTE(review): on a pure read SYSCTL_IN leaves buff untouched, so
	 * this byte is indeterminate; it is unused in that case (goto out).
	 */
	char no_smt = buff[0];

	if (!req->newptr) {
		goto out;
	}

	sysctl_task_set_no_smt(no_smt);
out:
	/* Report the resulting task setting. */
	no_smt = sysctl_task_get_no_smt();
	buff[0] = no_smt;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_sched_task_set_no_smt, "A", "");
2978 
2979 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)2980 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2981 {
2982 	int new_value, changed;
2983 	int old_value = thread_get_no_smt() ? 1 : 0;
2984 	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2985 
2986 	if (changed) {
2987 		thread_set_no_smt(!!new_value);
2988 	}
2989 
2990 	return error;
2991 }
2992 
2993 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt,
2994     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
2995     0, 0, sysctl_kern_sched_thread_set_no_smt, "I", "");
2996 
2997 #if CONFIG_SCHED_RT_ALLOW
2998 
2999 #if DEVELOPMENT || DEBUG
3000 #define RT_ALLOW_CTLFLAGS CTLFLAG_RW
3001 #else
3002 #define RT_ALLOW_CTLFLAGS CTLFLAG_RD
3003 #endif /* DEVELOPMENT || DEBUG */
3004 
3005 static int
sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3006 sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid *oidp,
3007     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3008 {
3009 	extern uint8_t rt_allow_limit_percent;
3010 
3011 	int new_value = 0;
3012 	int old_value = rt_allow_limit_percent;
3013 	int changed = 0;
3014 
3015 	int error = sysctl_io_number(req, old_value, sizeof(old_value),
3016 	    &new_value, &changed);
3017 	if (error != 0) {
3018 		return error;
3019 	}
3020 
3021 	/* Only accept a percentage between 1 and 99 inclusive. */
3022 	if (changed) {
3023 		if (new_value >= 100 || new_value <= 0) {
3024 			return EINVAL;
3025 		}
3026 
3027 		rt_allow_limit_percent = (uint8_t)new_value;
3028 	}
3029 
3030 	return 0;
3031 }
3032 
3033 SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_percent,
3034     RT_ALLOW_CTLFLAGS | CTLTYPE_INT | CTLFLAG_LOCKED,
3035     0, 0, sysctl_kern_rt_allow_limit_percent, "I", "");
3036 
/*
 * kern.rt_allow_limit_interval_ms: RT allow-limit interval in
 * milliseconds.  Writable on DEVELOPMENT/DEBUG kernels only
 * (RT_ALLOW_CTLFLAGS).
 */
static int
sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern uint16_t rt_allow_limit_interval_ms;

	uint64_t new_value = 0;
	uint64_t old_value = rt_allow_limit_interval_ms;
	int changed = 0;

	int error = sysctl_io_number(req, old_value, sizeof(old_value),
	    &new_value, &changed);
	if (error != 0) {
		return error;
	}

	/* Value is in milliseconds: must be at least 1 ms and fit a uint16_t. */
	if (changed) {
		if (new_value < 1 || new_value > UINT16_MAX) {
			return EINVAL;
		}

		rt_allow_limit_interval_ms = (uint16_t)new_value;
	}

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_interval_ms,
    RT_ALLOW_CTLFLAGS | CTLTYPE_QUAD | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_rt_allow_limit_interval_ms, "Q", "");
3068 
3069 #endif /* CONFIG_SCHED_RT_ALLOW */
3070 
3071 
3072 static int
3073 sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS
3074 {
3075 #pragma unused(oidp, arg1, arg2)
3076 	int new_value, changed;
3077 	int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0;
3078 	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3079 
3080 	if (changed) {
3081 		task_set_filter_msg_flag(current_task(), !!new_value);
3082 	}
3083 
3084 	return error;
3085 }
3086 
3087 SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3088     0, 0, sysctl_kern_task_set_filter_msg_flag, "I", "");
3089 
3090 #if CONFIG_PROC_RESOURCE_LIMITS
3091 
extern mach_port_name_t current_task_get_fatal_port_name(void);

/*
 * machdep.task_get_fatal_port: writing the flag value 1 returns the
 * current task's fatal-port name; any other flag value returns 0.
 */
static int
sysctl_kern_task_get_fatal_port SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int port = 0;
	int flag = 0;

	/* Size probe. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(mach_port_t);
		return 0;
	}

	int error = SYSCTL_IN(req, &flag, sizeof(flag));
	if (error) {
		return error;
	}

	if (flag == 1) {
		port = (int)current_task_get_fatal_port_name();
	}
	return SYSCTL_OUT(req, &port, sizeof(port));
}

SYSCTL_PROC(_machdep, OID_AUTO, task_get_fatal_port, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_task_get_fatal_port, "I", "");
3119 
3120 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
3121 
3122 extern unsigned int ipc_entry_table_count_max(void);
3123 
3124 static int
3125 sysctl_mach_max_port_table_size SYSCTL_HANDLER_ARGS
3126 {
3127 #pragma unused(oidp, arg1, arg2)
3128 	int old_value = ipc_entry_table_count_max();
3129 	int error = sysctl_io_number(req, old_value, sizeof(int), NULL, NULL);
3130 
3131 	return error;
3132 }
3133 
3134 SYSCTL_PROC(_machdep, OID_AUTO, max_port_table_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3135     0, 0, sysctl_mach_max_port_table_size, "I", "");
3136 
3137 #endif /* DEVELOPMENT || DEBUG */
3138 
3139 #if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(CONFIG_KDP_COREDUMP_ENCRYPTION)
3140 
3141 #define COREDUMP_ENCRYPTION_KEY_ENTITLEMENT "com.apple.private.coredump-encryption-key"
3142 
3143 static int
3144 sysctl_coredump_encryption_key_update SYSCTL_HANDLER_ARGS
3145 {
3146 	kern_return_t ret = KERN_SUCCESS;
3147 	int error = 0;
3148 	struct kdp_core_encryption_key_descriptor key_descriptor = {
3149 		.kcekd_format = MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256,
3150 	};
3151 
3152 	/* Need to be root and have entitlement */
3153 	if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(COREDUMP_ENCRYPTION_KEY_ENTITLEMENT)) {
3154 		return EPERM;
3155 	}
3156 
3157 	// Sanity-check the given key length
3158 	if (req->newlen > UINT16_MAX) {
3159 		return EINVAL;
3160 	}
3161 
3162 	// It is allowed for the caller to pass in a NULL buffer.
3163 	// This indicates that they want us to forget about any public key we might have.
3164 	if (req->newptr) {
3165 		key_descriptor.kcekd_size = (uint16_t) req->newlen;
3166 		key_descriptor.kcekd_key = kalloc_data(key_descriptor.kcekd_size, Z_WAITOK);
3167 
3168 		if (key_descriptor.kcekd_key == NULL) {
3169 			return ENOMEM;
3170 		}
3171 
3172 		error = SYSCTL_IN(req, key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3173 		if (error) {
3174 			goto out;
3175 		}
3176 	}
3177 
3178 	ret = IOProvideCoreFileAccess(kdp_core_handle_new_encryption_key, (void *)&key_descriptor);
3179 	if (KERN_SUCCESS != ret) {
3180 		printf("Failed to handle the new encryption key. Error 0x%x", ret);
3181 		error = EFAULT;
3182 	}
3183 
3184 out:
3185 	kfree_data(key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3186 	return 0;
3187 }
3188 
3189 SYSCTL_PROC(_kern, OID_AUTO, coredump_encryption_key, CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
3190     0, 0, &sysctl_coredump_encryption_key_update, "-", "Set a new encryption key for coredumps");
3191 
3192 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING && CONFIG_KDP_COREDUMP_ENCRYPTION*/
3193