xref: /xnu-8796.101.5/bsd/kern/sys_generic.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88 
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99 
100 #include <machine/smp.h>
101 #include <mach/mach_types.h>
102 #include <kern/kern_types.h>
103 #include <kern/assert.h>
104 #include <kern/kalloc.h>
105 #include <kern/thread.h>
106 #include <kern/clock.h>
107 #include <kern/ledger.h>
108 #include <kern/monotonic.h>
109 #include <kern/task.h>
110 #include <kern/telemetry.h>
111 #include <kern/waitq.h>
112 #include <kern/sched_hygiene.h>
113 #include <kern/sched_prim.h>
114 #include <kern/mpsc_queue.h>
115 #include <kern/debug.h>
116 
117 #include <sys/mbuf.h>
118 #include <sys/domain.h>
119 #include <sys/socket.h>
120 #include <sys/socketvar.h>
121 #include <sys/errno.h>
122 #include <sys/syscall.h>
123 #include <sys/pipe.h>
124 
125 #include <security/audit/audit.h>
126 
127 #include <net/if.h>
128 #include <net/route.h>
129 
130 #include <netinet/in.h>
131 #include <netinet/in_systm.h>
132 #include <netinet/ip.h>
133 #include <netinet/in_pcb.h>
134 #include <netinet/ip_var.h>
135 #include <netinet/ip6.h>
136 #include <netinet/tcp.h>
137 #include <netinet/tcp_fsm.h>
138 #include <netinet/tcp_seq.h>
139 #include <netinet/tcp_timer.h>
140 #include <netinet/tcp_var.h>
141 #include <netinet/tcpip.h>
142 #include <netinet/tcp_debug.h>
143 /* for wait queue based select */
144 #include <kern/waitq.h>
145 #include <sys/vnode_internal.h>
146 /* for remote time api*/
147 #include <kern/remote_time.h>
148 #include <os/log.h>
149 #include <sys/log_data.h>
150 
151 #if CONFIG_MACF
152 #include <security/mac_framework.h>
153 #endif
154 
155 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
156 #include <mach_debug/mach_debug_types.h>
157 #endif
158 
159 #if MONOTONIC
160 #include <machine/monotonic.h>
161 #endif /* MONOTONIC */
162 
163 /* for entitlement check */
164 #include <IOKit/IOBSD.h>
165 
166 /* XXX should be in a header file somewhere */
167 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
168 
169 int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
170 __private_extern__ int  dofileread(vfs_context_t ctx, struct fileproc *fp,
171     user_addr_t bufp, user_size_t nbyte,
172     off_t offset, int flags, user_ssize_t *retval);
173 __private_extern__ int  dofilewrite(vfs_context_t ctx, struct fileproc *fp,
174     user_addr_t bufp, user_size_t nbyte,
175     off_t offset, int flags, user_ssize_t *retval);
176 static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
177 
178 /* needed by guarded_writev, etc. */
179 int write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
180     off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval);
181 int writev_uio(struct proc *p, int fd, user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
182     guardid_t *puguard, user_ssize_t *retval);
183 
184 #define f_flag fp_glob->fg_flag
185 #define f_type fp_glob->fg_ops->fo_type
186 #define f_cred fp_glob->fg_cred
187 #define f_ops fp_glob->fg_ops
188 
189 /*
190  * Validate if the file can be used for random access (pread, pwrite, etc).
191  *
192  * Conditions:
193  *		proc_fdlock is held
194  *
195  * Returns:    0                       Success
196  *             ESPIPE
197  *             ENXIO
198  */
199 static int
valid_for_random_access(struct fileproc * fp)200 valid_for_random_access(struct fileproc *fp)
201 {
202 	if (__improbable(fp->f_type != DTYPE_VNODE)) {
203 		return ESPIPE;
204 	}
205 
206 	vnode_t vp = (struct vnode *)fp_get_data(fp);
207 	if (__improbable(vnode_isfifo(vp))) {
208 		return ESPIPE;
209 	}
210 
211 	if (__improbable(vp->v_flag & VISTTY)) {
212 		return ENXIO;
213 	}
214 
215 	return 0;
216 }
217 
218 /*
219  * Returns:	0			Success
220  *		EBADF
221  *		ESPIPE
222  *		ENXIO
223  *	fp_lookup:EBADF
224  *  valid_for_random_access:ESPIPE
225  *  valid_for_random_access:ENXIO
226  */
static int
preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
{
	int     error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	/* Spin variant of the fdlock: held only across table lookups. */
	proc_fdlock_spin(p);

	/* fp_lookup(..., 1) expects the fdlock held and takes an I/O ref. */
	error = fp_lookup(p, fd, &fp, 1);

	if (error) {
		proc_fdunlock(p);
		return error;
	}
	/* The descriptor must have been opened for reading. */
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	/* pread(2)/preadv(2) additionally require a seekable, non-tty vnode. */
	if (check_for_pread) {
		if ((error = valid_for_random_access(fp))) {
			goto out;
		}
	}

	/* Success: the caller inherits the I/O reference and must fp_drop() it. */
	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
263 
264 static int
fp_readv(vfs_context_t ctx,struct fileproc * fp,uio_t uio,int flags,user_ssize_t * retval)265 fp_readv(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
266     user_ssize_t *retval)
267 {
268 	int error;
269 	user_ssize_t count;
270 
271 	if ((error = uio_calculateresid(uio))) {
272 		*retval = 0;
273 		return error;
274 	}
275 
276 	count = uio_resid(uio);
277 	error = fo_read(fp, uio, flags, ctx);
278 
279 	switch (error) {
280 	case ERESTART:
281 	case EINTR:
282 	case EWOULDBLOCK:
283 		if (uio_resid(uio) != count) {
284 			error = 0;
285 		}
286 		break;
287 
288 	default:
289 		break;
290 	}
291 
292 	*retval = count - uio_resid(uio);
293 	return error;
294 }
295 
296 /*
297  * Returns:	0			Success
298  *		EINVAL
299  *	fo_read:???
300  */
301 __private_extern__ int
dofileread(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)302 dofileread(vfs_context_t ctx, struct fileproc *fp,
303     user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
304     user_ssize_t *retval)
305 {
306 	uio_stackbuf_t uio_buf[UIO_SIZEOF(1)];
307 	uio_t uio;
308 	int spacetype;
309 
310 	if (nbyte > INT_MAX) {
311 		*retval = 0;
312 		return EINVAL;
313 	}
314 
315 	spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
316 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ, &uio_buf[0],
317 	    sizeof(uio_buf));
318 
319 	if (uio_addiov(uio, bufp, nbyte) != 0) {
320 		*retval = 0;
321 		return EINVAL;
322 	}
323 
324 	return fp_readv(ctx, fp, uio, flags, retval);
325 }
326 
static int
readv_internal(struct proc *p, int fd, uio_t uio, int flags,
    user_ssize_t *retval)
{
	struct fileproc *fp = NULL;
	struct vfs_context context;
	int error;

	/* FOF_OFFSET set means positioned I/O: also validate random access. */
	if ((error = preparefileread(p, &fp, fd, flags & FOF_OFFSET))) {
		*retval = 0;
		return error;
	}

	/* Charge the I/O to the credentials the file was opened with. */
	context = *(vfs_context_current());
	context.vc_ucred = fp->fp_glob->fg_cred;

	error = fp_readv(&context, fp, uio, flags, retval);

	/* Release the I/O reference taken by preparefileread(). */
	fp_drop(p, fd, fp, 0);
	return error;
}
348 
349 static int
read_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)350 read_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
351     off_t offset, int flags, user_ssize_t *retval)
352 {
353 	uio_stackbuf_t uio_buf[UIO_SIZEOF(1)];
354 	uio_t uio;
355 	int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
356 
357 	if (nbyte > INT_MAX) {
358 		*retval = 0;
359 		return EINVAL;
360 	}
361 
362 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ,
363 	    &uio_buf[0], sizeof(uio_buf));
364 
365 	if (uio_addiov(uio, buf, nbyte) != 0) {
366 		*retval = 0;
367 		return EINVAL;
368 	}
369 
370 	return readv_internal(p, fd, uio, flags, retval);
371 }
372 
int
read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
{
	/* offset -1 with no FOF_OFFSET: read at the fd's current position. */
	return read_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
	           retval);
}
379 
380 /*
381  * Read system call.
382  *
383  * Returns:	0			Success
384  *	preparefileread:EBADF
385  *	preparefileread:ESPIPE
386  *	preparefileread:ENXIO
387  *	preparefileread:EBADF
388  *	dofileread:???
389  */
int
read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return read_nocancel(p, (struct read_nocancel_args *)uap, retval);
}
396 
int
pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace extended syscall info: fd, size, and the 64-bit offset split in two. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	/* FOF_OFFSET: read at the caller-supplied offset, not the fd's position. */
	return read_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, retval);
}
406 
407 /*
408  * Pread system call
409  *
410  * Returns:	0			Success
411  *	preparefileread:EBADF
412  *	preparefileread:ESPIPE
413  *	preparefileread:ENXIO
414  *	preparefileread:EBADF
415  *	dofileread:???
416  */
int
pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval);
}
423 
424 /*
425  * Vector read.
426  *
427  * Returns:    0                       Success
428  *             EINVAL
429  *             ENOMEM
430  *     preparefileread:EBADF
431  *     preparefileread:ESPIPE
432  *     preparefileread:ENXIO
433  *     preparefileread:EBADF
434  *     copyin:EFAULT
435  *     rd_uio:???
436  */
437 static int
readv_uio(struct proc * p,int fd,user_addr_t user_iovp,int iovcnt,off_t offset,int flags,user_ssize_t * retval)438 readv_uio(struct proc *p, int fd,
439     user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
440     user_ssize_t *retval)
441 {
442 	uio_t uio = NULL;
443 	int error;
444 	struct user_iovec *iovp;
445 
446 	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) {
447 		error = EINVAL;
448 		goto out;
449 	}
450 
451 	uio = uio_create(iovcnt, offset,
452 	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
453 	    UIO_READ);
454 
455 	iovp = uio_iovsaddr(uio);
456 	if (iovp == NULL) {
457 		error = ENOMEM;
458 		goto out;
459 	}
460 
461 	error = copyin_user_iovec_array(user_iovp,
462 	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
463 	    iovcnt, iovp);
464 
465 	if (error) {
466 		goto out;
467 	}
468 
469 	error = readv_internal(p, fd, uio, flags, retval);
470 
471 out:
472 	if (uio != NULL) {
473 		uio_free(uio);
474 	}
475 
476 	return error;
477 }
478 
int
readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
{
	/* offset 0 with no FOF_OFFSET: read at the fd's current position. */
	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval);
}
484 
485 /*
486  * Scatter read system call.
487  */
int
readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval);
}
494 
int
sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval)
{
	/* FOF_OFFSET: read at the caller-supplied offset, not the fd's position. */
	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
	           FOF_OFFSET, retval);
}
501 
502 /*
503  * Preadv system call
504  */
int
sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval);
}
511 
512 /*
513  * Returns:	0			Success
514  *		EBADF
515  *		ESPIPE
516  *		ENXIO
517  *	fp_lookup:EBADF
518  *	fp_guard_exception:???
519  *  valid_for_random_access:ESPIPE
520  *  valid_for_random_access:ENXIO
521  */
static int
preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite,
    guardid_t *puguard)
{
	int error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	/* Spin variant of the fdlock: held only across table lookups. */
	proc_fdlock_spin(p);

	if (puguard) {
		/* Guarded path: the caller supplied a guard id to validate. */
		error = fp_lookup_guarded(p, fd, *puguard, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}
	} else {
		error = fp_lookup(p, fd, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		/* Allow EBADF first. */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}

		/* Unguarded write on a write-guarded fd raises a guard exception. */
		if (fp_isguarded(fp, GUARD_WRITE)) {
			error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
			goto out;
		}
	}

	/* pwrite(2)/pwritev(2) additionally require a seekable, non-tty vnode. */
	if (check_for_pwrite) {
		if ((error = valid_for_random_access(fp))) {
			goto out;
		}
	}

	/* Success: the caller inherits the I/O reference and must fp_drop() it. */
	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
579 
static int
fp_writev(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
	int error;
	user_ssize_t count;

	/* Compute the total resid from the iovecs; rejects invalid vectors. */
	if ((error = uio_calculateresid(uio))) {
		*retval = 0;
		return error;
	}

	count = uio_resid(uio);
	error = fo_write(fp, uio, flags, ctx);

	switch (error) {
	case ERESTART:
	case EINTR:
	case EWOULDBLOCK:
		/* A partial write before interruption is reported as success. */
		if (uio_resid(uio) != count) {
			error = 0;
		}
		break;

	case EPIPE:
		/* Broken-pipe writes raise SIGPIPE unless suppressed via
		 * FG_NOSIGPIPE; sockets are deliberately excluded here. */
		if (fp->f_type != DTYPE_SOCKET &&
		    (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) {
			/* XXX Raise the signal on the thread? */
			psignal(vfs_context_proc(ctx), SIGPIPE);
		}
		break;

	default:
		break;
	}

	/* Report bytes moved; if any, mark the fileglob as written-to. */
	if ((*retval = count - uio_resid(uio))) {
		os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed);
	}

	return error;
}
622 
623 /*
624  * Returns:	0			Success
625  *		EINVAL
626  *	<fo_write>:EPIPE
627  *	<fo_write>:???			[indirect through struct fileops]
628  */
629 __private_extern__ int
dofilewrite(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)630 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
631     user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
632     user_ssize_t *retval)
633 {
634 	uio_stackbuf_t uio_buf[UIO_SIZEOF(1)];
635 	uio_t uio;
636 	int spacetype;
637 
638 	if (nbyte > INT_MAX) {
639 		*retval = 0;
640 		return EINVAL;
641 	}
642 
643 	spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
644 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE, &uio_buf[0],
645 	    sizeof(uio_buf));
646 
647 	if (uio_addiov(uio, bufp, nbyte) != 0) {
648 		*retval = 0;
649 		return EINVAL;
650 	}
651 
652 	return fp_writev(ctx, fp, uio, flags, retval);
653 }
654 
static int
writev_internal(struct proc *p, int fd, uio_t uio, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	struct fileproc *fp = NULL;
	struct vfs_context context;
	int error;

	/* FOF_OFFSET means positioned I/O; puguard selects the guarded lookup. */
	if ((error = preparefilewrite(p, &fp, fd, flags & FOF_OFFSET, puguard))) {
		*retval = 0;
		return error;
	}

	/* Charge the I/O to the credentials the file was opened with. */
	context = *(vfs_context_current());
	context.vc_ucred = fp->fp_glob->fg_cred;

	error = fp_writev(&context, fp, uio, flags, retval);

	/* Release the I/O reference taken by preparefilewrite(). */
	fp_drop(p, fd, fp, 0);
	return error;
}
676 
677 int
write_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,guardid_t * puguard,user_ssize_t * retval)678 write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
679     off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval)
680 {
681 	uio_stackbuf_t uio_buf[UIO_SIZEOF(1)];
682 	uio_t uio;
683 	int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
684 
685 	if (nbyte > INT_MAX) {
686 		*retval = 0;
687 		return EINVAL;
688 	}
689 
690 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE,
691 	    &uio_buf[0], sizeof(uio_buf));
692 
693 	if (uio_addiov(uio, buf, nbyte) != 0) {
694 		*retval = 0;
695 		return EINVAL;
696 	}
697 
698 	return writev_internal(p, fd, uio, flags, puguard, retval);
699 }
700 
int
write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
{
	/* offset -1 with no FOF_OFFSET: write at the fd's current position. */
	return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
	           NULL, retval);
}
707 
708 /*
709  * Write system call
710  *
711  * Returns:	0			Success
712  *		EBADF
713  *	fp_lookup:EBADF
714  *	dofilewrite:???
715  */
int
write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return write_nocancel(p, (struct write_nocancel_args *)uap, retval);
}
722 
int
pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace extended syscall info: fd, size, and the 64-bit offset split in two. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	/* XXX: Should be < 0 instead? (See man page + pwritev) */
	if (uap->offset == (off_t)-1) {
		return EINVAL;
	}

	/* FOF_OFFSET: write at the caller-supplied offset, not the fd's position. */
	return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
737 
738 /*
739  * pwrite system call
740  *
741  * Returns:	0			Success
742  *		EBADF
743  *		ESPIPE
744  *		ENXIO
745  *		EINVAL
746  *	fp_lookup:EBADF
747  *	dofilewrite:???
748  */
int
pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval);
}
755 
int
writev_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	uio_t uio = NULL;
	int error;
	struct user_iovec *iovp;

	/* Reject bad vector sizes and negative offsets up front. */
	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) {
		error = EINVAL;
		goto out;
	}

	uio = uio_create(iovcnt, offset,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);

	/* Backing iovec array inside the uio; filled by the copyin below. */
	iovp = uio_iovsaddr(uio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Copy the caller's iovec array in from user space in one shot. */
	error = copyin_user_iovec_array(user_iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    iovcnt, iovp);

	if (error) {
		goto out;
	}

	error = writev_internal(p, fd, uio, flags, puguard, retval);

out:
	if (uio != NULL) {
		uio_free(uio);
	}

	return error;
}
797 
int
writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
{
	/* offset 0 with no FOF_OFFSET: write at the fd's current position. */
	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, NULL, retval);
}
803 
804 /*
805  * Gather write system call
806  */
int
writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval);
}
813 
int
sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval)
{
	/* FOF_OFFSET: write at the caller-supplied offset, not the fd's position. */
	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
820 
821 /*
822  * Pwritev system call
823  */
int
sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval)
{
	/* Cancellation point: honor any pending pthread cancellation first. */
	__pthread_testcancel(1);
	return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval);
}
830 
831 /*
832  * Ioctl system call
833  *
834  * Returns:	0			Success
835  *		EBADF
836  *		ENOTTY
837  *		ENOMEM
838  *		ESRCH
839  *	copyin:EFAULT
 *	copyout:EFAULT
841  *	fp_lookup:EBADF			Bad file descriptor
842  *	fo_ioctl:???
843  */
int
ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
{
	struct fileproc *fp = NULL;
	int error = 0;
	u_int size = 0;
	caddr_t datap = NULL, memp = NULL;
	boolean_t is64bit = FALSE;
	int tmp = 0;
#define STK_PARAMS      128
	char stkbuf[STK_PARAMS] = {};
	int fd = uap->fd;
	u_long com = uap->com;
	struct vfs_context context = *vfs_context_current();

	AUDIT_ARG(fd, uap->fd);
	AUDIT_ARG(addr, uap->data);

	is64bit = proc_is64bit(p);
#if CONFIG_AUDIT
	if (is64bit) {
		AUDIT_ARG(value64, com);
	} else {
		AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
	}
#endif /* CONFIG_AUDIT */

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		return ENOTTY;
	}
	if (size > sizeof(stkbuf)) {
		/* Parameter too large for the stack buffer: heap-allocate. */
		memp = (caddr_t)kalloc_data(size, Z_WAITOK);
		if (memp == 0) {
			return ENOMEM;
		}
		datap = memp;
	} else {
		datap = &stkbuf[0];
	}
	if (com & IOC_IN) {
		if (size) {
			/* Copy the input parameter in from user space. */
			error = copyin(uap->data, datap, size);
			if (error) {
				goto out_nofp;
			}
		} else {
			/* XXX - IOC_IN and no size?  we should probably return an error here!! */
			if (is64bit) {
				*(user_addr_t *)datap = uap->data;
			} else {
				*(uint32_t *)datap = (uint32_t)uap->data;
			}
		}
	} else if ((com & IOC_OUT) && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(datap, size);
	} else if (com & IOC_VOID) {
		/* XXX - this is odd since IOC_VOID means no parameters */
		if (is64bit) {
			*(user_addr_t *)datap = uap->data;
		} else {
			*(uint32_t *)datap = (uint32_t)uap->data;
		}
	}

	/* Look up the fileproc under the fdlock; fp_lookup takes an I/O ref. */
	proc_fdlock(p);
	error = fp_lookup(p, fd, &fp, 1);
	if (error) {
		proc_fdunlock(p);
		goto out_nofp;
	}

	AUDIT_ARG(file, p, fp);

	/* ioctl(2) requires the descriptor be open for reading or writing. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	/* Use the credentials the file was opened with for the operation. */
	context.vc_ucred = fp->fp_glob->fg_cred;

#if CONFIG_MACF
	error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com);
	if (error) {
		goto out;
	}
#endif

	switch (com) {
	case FIONCLEX:
		/* Clear close-on-exec on this descriptor (per-fd flag). */
		fp->fp_flags &= ~FP_CLOEXEC;
		break;

	case FIOCLEX:
		/* Set close-on-exec on this descriptor (per-fd flag). */
		fp->fp_flags |= FP_CLOEXEC;
		break;

	case FIONBIO:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fnctl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
		}
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
		break;

	case FIOASYNC:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fnctl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FASYNC, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FASYNC, relaxed);
		}
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
		break;

	case FIOSETOWN:
		tmp = *(int *)datap;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the pgid/pid directly. */
			((struct socket *)fp_get_data(fp))->so_pgid = tmp;
			break;
		}
		if (fp->f_type == DTYPE_PIPE) {
			error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
			break;
		}
		/*
		 * For other types: a non-positive value already encodes a
		 * process group (negated); a positive value is a pid that is
		 * translated to that process's group id.
		 */
		if (tmp <= 0) {
			tmp = -tmp;
		} else {
			struct proc *p1 = proc_find(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrpid;
			proc_rele(p1);
		}
		error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			*(int *)datap = ((struct socket *)fp_get_data(fp))->so_pgid;
			break;
		}
		error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
		/* TIOCGPGRP returns a pgrp; FIOGETOWN uses the negated form. */
		*(int *)datap = -*(int *)datap;
		break;

	default:
		error = fo_ioctl(fp, com, datap, &context);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) && size) {
			error = copyout(datap, uap->data, (u_int)size);
		}
		break;
	}
out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

out_nofp:
	if (memp) {
		kfree_data(memp, size);
	}
	return error;
}
1029 
1030 int     selwait;
1031 #define SEL_FIRSTPASS 1
1032 #define SEL_SECONDPASS 2
1033 static int selprocess(struct proc *p, int error, int sel_pass);
1034 static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
1035     int nfd, int32_t *retval, int sel_pass, struct select_set *selset);
1036 static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
1037 static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup);
1038 static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim);
1039 static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);
1040 
1041 /*
1042  * This is used for the special device nodes that do not implement
1043  * a proper kevent filter (see filt_specattach).
1044  *
1045  * In order to enable kevents on those, the spec_filtops will pretend
1046  * to call select, and try to sniff the selrecord(), if it observes one,
1047  * the knote is attached, which pairs with selwakeup() or selthreadclear().
1048  *
 * The last remaining issue is that we need to serialize filt_specdetach()
 * with this, but it really can't know the "selinfo" or any locking domain.
 * To make up for this, we protect knote list operations with a global lock,
 * which gives us a safe shared locking domain.
1053  *
1054  * Note: It is a little distasteful, but we really have very few of those.
1055  *       The big problem here is that sharing a lock domain without
1056  *       any kind of shared knowledge is a little complicated.
1057  *
1058  *       1. filters can really implement their own kqueue integration
1059  *          to side step this,
1060  *
1061  *       2. There's an opportunity to pick a private lock in selspec_attach()
1062  *          because both the selinfo and the knote are locked at that time.
1063  *          The cleanup story is however a little complicated.
1064  */
1065 static LCK_GRP_DECLARE(selspec_grp, "spec_filtops");
1066 static LCK_SPIN_DECLARE(selspec_lock, &selspec_grp);
1067 
/*
 * Attach a knote to a selinfo for the special-device kevent shim.
 *
 * Conditions:
 *	The "primitive" lock is held.
 *	The knote lock is held.
 */
void
selspec_attach(struct knote *kn, struct selinfo *si)
{
	/*
	 * relaxed load is sufficient: kn_hook only becomes non-NULL under
	 * the knote lock, which is held here (see selspec_detach()).
	 */
	struct selinfo *cur = os_atomic_load(&kn->kn_hook, relaxed);

	if (cur == NULL) {
		/*
		 * First attach: mark the selinfo so that selwakeup_internal()
		 * knows it must post to si_note under selspec_lock.
		 */
		si->si_flags |= SI_SELSPEC;
		lck_spin_lock(&selspec_lock);
		kn->kn_hook = si;
		KNOTE_ATTACH(&si->si_note, kn);
		lck_spin_unlock(&selspec_lock);
	} else {
		/*
		 * selspec_attach() can be called from e.g. filt_spectouch()
		 * which might be called before any event was dequeued.
		 *
		 * It is hence not impossible for the knote to already be hooked.
		 *
		 * Note that selwakeup_internal() could possibly
		 * already have cleared this pointer. This is a race
		 * that filt_specprocess will debounce.
		 */
		assert(si->si_flags & SI_SELSPEC);
		assert(cur == si);
	}
}
1098 
1099 /*
1100  * The "primitive" lock is _not_ held.
1101  * The knote lock is held.
1102  */
1103 void
selspec_detach(struct knote * kn)1104 selspec_detach(struct knote *kn)
1105 {
1106 	/*
1107 	 * kn_hook always becomes non NULL under the knote lock.
1108 	 * Seeing "NULL" can't be a false positive.
1109 	 */
1110 	if (kn->kn_hook == NULL) {
1111 		return;
1112 	}
1113 
1114 	lck_spin_lock(&selspec_lock);
1115 	if (kn->kn_hook) {
1116 		struct selinfo *sip = kn->kn_hook;
1117 
1118 		kn->kn_hook = NULL;
1119 		KNOTE_DETACH(&sip->si_note, kn);
1120 	}
1121 	lck_spin_unlock(&selspec_lock);
1122 }
1123 
1124 /*
1125  * Select system call.
1126  *
1127  * Returns:	0			Success
1128  *		EINVAL			Invalid argument
1129  *		EAGAIN			Nonconformant error if allocation fails
1130  */
1131 int
select(struct proc * p,struct select_args * uap,int32_t * retval)1132 select(struct proc *p, struct select_args *uap, int32_t *retval)
1133 {
1134 	__pthread_testcancel(1);
1135 	return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
1136 }
1137 
1138 int
select_nocancel(struct proc * p,struct select_nocancel_args * uap,int32_t * retval)1139 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
1140 {
1141 	uint64_t timeout = 0;
1142 
1143 	if (uap->tv) {
1144 		int err;
1145 		struct timeval atv;
1146 		if (IS_64BIT_PROCESS(p)) {
1147 			struct user64_timeval atv64;
1148 			err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1149 			/* Loses resolution - assume timeout < 68 years */
1150 			atv.tv_sec = (__darwin_time_t)atv64.tv_sec;
1151 			atv.tv_usec = atv64.tv_usec;
1152 		} else {
1153 			struct user32_timeval atv32;
1154 			err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1155 			atv.tv_sec = atv32.tv_sec;
1156 			atv.tv_usec = atv32.tv_usec;
1157 		}
1158 		if (err) {
1159 			return err;
1160 		}
1161 
1162 		if (itimerfix(&atv)) {
1163 			err = EINVAL;
1164 			return err;
1165 		}
1166 
1167 		clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
1168 	}
1169 
1170 	return select_internal(p, uap, timeout, retval);
1171 }
1172 
1173 int
pselect(struct proc * p,struct pselect_args * uap,int32_t * retval)1174 pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
1175 {
1176 	__pthread_testcancel(1);
1177 	return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
1178 }
1179 
/*
 * Non-cancelable pselect: like select_nocancel() but takes a timespec and
 * an optional signal mask that is installed for the duration of the wait.
 */
int
pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
{
	int err;
	struct uthread *ut;
	uint64_t timeout = 0;

	/* Convert the optional user timespec into an absolute deadline. */
	if (uap->ts) {
		struct timespec ts;

		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
			ts.tv_sec = (__darwin_time_t)ts64.tv_sec;
			ts.tv_nsec = (long)ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}
		if (err) {
			return err;
		}

		if (!timespec_is_valid(&ts)) {
			return EINVAL;
		}
		clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
	}

	ut = current_uthread();

	if (uap->mask != USER_ADDR_NULL) {
		/* save current mask, then copyin and set new mask */
		sigset_t newset;
		err = copyin(uap->mask, &newset, sizeof(sigset_t));
		if (err) {
			return err;
		}
		/* UT_SAS_OLDMASK marks that uu_oldmask must be restored later */
		ut->uu_oldmask = ut->uu_sigmask;
		ut->uu_flag |= UT_SAS_OLDMASK;
		/* signals in sigcantmask can never be blocked */
		ut->uu_sigmask = (newset & ~sigcantmask);
	}

	err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);

	if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
		/*
		 * Restore old mask (direct return case). NOTE: EINTR can also be returned
		 * if the thread is cancelled. In that case, we don't reset the signal
		 * mask to its original value (which usually happens in the signal
		 * delivery path). This behavior is permitted by POSIX.
		 */
		ut->uu_sigmask = ut->uu_oldmask;
		ut->uu_oldmask = 0;
		ut->uu_flag &= ~UT_SAS_OLDMASK;
	}

	return err;
}
1241 
1242 void
select_cleanup_uthread(struct _select * sel)1243 select_cleanup_uthread(struct _select *sel)
1244 {
1245 	kfree_data(sel->ibits, 2 * sel->nbytes);
1246 	sel->ibits = sel->obits = NULL;
1247 	sel->nbytes = 0;
1248 }
1249 
1250 static int
select_grow_uthread_cache(struct _select * sel,uint32_t nbytes)1251 select_grow_uthread_cache(struct _select *sel, uint32_t nbytes)
1252 {
1253 	uint32_t *buf;
1254 
1255 	buf = kalloc_data(2 * nbytes, Z_WAITOK | Z_ZERO);
1256 	if (buf) {
1257 		select_cleanup_uthread(sel);
1258 		sel->ibits = buf;
1259 		sel->obits = buf + nbytes / sizeof(uint32_t);
1260 		sel->nbytes = nbytes;
1261 		return true;
1262 	}
1263 	return false;
1264 }
1265 
1266 static void
select_bzero_uthread_cache(struct _select * sel)1267 select_bzero_uthread_cache(struct _select *sel)
1268 {
1269 	bzero(sel->ibits, sel->nbytes * 2);
1270 }
1271 
/*
 * Generic implementation of {,p}select. Care: we type-pun uap across the two
 * syscalls, which differ slightly. The first 4 arguments (nfds and the fd sets)
 * are identical. The 5th (timeout) argument points to different types, so we
 * unpack in the syscall-specific code, but the generic code still does a null
 * check on this argument to determine if a timeout was specified.
 */
static int
select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
{
	struct uthread *uth = current_uthread();
	struct _select *sel = &uth->uu_select;
	struct _select_data *seldata = &uth->uu_save.uus_select_data;
	int error = 0;
	u_int ni, nw;

	*retval = 0;

	/* stash the call parameters for selprocess()/selcontinue() */
	seldata->abstime = timeout;
	seldata->args = uap;
	seldata->retval = retval;
	seldata->count = 0;

	if (uap->nd < 0) {
		return EINVAL;
	}

	if (uap->nd > p->p_fd.fd_nfiles) {
		uap->nd = p->p_fd.fd_nfiles; /* forgiving; slightly wrong */
	}
	nw = howmany(uap->nd, NFDBITS); /* words per fd set */
	ni = nw * sizeof(fd_mask);      /* bytes per fd set */

	/*
	 * if the previously allocated space for the bits is smaller than
	 * what is requested or no space has yet been allocated for this
	 * thread, allocate enough space now.
	 *
	 * Note: If this process fails, select() will return EAGAIN; this
	 * is the same thing poll() returns in a no-memory situation, but
	 * it is not a POSIX compliant error code for select().
	 */
	if (sel->nbytes >= (3 * ni)) {
		select_bzero_uthread_cache(sel);
	} else if (!select_grow_uthread_cache(sel, 3 * ni)) {
		return EAGAIN;
	}

	/*
	 * get the bits from the user address space
	 */
#define getbits(name, x) \
	(uap->name ? copyin(uap->name, &sel->ibits[(x) * nw], ni) : 0)

	if ((error = getbits(in, 0))) {
		return error;
	}
	if ((error = getbits(ou, 1))) {
		return error;
	}
	if ((error = getbits(ex, 2))) {
		return error;
	}
#undef  getbits

	/* count the requested fds and take an I/O reference on each one */
	if ((error = selcount(p, sel->ibits, uap->nd, &seldata->count))) {
		return error;
	}

	/* lazily allocate the per-thread select wait queue set */
	if (uth->uu_selset == NULL) {
		uth->uu_selset = select_set_alloc();
	}
	return selprocess(p, 0, SEL_FIRSTPASS);
}
1346 
1347 static int
selcontinue(int error)1348 selcontinue(int error)
1349 {
1350 	return selprocess(current_proc(), error, SEL_SECONDPASS);
1351 }
1352 
1353 
/*
 * selprocess
 *
 * The core select state machine. Entered directly from select_internal()
 * with SEL_FIRSTPASS, and as a continuation via selcontinue() with
 * SEL_SECONDPASS after a blocking wait.
 *
 * Parameters:	error			The error code from our caller
 *		sel_pass		The pass we are on
 */
int
selprocess(struct proc *p, int error, int sel_pass)
{
	struct uthread *uth = current_uthread();
	struct _select *sel = &uth->uu_select;
	struct _select_data *seldata = &uth->uu_save.uus_select_data;
	struct select_nocancel_args *uap = seldata->args;
	int *retval = seldata->retval;

	int unwind = 1;
	int prepost = 0;
	int somewakeup = 0;
	int doretry = 0;
	wait_result_t wait_result;

	/*
	 * No unwind (seldrop of the iocounts taken by selcount) is needed
	 * when the first pass failed before scanning, or when no fds were
	 * counted at all.
	 */
	if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) {
		unwind = 0;
	}
	if (seldata->count == 0) {
		unwind = 0;
	}
retry:
	if (error != 0) {
		goto done;
	}

	OSBitOrAtomic(P_SELECT, &p->p_flag);

	/* skip scans if the select is just for timeouts */
	if (seldata->count) {
		error = selscan(p, sel, seldata, uap->nd, retval, sel_pass,
		    uth->uu_selset);
		if (error || *retval) {
			goto done;
		}
		if (prepost || somewakeup) {
			/*
			 * We were woken but the scan found nothing ready:
			 * someone else may already have consumed the data.
			 * Go around and select again if time permits.
			 */
			prepost = 0;
			somewakeup = 0;
			doretry = 1;
		}
	}

	/* a timeout was supplied and has already expired: we are done */
	if (uap->tv) {
		uint64_t        now;

		clock_get_uptime(&now);
		if (now >= seldata->abstime) {
			goto done;
		}
	}

	if (doretry) {
		/* cleanup obits and try again */
		doretry = 0;
		sel_pass = SEL_FIRSTPASS;
		goto retry;
	}

	/*
	 * To effect a poll, the timeout argument should be
	 * non-nil, pointing to a zero-valued timeval structure.
	 */
	if (uap->tv && seldata->abstime == 0) {
		goto done;
	}

	/* No spurious wakeups due to colls, no need to check for them */
	if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
		sel_pass = SEL_FIRSTPASS;
		goto retry;
	}

	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);

	/* if the select is just for timeout skip check */
	if (seldata->count && (sel_pass == SEL_SECONDPASS)) {
		panic("selprocess: 2nd pass assertwaiting");
	}

	/* block on the per-thread select set until a wakeup or the deadline */
	wait_result = waitq_assert_wait64_leeway(uth->uu_selset,
	    NO_EVENT64, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL,
	    seldata->abstime,
	    TIMEOUT_NO_LEEWAY);
	if (wait_result != THREAD_AWAKENED) {
		/* there are no preposted events */
		error = tsleep1(NULL, PSOCK | PCATCH,
		    "select", 0, selcontinue);
	} else {
		prepost = 1;
		error = 0;
	}

	if (error == 0) {
		sel_pass = SEL_SECONDPASS;
		if (!prepost) {
			somewakeup = 1;
		}
		goto retry;
	}
done:
	if (unwind) {
		/* drop the iocounts from selcount() and reset the waitq set */
		seldrop(p, sel->ibits, uap->nd, seldata->count);
		select_set_reset(uth->uu_selset);
	}
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* select is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == EWOULDBLOCK) {
		error = 0;
	}

	/* copy the output fd sets back out to user space on success */
	if (error == 0) {
		uint32_t nw = howmany(uap->nd, NFDBITS);
		uint32_t ni = nw * sizeof(fd_mask);

#define putbits(name, x) \
	(uap->name ? copyout(&sel->obits[(x) * nw], uap->name, ni) : 0)
		int e0 = putbits(in, 0);
		int e1 = putbits(ou, 1);
		int e2 = putbits(ex, 2);

		error = e0 ?: e1 ?: e2;
#undef putbits
	}

	/* pselect: restore the signal mask when returning via continuation */
	if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
		/* restore signal mask - continuation case */
		uth->uu_sigmask = uth->uu_oldmask;
		uth->uu_oldmask = 0;
		uth->uu_flag &= ~UT_SAS_OLDMASK;
	}

	return error;
}
1502 
1503 
1504 /**
1505  * remove the fileproc's underlying waitq from the supplied waitq set;
1506  * clear FP_INSELECT when appropriate
1507  *
1508  * Parameters:
1509  *		fp	File proc that is potentially currently in select
1510  *		selset	Waitq set to which the fileproc may belong
1511  *			(usually this is the thread's private waitq set)
1512  * Conditions:
1513  *		proc_fdlock is held
1514  */
1515 static void
selunlinkfp(struct fileproc * fp,struct select_set * selset)1516 selunlinkfp(struct fileproc *fp, struct select_set *selset)
1517 {
1518 	if (fp->fp_flags & FP_INSELECT) {
1519 		if (fp->fp_guard_attrs) {
1520 			if (fp->fp_guard->fpg_wset == selset) {
1521 				fp->fp_guard->fpg_wset = NULL;
1522 				fp->fp_flags &= ~FP_INSELECT;
1523 			}
1524 		} else {
1525 			if (fp->fp_wset == selset) {
1526 				fp->fp_wset = NULL;
1527 				fp->fp_flags &= ~FP_INSELECT;
1528 			}
1529 		}
1530 	}
1531 }
1532 
1533 /**
1534  * connect a fileproc to the given selset, potentially bridging to a waitq
1535  * pointed to indirectly by wq_data
1536  *
1537  * Parameters:
1538  *		fp	File proc potentially currently in select
1539  *		selset	Waitq set to which the fileproc should now belong
1540  *			(usually this is the thread's private waitq set)
1541  *
1542  * Conditions:
1543  *		proc_fdlock is held
1544  */
1545 static void
sellinkfp(struct fileproc * fp,struct select_set * selset,waitq_link_t * linkp)1546 sellinkfp(struct fileproc *fp, struct select_set *selset, waitq_link_t *linkp)
1547 {
1548 	if ((fp->fp_flags & FP_INSELECT) == 0) {
1549 		if (fp->fp_guard_attrs) {
1550 			fp->fp_guard->fpg_wset = selset;
1551 		} else {
1552 			fp->fp_wset = selset;
1553 		}
1554 		fp->fp_flags |= FP_INSELECT;
1555 	} else {
1556 		fp->fp_flags |= FP_SELCONFLICT;
1557 		if (linkp->wqlh == NULL) {
1558 			*linkp = waitq_link_alloc(WQT_SELECT_SET);
1559 		}
1560 		select_set_link(&select_conflict_queue, selset, linkp);
1561 	}
1562 }
1563 
1564 
/*
 * selscan
 *
 * Parameters:	p			Process performing the select
 *		sel			The per-thread select context structure
 *		seldata			The per-thread saved select state
 *		nfd			The number of file descriptors to scan
 *		retval			The per thread system call return area
 *		sel_pass		Which pass this is; allowed values are
 *						SEL_FIRSTPASS and SEL_SECONDPASS
 *		selset			The per thread wait queue set
 *
 * Returns:	0			Success
 *		EIO			Invalid p->p_fd field XXX Obsolete?
 *		EBADF			One of the files in the bit vector is
 *						invalid.
 */
static int
selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct select_set *selset)
{
	int msk, i, j, fd;
	u_int32_t bits;
	struct fileproc *fp;
	int n = 0;              /* count of bits */
	int nc = 0;             /* bit vector offset (nc'th bit) */
	/* maps fd-set index (read/write/except) to the fo_select "which" flag */
	static int flag[3] = { FREAD, FWRITE, 0 };
	u_int32_t *iptr, *optr;
	u_int nw;
	u_int32_t *ibits, *obits;
	int count;
	struct vfs_context context = {
		.vc_thread = current_thread(),
	};
	waitq_link_t link = WQL_NULL;
	void *s_data;

	ibits = sel->ibits;
	obits = sel->obits;

	nw = howmany(nfd, NFDBITS);

	count = seldata->count;

	nc = 0;
	if (!count) {
		*retval = 0;
		return 0;
	}

	if (sel_pass == SEL_FIRSTPASS) {
		/*
		 * Make sure the waitq-set is all clean:
		 *
		 * select loops until it finds at least one event, however it
		 * doesn't mean that the event that woke up select is still
		 * fired by the time the second pass runs, and then
		 * select_internal will loop back to a first pass.
		 */
		select_set_reset(selset);
		s_data = &link;
	} else {
		/* second pass: NULL s_data tells selrecord() not to record */
		s_data = NULL;
	}

	proc_fdlock(p);
	/* one outer iteration per fd set: read, write, except */
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		optr = (u_int32_t *)&obits[msk * nw];

		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];

			/* visit each set bit below nfd within this word */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);

				fp = fp_get_noref_locked(p, fd);
				if (fp == NULL) {
					/*
					 * If we abort because of a bad
					 * fd, let the caller unwind...
					 */
					proc_fdunlock(p);
					return EBADF;
				}
				if (sel_pass == SEL_SECONDPASS) {
					selunlinkfp(fp, selset);
				} else if (link.wqlh == NULL) {
					link = waitq_link_alloc(WQT_SELECT_SET);
				}

				context.vc_ucred = fp->f_cred;

				/* The select; set the bit, if true */
				if (fo_select(fp, flag[msk], s_data, &context)) {
					optr[fd / NFDBITS] |= (1U << (fd % NFDBITS));
					n++;
				}
				if (sel_pass == SEL_FIRSTPASS) {
					/*
					 * Hook up the thread's waitq set either to
					 * the fileproc structure, or to the global
					 * conflict queue: but only on the first
					 * select pass.
					 */
					sellinkfp(fp, selset, &link);
				}
				nc++;
			}
		}
	}
	proc_fdunlock(p);

	/* free the spare link if sellinkfp()/selrecord() did not consume it */
	if (link.wqlh) {
		waitq_link_free(WQT_SELECT_SET, link);
	}

	*retval = n;
	return 0;
}
1684 
1685 static int poll_callback(struct kevent_qos_s *, kevent_ctx_t);
1686 
1687 int
poll(struct proc * p,struct poll_args * uap,int32_t * retval)1688 poll(struct proc *p, struct poll_args *uap, int32_t *retval)
1689 {
1690 	__pthread_testcancel(1);
1691 	return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval);
1692 }
1693 
1694 
/*
 * Non-cancelable poll, implemented on top of a private kqueue: each
 * pollfd is translated into one or more one-shot kevents, then
 * kqueue_scan() waits for them and poll_callback() converts the fired
 * events back into revents bits.
 */
int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
	struct pollfd *fds = NULL;
	struct kqueue *kq = NULL;
	int error = 0;
	u_int nfds = uap->nfds;
	u_int rfds = 0;         /* number of pollfds with revents set */
	rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE);
	size_t ni = nfds * sizeof(struct pollfd);

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	if (nfds > OPEN_MAX ||
	    (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) {
		return EINVAL;
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		return EAGAIN;
	}

	if (nfds) {
		fds = (struct pollfd *)kalloc_data(ni, Z_WAITOK);
		if (NULL == fds) {
			error = EAGAIN;
			goto out;
		}

		error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
		if (error) {
			goto out;
		}
	}

	/* JMM - all this P_SELECT stuff is bogus */
	OSBitOrAtomic(P_SELECT, &p->p_flag);
	/* register one-shot kevents for every event requested on every fd */
	for (u_int i = 0; i < nfds; i++) {
		short events = fds[i].events;
		__assert_only int rc;

		/* per spec, ignore fd values below zero */
		if (fds[i].fd < 0) {
			fds[i].revents = 0;
			continue;
		}

		/* convert the poll event into a kqueue kevent */
		struct kevent_qos_s kev = {
			.ident = fds[i].fd,
			.flags = EV_ADD | EV_ONESHOT | EV_POLL,
			.udata = CAST_USER_ADDR_T(&fds[i]) /* consumed by poll_callback() */
		};

		/* Handle input events */
		if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) {
			kev.filter = EVFILT_READ;
			if (events & (POLLPRI | POLLRDBAND)) {
				kev.flags |= EV_OOBAND;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle output events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) {
			kev.filter = EVFILT_WRITE;
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle BSD extension vnode events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) {
			kev.filter = EVFILT_VNODE;
			kev.fflags = 0;
			if (events & POLLEXTEND) {
				kev.fflags |= NOTE_EXTEND;
			}
			if (events & POLLATTRIB) {
				kev.fflags |= NOTE_ATTRIB;
			}
			if (events & POLLNLINK) {
				kev.fflags |= NOTE_LINK;
			}
			if (events & POLLWRITE) {
				kev.fflags |= NOTE_WRITE;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* registration failure is reported to the caller as POLLNVAL */
		if (kev.flags & EV_ERROR) {
			fds[i].revents = POLLNVAL;
			rfds++;
		} else {
			fds[i].revents = 0;
		}
	}

	/*
	 * Did we have any trouble registering?
	 * If user space passed 0 FDs, then respect any timeout value passed.
	 * This is an extremely inefficient sleep. If user space passed one or
	 * more FDs, and we had trouble registering _all_ of them, then bail
	 * out. If a subset of the provided FDs failed to register, then we
	 * will still call the kqueue_scan function.
	 */
	if (nfds && (rfds == nfds)) {
		goto done;
	}

	/* scan for, and possibly wait for, the kevents to trigger */
	kevent_ctx_t kectx = kevent_get_context(current_thread());
	*kectx = (struct kevent_ctx_s){
		.kec_process_noutputs = rfds,
		.kec_process_flags    = KEVENT_FLAG_POLL,
		.kec_deadline         = 0, /* wait forever */
	};

	/*
	 * If any events have trouble registering, an event has fired and we
	 * shouldn't wait for events in kqueue_scan.
	 */
	if (rfds) {
		kectx->kec_process_flags |= KEVENT_FLAG_IMMEDIATE;
	} else if (uap->timeout != -1) {
		/* poll timeout is in milliseconds */
		clock_interval_to_deadline(uap->timeout, NSEC_PER_MSEC,
		    &kectx->kec_deadline);
	}

	error = kqueue_scan(kq, kectx->kec_process_flags, kectx, poll_callback);
	rfds = kectx->kec_process_noutputs;

done:
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* poll is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == 0) {
		/* copy the updated revents back out to user space */
		error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
		*retval = rfds;
	}

out:
	/* fds may still be NULL here (nfds == 0 or early failure) */
	kfree_data(fds, ni);

	kqueue_dealloc(kq);
	return error;
}
1853 
/*
 * poll_callback
 *
 * kqueue_scan() callback for poll(): translate a fired kevent back into
 * revents bits on the pollfd it was registered for (a pointer stashed in
 * kev.udata by poll_nocancel()), and count the pollfd as an output the
 * first time any of its revents bits get set.
 */
static int
poll_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
{
	struct pollfd *fds = CAST_DOWN(struct pollfd *, kevp->udata);
	short prev_revents = fds->revents;
	short mask = 0;

	/* convert the results back into revents */
	if (kevp->flags & EV_EOF) {
		fds->revents |= POLLHUP;
	}
	if (kevp->flags & EV_ERROR) {
		fds->revents |= POLLERR;
	}

	switch (kevp->filter) {
	case EVFILT_READ:
		/* on hangup, report every read flavor; else only what fired */
		if (fds->revents & POLLHUP) {
			mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND);
		} else {
			mask = (POLLIN | POLLRDNORM);
			if (kevp->flags & EV_OOBAND) {
				mask |= (POLLPRI | POLLRDBAND);
			}
		}
		/* only surface events the caller actually asked for */
		fds->revents |= (fds->events & mask);
		break;

	case EVFILT_WRITE:
		if (!(fds->revents & POLLHUP)) {
			fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND));
		}
		break;

	case EVFILT_VNODE:
		/* BSD extension vnode events (see registration in poll_nocancel) */
		if (kevp->fflags & NOTE_EXTEND) {
			fds->revents |= (fds->events & POLLEXTEND);
		}
		if (kevp->fflags & NOTE_ATTRIB) {
			fds->revents |= (fds->events & POLLATTRIB);
		}
		if (kevp->fflags & NOTE_LINK) {
			fds->revents |= (fds->events & POLLNLINK);
		}
		if (kevp->fflags & NOTE_WRITE) {
			fds->revents |= (fds->events & POLLWRITE);
		}
		break;
	}

	/* first event observed on this pollfd: bump the output count */
	if (fds->revents != 0 && prev_revents == 0) {
		kectx->kec_process_noutputs++;
	}

	return 0;
}
1910 
/*
 * Degenerate device select routine: unconditionally reports the device
 * as ready, regardless of the requested operation.
 */
int
seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
{
	return 1;
}
1916 
/*
 * selcount
 *
 * Count the number of bits set in the input bit vector, and establish an
 * outstanding fp->fp_iocount for each of the descriptors which will be in
 * use in the select operation.
 *
 * Parameters:	p			The process doing the select
 *		ibits			The input bit vector
 *		nfd			The number of fd's in the vector
 *		countp			Pointer to where to store the bit count
 *
 * Returns:	0			Success
 *		EIO			Bad per process open file table
 *		EBADF			One of the bits in the input bit vector
 *						references an invalid fd
 *
 * Implicit:	*countp (modified)	Count of fd's
 *
 * Notes:	This function is the first pass under the proc_fdlock() that
 *		permits us to recognize invalid descriptors in the bit vector;
 *		they may, however, not remain valid through the drop and
 *		later reacquisition of the proc_fdlock().
 */
static int
selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
{
	int msk, i, j, fd;
	u_int32_t bits;
	struct fileproc *fp;
	int n = 0;              /* fds counted so far (== iocounts taken) */
	u_int32_t *iptr;
	u_int nw;
	int error = 0;
	int need_wakeup = 0;

	nw = howmany(nfd, NFDBITS);

	proc_fdlock(p);
	/* walk the three fd sets: read, write, except */
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];
			/* visit each set bit below nfd within this word */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);

				fp = fp_get_noref_locked(p, fd);
				if (fp == NULL) {
					*countp = 0;
					error = EBADF;
					goto bad;
				}
				/* pin the fileproc for the duration of the select */
				os_ref_retain_locked(&fp->fp_iocount);
				n++;
			}
		}
	}
	proc_fdunlock(p);

	*countp = n;
	return 0;

bad:
	/* undo the iocounts taken so far before reporting the bad fd */
	if (n == 0) {
		goto out;
	}
	/* Ignore error return; it's already EBADF */
	(void)seldrop_locked(p, ibits, nfd, n, &need_wakeup);

out:
	proc_fdunlock(p);
	if (need_wakeup) {
		wakeup(&p->p_fd.fd_fpdrainwait);
	}
	return error;
}
1993 
1994 
1995 /*
1996  * seldrop_locked
1997  *
1998  * Drop outstanding wait queue references set up during selscan(); drop the
1999  * outstanding per fileproc fp_iocount picked up during the selcount().
2000  *
2001  * Parameters:	p			Process performing the select
2002  *		ibits			Input bit bector of fd's
2003  *		nfd			Number of fd's
2004  *		lim			Limit to number of vector entries to
2005  *						consider, or -1 for "all"
2006  *		inselect		True if
2007  *		need_wakeup		Pointer to flag to set to do a wakeup
2008  *					if f_iocont on any descriptor goes to 0
2009  *
2010  * Returns:	0			Success
2011  *		EBADF			One or more fds in the bit vector
2012  *						were invalid, but the rest
2013  *						were successfully dropped
2014  *
2015  * Notes:	An fd make become bad while the proc_fdlock() is not held,
2016  *		if a multithreaded application closes the fd out from under
2017  *		the in progress select.  In this case, we still have to
2018  *		clean up after the set up on the remaining fds.
2019  */
/*
 * seldrop_locked
 *
 * Drop the fileproc I/O references taken by selcount() for up to `lim`
 * descriptors marked in `ibits`, unlinking each fileproc from this
 * thread's select set.  Called with the proc fd lock held.
 *
 * Parameters:	p		Process whose descriptors were selected
 *		ibits		Select bit vectors (3 fd_sets of nw words each)
 *		nfd		Number of descriptors covered by ibits
 *		lim		Number of references to drop (the count
 *				established by the matching selcount())
 *		need_wakeup	Out: set to 1 when a thread waiting for fd
 *				iocounts to drain must be woken once the fd
 *				lock is released (caller must do the wakeup)
 *
 * Returns:	0		Always (error is currently never set)
 */
static int
seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup)
{
	int msk, i, j, nc, fd;
	u_int32_t bits;
	struct fileproc *fp;
	u_int32_t *iptr;
	u_int nw;
	int error = 0;
	uthread_t uth = current_uthread();
	struct _select_data *seldata;

	*need_wakeup = 0;

	/* Number of 32-bit words per fd_set row. */
	nw = howmany(nfd, NFDBITS);
	/*
	 * NOTE(review): seldata is assigned but never read below — looks like
	 * leftover from an earlier revision; confirm before removing.
	 */
	seldata = &uth->uu_save.uus_select_data;

	nc = 0;
	/* Walk the read, write and except fd_sets in turn. */
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];
			/* Visit each set bit; ffs() is 1-based, hence --j. */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);
				/*
				 * If we've already dropped as many as were
				 * counted/scanned, then we are done.
				 */
				if (nc >= lim) {
					goto done;
				}

				/*
				 * We took an I/O reference in selcount,
				 * so the fp can't possibly be NULL.
				 */
				fp = fp_get_noref_locked_with_iocount(p, fd);
				selunlinkfp(fp, uth->uu_selset);

				nc++;

				const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount);
				if (0 == refc) {
					panic("fp_iocount overdecrement!");
				}

				if (1 == refc) {
					/*
					 * The last iocount is responsible for clearing
					 * selconfict flag - even if we didn't set it -
					 * and is also responsible for waking up anyone
					 * waiting on iocounts to drain.
					 */
					if (fp->fp_flags & FP_SELCONFLICT) {
						fp->fp_flags &= ~FP_SELCONFLICT;
					}
					if (p->p_fd.fd_fpdrainwait) {
						p->p_fd.fd_fpdrainwait = 0;
						*need_wakeup = 1;
					}
				}
			}
		}
	}
done:
	return error;
}
2087 
2088 
2089 static int
seldrop(struct proc * p,u_int32_t * ibits,int nfd,int lim)2090 seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim)
2091 {
2092 	int error;
2093 	int need_wakeup = 0;
2094 
2095 	proc_fdlock(p);
2096 	error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup);
2097 	proc_fdunlock(p);
2098 	if (need_wakeup) {
2099 		wakeup(&p->p_fd.fd_fpdrainwait);
2100 	}
2101 	return error;
2102 }
2103 
2104 /*
2105  * Record a select request.
2106  */
2107 void
selrecord(__unused struct proc * selector,struct selinfo * sip,void * s_data)2108 selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
2109 {
2110 	struct select_set *selset = current_uthread()->uu_selset;
2111 
2112 	/* do not record if this is second pass of select */
2113 	if (!s_data) {
2114 		return;
2115 	}
2116 
2117 	if (selset == SELSPEC_RECORD_MARKER) {
2118 		/*
2119 		 * The kevent subsystem is trying to sniff
2120 		 * the selinfo::si_note to attach to.
2121 		 */
2122 		((selspec_record_hook_t)s_data)(sip);
2123 	} else {
2124 		waitq_link_t *linkp = s_data;
2125 
2126 		if (!waitq_is_valid(&sip->si_waitq)) {
2127 			waitq_init(&sip->si_waitq, WQT_SELECT, SYNC_POLICY_FIFO);
2128 		}
2129 
2130 		/* note: this checks for pre-existing linkage */
2131 		select_set_link(&sip->si_waitq, selset, linkp);
2132 	}
2133 }
2134 
/*
 * Common wakeup path shared by selwakeup() and selthreadclear().
 *
 * If kevent has attached to this selinfo (SI_SELSPEC), deliver `hint`
 * to the knotes under the selspec spin lock and detach them all, then
 * wake every thread linked to the embedded waitq with wait result `wr`,
 * unlinking and deinitializing the waitq in a single combined call.
 */
static void
selwakeup_internal(struct selinfo *sip, long hint, wait_result_t wr)
{
	if (sip->si_flags & SI_SELSPEC) {
		/*
		 * The "primitive" lock is held.
		 * The knote lock is not held.
		 *
		 * All knotes will transition their kn_hook to NULL.
		 */
		lck_spin_lock(&selspec_lock);
		KNOTE(&sip->si_note, hint);
		klist_init(&sip->si_note);
		lck_spin_unlock(&selspec_lock);
		sip->si_flags &= ~SI_SELSPEC;
	}

	/*
	 * After selrecord() has been called, selinfo owners must call
	 * at least one of selwakeup() or selthreadclear().
	 *
	 * Use this opportunity to deinit the waitq
	 * so that all linkages are garbage collected
	 * in a combined wakeup-all + unlink + deinit call.
	 */
	select_waitq_wakeup_and_deinit(&sip->si_waitq, NO_EVENT64, wr);
}
2162 
2163 
/*
 * Wake up threads selecting on `sip` with a normal THREAD_AWAKENED
 * result; attached kevent knotes receive a 0 hint.
 */
void
selwakeup(struct selinfo *sip)
{
	selwakeup_internal(sip, 0, THREAD_AWAKENED);
}
2169 
/*
 * Tear down select/kevent state for a selinfo whose backing object is
 * going away: knotes are notified with NOTE_REVOKE and waiting threads
 * are released with THREAD_RESTART.
 */
void
selthreadclear(struct selinfo *sip)
{
	selwakeup_internal(sip, NOTE_REVOKE, THREAD_RESTART);
}
2175 
2176 
2177 /*
2178  * gethostuuid
2179  *
2180  * Description:	Get the host UUID from IOKit and return it to user space.
2181  *
2182  * Parameters:	uuid_buf		Pointer to buffer to receive UUID
2183  *		timeout			Timespec for timout
2184  *
2185  * Returns:	0			Success
2186  *		EWOULDBLOCK		Timeout is too short
2187  *		copyout:EFAULT		Bad user buffer
2188  *		mac_system_check_info:EPERM		Client not allowed to perform this operation
2189  *
2190  * Notes:	A timeout seems redundant, since if it's tolerable to not
2191  *		have a system UUID in hand, then why ask for one?
2192  */
2193 int
gethostuuid(struct proc * p,struct gethostuuid_args * uap,__unused int32_t * retval)2194 gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
2195 {
2196 	kern_return_t kret;
2197 	int error;
2198 	mach_timespec_t mach_ts;        /* for IOKit call */
2199 	__darwin_uuid_t uuid_kern = {}; /* for IOKit call */
2200 
2201 	/* Check entitlement */
2202 	if (!IOCurrentTaskHasEntitlement("com.apple.private.getprivatesysid")) {
2203 #if !defined(XNU_TARGET_OS_OSX)
2204 #if CONFIG_MACF
2205 		if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
2206 			/* EPERM invokes userspace upcall if present */
2207 			return error;
2208 		}
2209 #endif
2210 #endif
2211 	}
2212 
2213 	/* Convert the 32/64 bit timespec into a mach_timespec_t */
2214 	if (proc_is64bit(p)) {
2215 		struct user64_timespec ts;
2216 		error = copyin(uap->timeoutp, &ts, sizeof(ts));
2217 		if (error) {
2218 			return error;
2219 		}
2220 		mach_ts.tv_sec = (unsigned int)ts.tv_sec;
2221 		mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec;
2222 	} else {
2223 		struct user32_timespec ts;
2224 		error = copyin(uap->timeoutp, &ts, sizeof(ts));
2225 		if (error) {
2226 			return error;
2227 		}
2228 		mach_ts.tv_sec = ts.tv_sec;
2229 		mach_ts.tv_nsec = ts.tv_nsec;
2230 	}
2231 
2232 	/* Call IOKit with the stack buffer to get the UUID */
2233 	kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);
2234 
2235 	/*
2236 	 * If we get it, copy out the data to the user buffer; note that a
2237 	 * uuid_t is an array of characters, so this is size invariant for
2238 	 * 32 vs. 64 bit.
2239 	 */
2240 	if (kret == KERN_SUCCESS) {
2241 		error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
2242 	} else {
2243 		error = EWOULDBLOCK;
2244 	}
2245 
2246 	return error;
2247 }
2248 
2249 /*
2250  * ledger
2251  *
2252  * Description:	Omnibus system call for ledger operations
2253  */
2254 int
ledger(struct proc * p,struct ledger_args * args,__unused int32_t * retval)2255 ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
2256 {
2257 #if !CONFIG_MACF
2258 #pragma unused(p)
2259 #endif
2260 	int rval, pid, len, error;
2261 #ifdef LEDGER_DEBUG
2262 	struct ledger_limit_args lla;
2263 #endif
2264 	task_t task;
2265 	proc_t proc;
2266 
2267 	/* Finish copying in the necessary args before taking the proc lock */
2268 	error = 0;
2269 	len = 0;
2270 	if (args->cmd == LEDGER_ENTRY_INFO) {
2271 		error = copyin(args->arg3, (char *)&len, sizeof(len));
2272 	} else if (args->cmd == LEDGER_TEMPLATE_INFO) {
2273 		error = copyin(args->arg2, (char *)&len, sizeof(len));
2274 	} else if (args->cmd == LEDGER_LIMIT)
2275 #ifdef LEDGER_DEBUG
2276 	{ error = copyin(args->arg2, (char *)&lla, sizeof(lla));}
2277 #else
2278 	{ return EINVAL; }
2279 #endif
2280 	else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) {
2281 		return EINVAL;
2282 	}
2283 
2284 	if (error) {
2285 		return error;
2286 	}
2287 	if (len < 0) {
2288 		return EINVAL;
2289 	}
2290 
2291 	rval = 0;
2292 	if (args->cmd != LEDGER_TEMPLATE_INFO) {
2293 		pid = (int)args->arg1;
2294 		proc = proc_find(pid);
2295 		if (proc == NULL) {
2296 			return ESRCH;
2297 		}
2298 
2299 #if CONFIG_MACF
2300 		error = mac_proc_check_ledger(p, proc, args->cmd);
2301 		if (error) {
2302 			proc_rele(proc);
2303 			return error;
2304 		}
2305 #endif
2306 
2307 		task = proc_task(proc);
2308 	}
2309 
2310 	switch (args->cmd) {
2311 #ifdef LEDGER_DEBUG
2312 	case LEDGER_LIMIT: {
2313 		if (!kauth_cred_issuser(kauth_cred_get())) {
2314 			rval = EPERM;
2315 		}
2316 		rval = ledger_limit(task, &lla);
2317 		proc_rele(proc);
2318 		break;
2319 	}
2320 #endif
2321 	case LEDGER_INFO: {
2322 		struct ledger_info info = {};
2323 
2324 		rval = ledger_info(task, &info);
2325 		proc_rele(proc);
2326 		if (rval == 0) {
2327 			rval = copyout(&info, args->arg2,
2328 			    sizeof(info));
2329 		}
2330 		break;
2331 	}
2332 
2333 	case LEDGER_ENTRY_INFO: {
2334 		void *buf;
2335 		int sz;
2336 
2337 #if CONFIG_MEMORYSTATUS
2338 		task_ledger_settle_dirty_time(task);
2339 #endif /* CONFIG_MEMORYSTATUS */
2340 
2341 		rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
2342 		proc_rele(proc);
2343 		if ((rval == 0) && (len >= 0)) {
2344 			sz = len * sizeof(struct ledger_entry_info);
2345 			rval = copyout(buf, args->arg2, sz);
2346 			kfree_data(buf, sz);
2347 		}
2348 		if (rval == 0) {
2349 			rval = copyout(&len, args->arg3, sizeof(len));
2350 		}
2351 		break;
2352 	}
2353 
2354 	case LEDGER_TEMPLATE_INFO: {
2355 		void *buf;
2356 		int sz;
2357 
2358 		rval = ledger_template_info(&buf, &len);
2359 		if ((rval == 0) && (len >= 0)) {
2360 			sz = len * sizeof(struct ledger_template_info);
2361 			rval = copyout(buf, args->arg1, sz);
2362 			kfree_data(buf, sz);
2363 		}
2364 		if (rval == 0) {
2365 			rval = copyout(&len, args->arg2, sizeof(len));
2366 		}
2367 		break;
2368 	}
2369 
2370 	default:
2371 		panic("ledger syscall logic error -- command type %d", args->cmd);
2372 		proc_rele(proc);
2373 		rval = EINVAL;
2374 	}
2375 
2376 	return rval;
2377 }
2378 
2379 int
telemetry(__unused struct proc * p,struct telemetry_args * args,__unused int32_t * retval)2380 telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
2381 {
2382 	int error = 0;
2383 
2384 	switch (args->cmd) {
2385 #if CONFIG_TELEMETRY
2386 	case TELEMETRY_CMD_TIMER_EVENT:
2387 		error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
2388 		break;
2389 	case TELEMETRY_CMD_PMI_SETUP:
2390 		error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval);
2391 		break;
2392 #endif /* CONFIG_TELEMETRY */
2393 	case TELEMETRY_CMD_VOUCHER_NAME:
2394 		if (thread_set_voucher_name((mach_port_name_t)args->deadline)) {
2395 			error = EINVAL;
2396 		}
2397 		break;
2398 
2399 	default:
2400 		error = EINVAL;
2401 		break;
2402 	}
2403 
2404 	return error;
2405 }
2406 
2407 /*
2408  * Logging
2409  *
2410  * Description: syscall to access kernel logging from userspace
2411  *
2412  * Args:
2413  *	tag - used for syncing with userspace on the version.
2414  *	flags - flags used by the syscall.
2415  *	buffer - userspace address of string to copy.
2416  *	size - size of buffer.
2417  */
2418 int
log_data(__unused struct proc * p,struct log_data_args * args,int * retval)2419 log_data(__unused struct proc *p, struct log_data_args *args, int *retval)
2420 {
2421 	unsigned int tag = args->tag;
2422 	unsigned int flags = args->flags;
2423 	user_addr_t buffer = args->buffer;
2424 	unsigned int size = args->size;
2425 	int ret = 0;
2426 	*retval = 0;
2427 
2428 	/* Only DEXTs are suppose to use this syscall. */
2429 	if (!task_is_driver(current_task())) {
2430 		return EPERM;
2431 	}
2432 
2433 	/*
2434 	 * Tag synchronize the syscall version with userspace.
2435 	 * Tag == 0 => flags == OS_LOG_TYPE
2436 	 */
2437 	if (tag != 0) {
2438 		return EINVAL;
2439 	}
2440 
2441 	/*
2442 	 * OS_LOG_TYPE are defined in libkern/os/log.h
2443 	 * In userspace they are defined in libtrace/os/log.h
2444 	 */
2445 	if (flags != OS_LOG_TYPE_DEFAULT &&
2446 	    flags != OS_LOG_TYPE_INFO &&
2447 	    flags != OS_LOG_TYPE_DEBUG &&
2448 	    flags != OS_LOG_TYPE_ERROR &&
2449 	    flags != OS_LOG_TYPE_FAULT) {
2450 		return EINVAL;
2451 	}
2452 
2453 	if (size == 0) {
2454 		return EINVAL;
2455 	}
2456 
2457 	/* truncate to OS_LOG_DATA_MAX_SIZE */
2458 	if (size > OS_LOG_DATA_MAX_SIZE) {
2459 		printf("%s: WARNING msg is going to be truncated from %u to %u\n",
2460 		    __func__, size, OS_LOG_DATA_MAX_SIZE);
2461 		size = OS_LOG_DATA_MAX_SIZE;
2462 	}
2463 
2464 	char *log_msg = (char *)kalloc_data(size, Z_WAITOK);
2465 	if (!log_msg) {
2466 		return ENOMEM;
2467 	}
2468 
2469 	if (copyin(buffer, log_msg, size) != 0) {
2470 		ret = EFAULT;
2471 		goto out;
2472 	}
2473 	log_msg[size - 1] = '\0';
2474 
2475 	/*
2476 	 * This will log to dmesg and logd.
2477 	 * The call will fail if the current
2478 	 * process is not a driverKit process.
2479 	 */
2480 	os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg);
2481 
2482 out:
2483 	if (log_msg != NULL) {
2484 		kfree_data(log_msg, size);
2485 	}
2486 
2487 	return ret;
2488 }
2489 
2490 #if DEVELOPMENT || DEBUG
2491 
2492 static int
2493 sysctl_mpsc_test_pingpong SYSCTL_HANDLER_ARGS
2494 {
2495 #pragma unused(oidp, arg1, arg2)
2496 	uint64_t value = 0;
2497 	int error;
2498 
2499 	error = SYSCTL_IN(req, &value, sizeof(value));
2500 	if (error) {
2501 		return error;
2502 	}
2503 
2504 	if (error == 0 && req->newptr) {
2505 		error = mpsc_test_pingpong(value, &value);
2506 		if (error == 0) {
2507 			error = SYSCTL_OUT(req, &value, sizeof(value));
2508 		}
2509 	}
2510 
2511 	return error;
2512 }
2513 SYSCTL_PROC(_kern, OID_AUTO, mpsc_test_pingpong, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2514     0, 0, sysctl_mpsc_test_pingpong, "Q", "MPSC tests: pingpong");
2515 
2516 #endif /* DEVELOPMENT || DEBUG */
2517 
/* Telemetry, microstackshots */

SYSCTL_NODE(_kern, OID_AUTO, microstackshot, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    "microstackshot info");

/* Read-only view of the interrupt-driven sampling rate. */
extern uint32_t telemetry_sample_rate;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, interrupt_sample_rate,
    CTLFLAG_RD | CTLFLAG_LOCKED, &telemetry_sample_rate, 0,
    "interrupt-based sampling rate in Hz");
2527 
#if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES)

/* PMI-driven microstackshot configuration (read-only). */
extern uint64_t mt_microstackshot_period;
SYSCTL_QUAD(_kern_microstackshot, OID_AUTO, pmi_sample_period,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_period,
    "PMI sampling rate");
/* Which performance counter drives the PMI. */
extern unsigned int mt_microstackshot_ctr;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, pmi_sample_counter,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_ctr, 0,
    "PMI counter");

#endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */
2540 
/* Remote Time API */
SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api");

#if DEVELOPMENT || DEBUG
#if CONFIG_MACH_BRIDGE_SEND_TIME
extern _Atomic uint32_t bt_init_flag;
extern uint32_t mach_bridge_timer_enable(uint32_t, int);

/* Reports whether the mach bridge timer has been initialized. */
SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag,
    CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, "");
2551 
2552 static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS
2553 {
2554 #pragma unused(oidp, arg1, arg2)
2555 	uint32_t value = 0;
2556 	int error = 0;
2557 	/* User is querying buffer size */
2558 	if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) {
2559 		req->oldidx = sizeof(value);
2560 		return 0;
2561 	}
2562 	if (os_atomic_load(&bt_init_flag, acquire)) {
2563 		if (req->newptr) {
2564 			int new_value = 0;
2565 			error = SYSCTL_IN(req, &new_value, sizeof(new_value));
2566 			if (error) {
2567 				return error;
2568 			}
2569 			if (new_value == 0 || new_value == 1) {
2570 				value = mach_bridge_timer_enable(new_value, 1);
2571 			} else {
2572 				return EPERM;
2573 			}
2574 		} else {
2575 			value = mach_bridge_timer_enable(0, 0);
2576 		}
2577 	}
2578 	error = SYSCTL_OUT(req, &value, sizeof(value));
2579 	return error;
2580 }
2581 
2582 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable,
2583     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2584     0, 0, sysctl_mach_bridge_timer_enable, "I", "");
2585 
2586 #endif /* CONFIG_MACH_BRIDGE_SEND_TIME */
2587 
2588 static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS
2589 {
2590 #pragma unused(oidp, arg1, arg2)
2591 	uint64_t ltime = 0, rtime = 0;
2592 	if (req->oldptr == USER_ADDR_NULL) {
2593 		req->oldidx = sizeof(rtime);
2594 		return 0;
2595 	}
2596 	if (req->newptr) {
2597 		int error = SYSCTL_IN(req, &ltime, sizeof(ltime));
2598 		if (error) {
2599 			return error;
2600 		}
2601 	}
2602 	rtime = mach_bridge_remote_time(ltime);
2603 	return SYSCTL_OUT(req, &rtime, sizeof(rtime));
2604 }
2605 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time,
2606     CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2607     0, 0, sysctl_mach_bridge_remote_time, "Q", "");
2608 
2609 #endif /* DEVELOPMENT || DEBUG */
2610 
2611 #if CONFIG_MACH_BRIDGE_RECV_TIME
2612 extern struct bt_params bt_params_get_latest(void);
2613 
2614 static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS
2615 {
2616 #pragma unused(oidp, arg1, arg2)
2617 	struct bt_params params = {};
2618 	if (req->oldptr == USER_ADDR_NULL) {
2619 		req->oldidx = sizeof(struct bt_params);
2620 		return 0;
2621 	}
2622 	if (req->newptr) {
2623 		return EPERM;
2624 	}
2625 	params = bt_params_get_latest();
2626 	return SYSCTL_OUT(req, &params, MIN(sizeof(params), req->oldlen));
2627 }
2628 
2629 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params,
2630     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0,
2631     0, sysctl_mach_bridge_conversion_params, "S,bt_params", "");
2632 
2633 #endif /* CONFIG_MACH_BRIDGE_RECV_TIME */
2634 
2635 #if DEVELOPMENT || DEBUG
2636 
2637 #include <pexpert/pexpert.h>
2638 extern int32_t sysctl_get_bound_cpuid(void);
2639 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
2640 static int
2641 sysctl_kern_sched_thread_bind_cpu SYSCTL_HANDLER_ARGS
2642 {
2643 #pragma unused(oidp, arg1, arg2)
2644 
2645 	/*
2646 	 * DO NOT remove this bootarg guard or make this non-development.
2647 	 * This kind of binding should only be used for tests and
2648 	 * experiments in a custom configuration, never shipping code.
2649 	 */
2650 
2651 	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2652 		return ENOENT;
2653 	}
2654 
2655 	int32_t cpuid = sysctl_get_bound_cpuid();
2656 
2657 	int32_t new_value;
2658 	int changed;
2659 	int error = sysctl_io_number(req, cpuid, sizeof cpuid, &new_value, &changed);
2660 	if (error) {
2661 		return error;
2662 	}
2663 
2664 	if (changed) {
2665 		kern_return_t kr = sysctl_thread_bind_cpuid(new_value);
2666 
2667 		if (kr == KERN_NOT_SUPPORTED) {
2668 			return ENOTSUP;
2669 		}
2670 
2671 		if (kr == KERN_INVALID_VALUE) {
2672 			return ERANGE;
2673 		}
2674 	}
2675 
2676 	return error;
2677 }
2678 
2679 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cpu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2680     0, 0, sysctl_kern_sched_thread_bind_cpu, "I", "");
2681 
2682 #if __AMP__
2683 extern char sysctl_get_bound_cluster_type(void);
2684 extern void sysctl_thread_bind_cluster_type(char cluster_type);
2685 static int
2686 sysctl_kern_sched_thread_bind_cluster_type SYSCTL_HANDLER_ARGS
2687 {
2688 #pragma unused(oidp, arg1, arg2)
2689 	char buff[4];
2690 
2691 	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2692 		return ENOENT;
2693 	}
2694 
2695 	int error = SYSCTL_IN(req, buff, 1);
2696 	if (error) {
2697 		return error;
2698 	}
2699 	char cluster_type = buff[0];
2700 
2701 	if (!req->newptr) {
2702 		goto out;
2703 	}
2704 
2705 	sysctl_thread_bind_cluster_type(cluster_type);
2706 out:
2707 	cluster_type = sysctl_get_bound_cluster_type();
2708 	buff[0] = cluster_type;
2709 
2710 	return SYSCTL_OUT(req, buff, 1);
2711 }
2712 
2713 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2714     0, 0, sysctl_kern_sched_thread_bind_cluster_type, "A", "");
2715 
2716 extern char sysctl_get_task_cluster_type(void);
2717 extern void sysctl_task_set_cluster_type(char cluster_type);
2718 static int
2719 sysctl_kern_sched_task_set_cluster_type SYSCTL_HANDLER_ARGS
2720 {
2721 #pragma unused(oidp, arg1, arg2)
2722 	char buff[4];
2723 
2724 	if (!PE_parse_boot_argn("enable_skstsct", NULL, 0)) {
2725 		return ENOENT;
2726 	}
2727 
2728 	int error = SYSCTL_IN(req, buff, 1);
2729 	if (error) {
2730 		return error;
2731 	}
2732 	char cluster_type = buff[0];
2733 
2734 	if (!req->newptr) {
2735 		goto out;
2736 	}
2737 
2738 	sysctl_task_set_cluster_type(cluster_type);
2739 out:
2740 	cluster_type = sysctl_get_task_cluster_type();
2741 	buff[0] = cluster_type;
2742 
2743 	return SYSCTL_OUT(req, buff, 1);
2744 }
2745 
2746 SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2747     0, 0, sysctl_kern_sched_task_set_cluster_type, "A", "");
2748 
2749 extern kern_return_t thread_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options);
2750 extern uint32_t thread_bound_cluster_id(thread_t);
2751 static int
2752 sysctl_kern_sched_thread_bind_cluster_id SYSCTL_HANDLER_ARGS
2753 {
2754 #pragma unused(oidp, arg1, arg2)
2755 	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2756 		return ENOENT;
2757 	}
2758 
2759 	thread_t self = current_thread();
2760 	uint32_t old_value = thread_bound_cluster_id(self);
2761 	uint32_t new_value;
2762 
2763 	int error = SYSCTL_IN(req, &new_value, sizeof(new_value));
2764 	if (error) {
2765 		return error;
2766 	}
2767 	if (new_value != old_value) {
2768 		/*
2769 		 * This sysctl binds the thread to the cluster without any flags,
2770 		 * which means it will be hard bound and not check eligibility.
2771 		 */
2772 		thread_bind_cluster_id(self, new_value, 0);
2773 	}
2774 	return SYSCTL_OUT(req, &old_value, sizeof(old_value));
2775 }
2776 
2777 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_id, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2778     0, 0, sysctl_kern_sched_thread_bind_cluster_id, "I", "");
2779 
2780 #if CONFIG_SCHED_EDGE
2781 
2782 extern int sched_edge_restrict_ut;
2783 SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_ut, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict UT Threads");
2784 extern int sched_edge_restrict_bg;
2785 SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_bg, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict BG Threads");
2786 extern int sched_edge_migrate_ipi_immediate;
2787 SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency");
2788 
2789 #endif /* CONFIG_SCHED_EDGE */
2790 
2791 #endif /* __AMP__ */
2792 
#if SCHED_HYGIENE_DEBUG

/*
 * Scheduler hygiene controls: thresholds (in mach timebase units) and
 * debug modes (0: off, 1: trace, 2: panic) for detecting overlong
 * interrupt-masked and preemption-disabled windows.
 */

SYSCTL_QUAD(_kern, OID_AUTO, interrupt_masked_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_timeout,
    "Interrupt masked duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, interrupt_masked_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_debug_mode, 0,
    "Enable interrupt masked tracing or panic (0: off, 1: trace, 2: panic)");

SYSCTL_QUAD(_kern, OID_AUTO, sched_preemption_disable_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_threshold_mt,
    "Preemption disablement duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, sched_preemption_disable_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_debug_mode, 0,
    "Enable preemption disablement tracing or panic (0: off, 1: trace, 2: panic)");
2810 
PERCPU_DECL(uint64_t _Atomic, preemption_disable_max_mt);

/*
 * Report the per-CPU maximum observed preemption-disablement durations
 * (mach timebase units).  A write of any length resets all per-CPU
 * maxima to zero; the pre-reset snapshot is still returned.
 */
static int
sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	uint64_t stats[MAX_CPUS]; // maximum per CPU

	/*
	 * No synchronization here. The individual values are pretty much
	 * independent, and reading/writing them is atomic.
	 */

	int cpu = 0;
	percpu_foreach(max_stat, preemption_disable_max_mt) {
		stats[cpu++] = os_atomic_load(max_stat, relaxed);
	}

	if (req->newlen > 0) {
		// writing just resets all stats.
		percpu_foreach(max_stat, preemption_disable_max_mt) {
			os_atomic_store(max_stat, 0, relaxed);
		}
	}

	return sysctl_io_opaque(req, stats, cpu * sizeof(uint64_t), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_preemption_disable_stats,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_sched_preemption_disable_stats, "I", "Preemption disablement statistics");
2841 
2842 #endif /* SCHED_HYGIENE_DEBUG */
2843 
/* used for testing by exception_tests */
extern uint32_t ipc_control_port_options;
SYSCTL_INT(_kern, OID_AUTO, ipc_control_port_options,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipc_control_port_options, 0, "");

#endif /* DEVELOPMENT || DEBUG */

/* Default task exception-guard behavior (read-only). */
extern uint32_t task_exc_guard_default;

SYSCTL_INT(_kern, OID_AUTO, task_exc_guard_default,
    CTLFLAG_RD | CTLFLAG_LOCKED, &task_exc_guard_default, 0, "");
2855 
2856 
2857 static int
2858 sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS
2859 {
2860 #pragma unused(oidp, arg1, arg2)
2861 	uint32_t value = machine_csv(CPUVN_CI) ? 1 : 0;
2862 
2863 	if (req->newptr) {
2864 		return EINVAL;
2865 	}
2866 
2867 	return SYSCTL_OUT(req, &value, sizeof(value));
2868 }
2869 SYSCTL_PROC(_kern, OID_AUTO, tcsm_available,
2870     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
2871     0, 0, sysctl_kern_tcsm_available, "I", "");
2872 
2873 
2874 static int
2875 sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS
2876 {
2877 #pragma unused(oidp, arg1, arg2)
2878 	uint32_t soflags = 0;
2879 	uint32_t old_value = thread_get_no_smt() ? 1 : 0;
2880 
2881 	int error = SYSCTL_IN(req, &soflags, sizeof(soflags));
2882 	if (error) {
2883 		return error;
2884 	}
2885 
2886 	if (soflags && machine_csv(CPUVN_CI)) {
2887 		thread_set_no_smt(true);
2888 		machine_tecs(current_thread());
2889 	}
2890 
2891 	return SYSCTL_OUT(req, &old_value, sizeof(old_value));
2892 }
2893 SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable,
2894     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
2895     0, 0, sysctl_kern_tcsm_enable, "I", "");
2896 
/*
 * Export the pre-OS (boot) log buffer to userspace.
 *
 * Writing a non-zero byte requests oneshot behaviour (used by
 * DumpPanic): only the first such caller proceeds, later oneshot
 * requests fail with EPERM.  The backing buffer is freed once fully
 * copied out (or when found stale).
 */
static int
sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	static bool oneshot_executed = false;
	size_t preoslog_size = 0;
	const char *preoslog = NULL;
	int ret = 0;

	// DumpPanic passes a non-zero write value when it needs oneshot behaviour
	if (req->newptr != USER_ADDR_NULL) {
		uint8_t oneshot = 0;
		int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot));
		if (error) {
			return error;
		}

		if (oneshot) {
			/* First oneshot claimant wins; all others get EPERM. */
			if (!os_atomic_cmpxchg(&oneshot_executed, false, true, acq_rel)) {
				return EPERM;
			}
		}
	}

	preoslog = sysctl_debug_get_preoslog(&preoslog_size);
	/* A non-NULL buffer of zero length is stale — release it now. */
	if (preoslog != NULL && preoslog_size == 0) {
		sysctl_debug_free_preoslog();
		return 0;
	}

	if (preoslog == NULL || preoslog_size == 0) {
		return 0;
	}

	/* Size probe: report the required buffer length. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = preoslog_size;
		return 0;
	}

	ret = SYSCTL_OUT(req, preoslog, preoslog_size);
	sysctl_debug_free_preoslog();
	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_debug_get_preoslog, "-", "");
2943 
2944 #if DEVELOPMENT || DEBUG
2945 extern void sysctl_task_set_no_smt(char no_smt);
2946 extern char sysctl_task_get_no_smt(void);
2947 
2948 static int
2949 sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS
2950 {
2951 #pragma unused(oidp, arg1, arg2)
2952 	char buff[4];
2953 
2954 	int error = SYSCTL_IN(req, buff, 1);
2955 	if (error) {
2956 		return error;
2957 	}
2958 	char no_smt = buff[0];
2959 
2960 	if (!req->newptr) {
2961 		goto out;
2962 	}
2963 
2964 	sysctl_task_set_no_smt(no_smt);
2965 out:
2966 	no_smt = sysctl_task_get_no_smt();
2967 	buff[0] = no_smt;
2968 
2969 	return SYSCTL_OUT(req, buff, 1);
2970 }
2971 
2972 SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
2973     0, 0, sysctl_kern_sched_task_set_no_smt, "A", "");
2974 
2975 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)2976 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2977 {
2978 	int new_value, changed;
2979 	int old_value = thread_get_no_smt() ? 1 : 0;
2980 	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2981 
2982 	if (changed) {
2983 		thread_set_no_smt(!!new_value);
2984 	}
2985 
2986 	return error;
2987 }
2988 
2989 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt,
2990     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
2991     0, 0, sysctl_kern_sched_thread_set_no_smt, "I", "");
2992 
2993 #if CONFIG_SCHED_RT_ALLOW
2994 
2995 #if DEVELOPMENT || DEBUG
2996 #define RT_ALLOW_CTLFLAGS CTLFLAG_RW
2997 #else
2998 #define RT_ALLOW_CTLFLAGS CTLFLAG_RD
2999 #endif /* DEVELOPMENT || DEBUG */
3000 
3001 static int
sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3002 sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid *oidp,
3003     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3004 {
3005 	extern uint8_t rt_allow_limit_percent;
3006 
3007 	int new_value = 0;
3008 	int old_value = rt_allow_limit_percent;
3009 	int changed = 0;
3010 
3011 	int error = sysctl_io_number(req, old_value, sizeof(old_value),
3012 	    &new_value, &changed);
3013 	if (error != 0) {
3014 		return error;
3015 	}
3016 
3017 	/* Only accept a percentage between 1 and 99 inclusive. */
3018 	if (changed) {
3019 		if (new_value >= 100 || new_value <= 0) {
3020 			return EINVAL;
3021 		}
3022 
3023 		rt_allow_limit_percent = (uint8_t)new_value;
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_percent,
3030     RT_ALLOW_CTLFLAGS | CTLTYPE_INT | CTLFLAG_LOCKED,
3031     0, 0, sysctl_kern_rt_allow_limit_percent, "I", "");
3032 
3033 static int
sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3034 sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid *oidp,
3035     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3036 {
3037 	extern uint16_t rt_allow_limit_interval_ms;
3038 
3039 	uint64_t new_value = 0;
3040 	uint64_t old_value = rt_allow_limit_interval_ms;
3041 	int changed = 0;
3042 
3043 	int error = sysctl_io_number(req, old_value, sizeof(old_value),
3044 	    &new_value, &changed);
3045 	if (error != 0) {
3046 		return error;
3047 	}
3048 
3049 	/* Value is in ns. Must be at least 1ms. */
3050 	if (changed) {
3051 		if (new_value < 1 || new_value > UINT16_MAX) {
3052 			return EINVAL;
3053 		}
3054 
3055 		rt_allow_limit_interval_ms = (uint16_t)new_value;
3056 	}
3057 
3058 	return 0;
3059 }
3060 
3061 SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_interval_ms,
3062     RT_ALLOW_CTLFLAGS | CTLTYPE_QUAD | CTLFLAG_LOCKED,
3063     0, 0, sysctl_kern_rt_allow_limit_interval_ms, "Q", "");
3064 
3065 #endif /* CONFIG_SCHED_RT_ALLOW */
3066 
3067 
3068 static int
3069 sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS
3070 {
3071 #pragma unused(oidp, arg1, arg2)
3072 	int new_value, changed;
3073 	int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0;
3074 	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3075 
3076 	if (changed) {
3077 		task_set_filter_msg_flag(current_task(), !!new_value);
3078 	}
3079 
3080 	return error;
3081 }
3082 
3083 SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3084     0, 0, sysctl_kern_task_set_filter_msg_flag, "I", "");
3085 
3086 #if CONFIG_PROC_RESOURCE_LIMITS
3087 
3088 extern mach_port_name_t current_task_get_fatal_port_name(void);
3089 
3090 static int
3091 sysctl_kern_task_get_fatal_port SYSCTL_HANDLER_ARGS
3092 {
3093 #pragma unused(oidp, arg1, arg2)
3094 	int port = 0;
3095 	int flag = 0;
3096 
3097 	if (req->oldptr == USER_ADDR_NULL) {
3098 		req->oldidx = sizeof(mach_port_t);
3099 		return 0;
3100 	}
3101 
3102 	int error = SYSCTL_IN(req, &flag, sizeof(flag));
3103 	if (error) {
3104 		return error;
3105 	}
3106 
3107 	if (flag == 1) {
3108 		port = (int)current_task_get_fatal_port_name();
3109 	}
3110 	return SYSCTL_OUT(req, &port, sizeof(port));
3111 }
3112 
/* machdep.task_get_fatal_port: write flag==1, read back the fatal port name as an int. */
SYSCTL_PROC(_machdep, OID_AUTO, task_get_fatal_port, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_task_get_fatal_port, "I", "");
3115 
3116 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
3117 
3118 extern unsigned int ipc_entry_table_count_max(void);
3119 
3120 static int
3121 sysctl_mach_max_port_table_size SYSCTL_HANDLER_ARGS
3122 {
3123 #pragma unused(oidp, arg1, arg2)
3124 	int old_value = ipc_entry_table_count_max();
3125 	int error = sysctl_io_number(req, old_value, sizeof(int), NULL, NULL);
3126 
3127 	return error;
3128 }
3129 
/* machdep.max_port_table_size: maximum IPC entry table capacity (handler ignores writes). */
SYSCTL_PROC(_machdep, OID_AUTO, max_port_table_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_max_port_table_size, "I", "");
3132 
3133 #endif /* DEVELOPMENT || DEBUG */
3134 
3135 #if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(CONFIG_KDP_COREDUMP_ENCRYPTION)
3136 
3137 #define COREDUMP_ENCRYPTION_KEY_ENTITLEMENT "com.apple.private.coredump-encryption-key"
3138 
3139 static int
3140 sysctl_coredump_encryption_key_update SYSCTL_HANDLER_ARGS
3141 {
3142 	kern_return_t ret = KERN_SUCCESS;
3143 	int error = 0;
3144 	struct kdp_core_encryption_key_descriptor key_descriptor = {
3145 		.kcekd_format = MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256,
3146 	};
3147 
3148 	/* Need to be root and have entitlement */
3149 	if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(COREDUMP_ENCRYPTION_KEY_ENTITLEMENT)) {
3150 		return EPERM;
3151 	}
3152 
3153 	// Sanity-check the given key length
3154 	if (req->newlen > UINT16_MAX) {
3155 		return EINVAL;
3156 	}
3157 
3158 	// It is allowed for the caller to pass in a NULL buffer.
3159 	// This indicates that they want us to forget about any public key we might have.
3160 	if (req->newptr) {
3161 		key_descriptor.kcekd_size = (uint16_t) req->newlen;
3162 		key_descriptor.kcekd_key = kalloc_data(key_descriptor.kcekd_size, Z_WAITOK);
3163 
3164 		if (key_descriptor.kcekd_key == NULL) {
3165 			return ENOMEM;
3166 		}
3167 
3168 		error = SYSCTL_IN(req, key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3169 		if (error) {
3170 			goto out;
3171 		}
3172 	}
3173 
3174 	ret = IOProvideCoreFileAccess(kdp_core_handle_new_encryption_key, (void *)&key_descriptor);
3175 	if (KERN_SUCCESS != ret) {
3176 		printf("Failed to handle the new encryption key. Error 0x%x", ret);
3177 		error = EFAULT;
3178 	}
3179 
3180 out:
3181 	kfree_data(key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3182 	return 0;
3183 }
3184 
/* kern.coredump_encryption_key: write-only, masked node for installing/clearing the key. */
SYSCTL_PROC(_kern, OID_AUTO, coredump_encryption_key, CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    0, 0, &sysctl_coredump_encryption_key_update, "-", "Set a new encryption key for coredumps");
3187 
3188 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING && CONFIG_KDP_COREDUMP_ENCRYPTION*/
3189