xref: /xnu-12377.81.4/bsd/kern/sys_generic.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88 
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99 
100 #include <machine/smp.h>
101 #include <mach/mach_types.h>
102 #include <kern/kern_types.h>
103 #include <kern/assert.h>
104 #include <kern/kalloc.h>
105 #include <kern/thread.h>
106 #include <kern/clock.h>
107 #include <kern/ledger.h>
108 #include <kern/monotonic.h>
109 #include <kern/task.h>
110 #include <kern/telemetry.h>
111 #include <kern/waitq.h>
112 #include <kern/sched_hygiene.h>
113 #include <kern/sched_prim.h>
114 #include <kern/mpsc_queue.h>
115 #include <kern/debug.h>
116 
117 #include <sys/mbuf.h>
118 #include <sys/domain.h>
119 #include <sys/socket.h>
120 #include <sys/socketvar.h>
121 #include <sys/errno.h>
122 #include <sys/syscall.h>
123 #include <sys/pipe.h>
124 
125 #include <security/audit/audit.h>
126 
127 #include <net/if.h>
128 #include <net/route.h>
129 
130 #include <netinet/in.h>
131 #include <netinet/in_systm.h>
132 #include <netinet/ip.h>
133 #include <netinet/in_pcb.h>
134 #include <netinet/ip_var.h>
135 #include <netinet/ip6.h>
136 #include <netinet/tcp.h>
137 #include <netinet/tcp_fsm.h>
138 #include <netinet/tcp_seq.h>
139 #include <netinet/tcp_timer.h>
140 #include <netinet/tcp_var.h>
141 #include <netinet/tcpip.h>
142 /* for wait queue based select */
143 #include <kern/waitq.h>
144 #include <sys/vnode_internal.h>
145 /* for remote time api*/
146 #include <kern/remote_time.h>
147 #include <os/log.h>
148 #include <sys/log_data.h>
149 
150 #include <machine/monotonic.h>
151 
152 #if CONFIG_MACF
153 #include <security/mac_framework.h>
154 #endif
155 
156 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
157 #include <mach_debug/mach_debug_types.h>
158 #endif
159 
160 /* for entitlement check */
161 #include <IOKit/IOBSD.h>
162 
163 /* XXX should be in a header file somewhere */
164 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
165 
166 int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
167 __private_extern__ int  dofileread(vfs_context_t ctx, struct fileproc *fp,
168     user_addr_t bufp, user_size_t nbyte,
169     off_t offset, int flags, user_ssize_t *retval);
170 __private_extern__ int  dofilewrite(vfs_context_t ctx, struct fileproc *fp,
171     user_addr_t bufp, user_size_t nbyte,
172     off_t offset, int flags, user_ssize_t *retval);
173 static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
174 
175 /* needed by guarded_writev, etc. */
176 int write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
177     off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval);
178 int writev_uio(struct proc *p, int fd, user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
179     guardid_t *puguard, user_ssize_t *retval);
180 
181 #define f_flag fp_glob->fg_flag
182 #define f_type fp_glob->fg_ops->fo_type
183 #define f_cred fp_glob->fg_cred
184 #define f_ops fp_glob->fg_ops
185 
186 /*
187  * Validate if the file can be used for random access (pread, pwrite, etc).
188  *
189  * Conditions:
190  *		proc_fdlock is held
191  *
192  * Returns:    0                       Success
193  *             ESPIPE
194  *             ENXIO
195  */
196 static int
valid_for_random_access(struct fileproc * fp,bool check_for_pwrite)197 valid_for_random_access(struct fileproc *fp, bool check_for_pwrite)
198 {
199 	if (__improbable(fp->f_type != DTYPE_VNODE)) {
200 		return ESPIPE;
201 	}
202 
203 	vnode_t vp = (struct vnode *)fp_get_data(fp);
204 	if (__improbable(vnode_isfifo(vp))) {
205 		return ESPIPE;
206 	}
207 
208 	if (__improbable(vp->v_flag & VISTTY)) {
209 		return ENXIO;
210 	}
211 
212 	if (check_for_pwrite && vnode_isappendonly(vp)) {
213 		return EPERM;
214 	}
215 
216 	return 0;
217 }
218 
219 /*
220  * Returns:	0			Success
221  *		EBADF
222  *		ESPIPE
223  *		ENXIO
224  *	fp_lookup:EBADF
225  *  valid_for_random_access:ESPIPE
226  *  valid_for_random_access:ENXIO
227  */
228 static int
preparefileread(struct proc * p,struct fileproc ** fp_ret,int fd,int check_for_pread)229 preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
230 {
231 	int     error;
232 	struct fileproc *fp;
233 
234 	AUDIT_ARG(fd, fd);
235 
236 	proc_fdlock_spin(p);
237 
238 	error = fp_lookup(p, fd, &fp, 1);
239 
240 	if (error) {
241 		proc_fdunlock(p);
242 		return error;
243 	}
244 	if ((fp->f_flag & FREAD) == 0) {
245 		error = EBADF;
246 		goto out;
247 	}
248 	if (check_for_pread) {
249 		if ((error = valid_for_random_access(fp, false))) {
250 			goto out;
251 		}
252 	}
253 
254 	*fp_ret = fp;
255 
256 	proc_fdunlock(p);
257 	return 0;
258 
259 out:
260 	fp_drop(p, fd, fp, 1);
261 	proc_fdunlock(p);
262 	return error;
263 }
264 
265 static int
fp_readv(vfs_context_t ctx,struct fileproc * fp,uio_t uio,int flags,user_ssize_t * retval)266 fp_readv(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
267     user_ssize_t *retval)
268 {
269 	int error;
270 	user_ssize_t count;
271 
272 	if ((error = uio_calculateresid_user(uio))) {
273 		*retval = 0;
274 		return error;
275 	}
276 
277 	count = uio_resid(uio);
278 	error = fo_read(fp, uio, flags, ctx);
279 
280 	switch (error) {
281 	case ERESTART:
282 	case EINTR:
283 	case EWOULDBLOCK:
284 		if (uio_resid(uio) != count) {
285 			error = 0;
286 		}
287 		break;
288 
289 	default:
290 		break;
291 	}
292 
293 	*retval = count - uio_resid(uio);
294 	return error;
295 }
296 
297 /*
298  * Returns:	0			Success
299  *		EINVAL
300  *	fo_read:???
301  */
302 __private_extern__ int
dofileread(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)303 dofileread(vfs_context_t ctx, struct fileproc *fp,
304     user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
305     user_ssize_t *retval)
306 {
307 	UIO_STACKBUF(uio_buf, 1);
308 	uio_t uio;
309 	int spacetype;
310 
311 	if (nbyte > INT_MAX) {
312 		*retval = 0;
313 		return EINVAL;
314 	}
315 
316 	spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
317 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ, &uio_buf[0],
318 	    sizeof(uio_buf));
319 
320 	if (uio_addiov(uio, bufp, nbyte) != 0) {
321 		*retval = 0;
322 		return EINVAL;
323 	}
324 
325 	return fp_readv(ctx, fp, uio, flags, retval);
326 }
327 
328 static int
readv_internal(struct proc * p,int fd,uio_t uio,int flags,user_ssize_t * retval)329 readv_internal(struct proc *p, int fd, uio_t uio, int flags,
330     user_ssize_t *retval)
331 {
332 	struct fileproc *fp = NULL;
333 	struct vfs_context context;
334 	int error;
335 
336 	if ((error = preparefileread(p, &fp, fd, flags & FOF_OFFSET))) {
337 		*retval = 0;
338 		return error;
339 	}
340 
341 	context = *(vfs_context_current());
342 	context.vc_ucred = fp->fp_glob->fg_cred;
343 
344 	error = fp_readv(&context, fp, uio, flags, retval);
345 
346 	fp_drop(p, fd, fp, 0);
347 	return error;
348 }
349 
350 static int
read_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)351 read_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
352     off_t offset, int flags, user_ssize_t *retval)
353 {
354 	UIO_STACKBUF(uio_buf, 1);
355 	uio_t uio;
356 	int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
357 
358 	if (nbyte > INT_MAX) {
359 		*retval = 0;
360 		return EINVAL;
361 	}
362 
363 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ,
364 	    &uio_buf[0], sizeof(uio_buf));
365 
366 	if (uio_addiov(uio, buf, nbyte) != 0) {
367 		*retval = 0;
368 		return EINVAL;
369 	}
370 
371 	return readv_internal(p, fd, uio, flags, retval);
372 }
373 
374 int
read_nocancel(struct proc * p,struct read_nocancel_args * uap,user_ssize_t * retval)375 read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
376 {
377 	return read_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
378 	           retval);
379 }
380 
381 /*
382  * Read system call.
383  *
384  * Returns:	0			Success
385  *	preparefileread:EBADF
386  *	preparefileread:ESPIPE
387  *	preparefileread:ENXIO
388  *	preparefileread:EBADF
389  *	dofileread:???
390  */
391 int
read(struct proc * p,struct read_args * uap,user_ssize_t * retval)392 read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
393 {
394 	__pthread_testcancel(1);
395 	return read_nocancel(p, (struct read_nocancel_args *)uap, retval);
396 }
397 
/*
 * Non-cancellable pread(2): positional read at uap->offset.
 */
int
pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace fd, size, and the 64-bit offset split into two 32-bit halves. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	/* FOF_OFFSET: do not disturb the file's current offset. */
	return read_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, retval);
}
407 
408 /*
409  * Pread system call
410  *
411  * Returns:	0			Success
412  *	preparefileread:EBADF
413  *	preparefileread:ESPIPE
414  *	preparefileread:ENXIO
415  *	preparefileread:EBADF
416  *	dofileread:???
417  */
418 int
pread(struct proc * p,struct pread_args * uap,user_ssize_t * retval)419 pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
420 {
421 	__pthread_testcancel(1);
422 	return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval);
423 }
424 
425 /*
426  * Vector read.
427  *
428  * Returns:    0                       Success
429  *             EINVAL
430  *             ENOMEM
431  *     preparefileread:EBADF
432  *     preparefileread:ESPIPE
433  *     preparefileread:ENXIO
434  *     preparefileread:EBADF
435  *     copyin:EFAULT
436  *     rd_uio:???
437  */
438 static int
readv_uio(struct proc * p,int fd,user_addr_t user_iovp,int iovcnt,off_t offset,int flags,user_ssize_t * retval)439 readv_uio(struct proc *p, int fd,
440     user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
441     user_ssize_t *retval)
442 {
443 	uio_t uio = NULL;
444 	int error;
445 	struct user_iovec *iovp;
446 
447 	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) {
448 		error = EINVAL;
449 		goto out;
450 	}
451 
452 	uio = uio_create(iovcnt, offset,
453 	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
454 	    UIO_READ);
455 
456 	iovp = uio_iovsaddr_user(uio);
457 	if (iovp == NULL) {
458 		error = ENOMEM;
459 		goto out;
460 	}
461 
462 	error = copyin_user_iovec_array(user_iovp,
463 	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
464 	    iovcnt, iovp);
465 
466 	if (error) {
467 		goto out;
468 	}
469 
470 	error = readv_internal(p, fd, uio, flags, retval);
471 
472 out:
473 	if (uio != NULL) {
474 		uio_free(uio);
475 	}
476 
477 	return error;
478 }
479 
480 int
readv_nocancel(struct proc * p,struct readv_nocancel_args * uap,user_ssize_t * retval)481 readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
482 {
483 	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval);
484 }
485 
486 /*
487  * Scatter read system call.
488  */
489 int
readv(struct proc * p,struct readv_args * uap,user_ssize_t * retval)490 readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
491 {
492 	__pthread_testcancel(1);
493 	return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval);
494 }
495 
496 int
sys_preadv_nocancel(struct proc * p,struct preadv_nocancel_args * uap,user_ssize_t * retval)497 sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval)
498 {
499 	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
500 	           FOF_OFFSET, retval);
501 }
502 
503 /*
504  * Preadv system call
505  */
506 int
sys_preadv(struct proc * p,struct preadv_args * uap,user_ssize_t * retval)507 sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval)
508 {
509 	__pthread_testcancel(1);
510 	return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval);
511 }
512 
/*
 * Look up and validate a file descriptor for writing, optionally
 * enforcing a write guard.  On success the fileproc is returned held
 * (iocount) in *fp_ret; the caller must fp_drop() it.
 *
 * Returns:	0			Success
 *		EBADF
 *		ESPIPE
 *		ENXIO
 *	fp_lookup:EBADF
 *	fp_guard_exception:???
 *  valid_for_random_access:ESPIPE
 *  valid_for_random_access:ENXIO
 */
static int
preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite,
    guardid_t *puguard)
{
	int error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	proc_fdlock_spin(p);

	if (puguard) {
		/* Caller supplied a guard id; the lookup verifies it matches the fd. */
		error = fp_lookup_guarded(p, fd, *puguard, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}
	} else {
		error = fp_lookup(p, fd, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		/* Allow EBADF first. */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}

		/* An unguarded write to a write-guarded fd raises a guard exception. */
		if (fp_isguarded(fp, GUARD_WRITE)) {
			error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
			goto out;
		}
	}

	if (check_for_pwrite) {
		/* pwrite-style callers need a seekable, non-append-only vnode. */
		if ((error = valid_for_random_access(fp, true))) {
			goto out;
		}
	}

	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
580 
581 static int
fp_writev(vfs_context_t ctx,struct fileproc * fp,uio_t uio,int flags,user_ssize_t * retval)582 fp_writev(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
583     user_ssize_t *retval)
584 {
585 	int error;
586 	user_ssize_t count;
587 
588 	if ((error = uio_calculateresid_user(uio))) {
589 		*retval = 0;
590 		return error;
591 	}
592 
593 	count = uio_resid(uio);
594 	error = fo_write(fp, uio, flags, ctx);
595 
596 	switch (error) {
597 	case ERESTART:
598 	case EINTR:
599 	case EWOULDBLOCK:
600 		if (uio_resid(uio) != count) {
601 			error = 0;
602 		}
603 		break;
604 
605 	case EPIPE:
606 		if (fp->f_type != DTYPE_SOCKET &&
607 		    (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) {
608 			/* XXX Raise the signal on the thread? */
609 			psignal(vfs_context_proc(ctx), SIGPIPE);
610 		}
611 		break;
612 
613 	default:
614 		break;
615 	}
616 
617 	if ((*retval = count - uio_resid(uio))) {
618 		os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed);
619 	}
620 
621 	return error;
622 }
623 
624 /*
625  * Returns:	0			Success
626  *		EINVAL
627  *	<fo_write>:EPIPE
628  *	<fo_write>:???			[indirect through struct fileops]
629  */
630 __private_extern__ int
dofilewrite(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)631 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
632     user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
633     user_ssize_t *retval)
634 {
635 	UIO_STACKBUF(uio_buf, 1);
636 	uio_t uio;
637 	int spacetype;
638 
639 	if (nbyte > INT_MAX) {
640 		*retval = 0;
641 		return EINVAL;
642 	}
643 
644 	spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
645 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE, &uio_buf[0],
646 	    sizeof(uio_buf));
647 
648 	if (uio_addiov(uio, bufp, nbyte) != 0) {
649 		*retval = 0;
650 		return EINVAL;
651 	}
652 
653 	return fp_writev(ctx, fp, uio, flags, retval);
654 }
655 
656 static int
writev_internal(struct proc * p,int fd,uio_t uio,int flags,guardid_t * puguard,user_ssize_t * retval)657 writev_internal(struct proc *p, int fd, uio_t uio, int flags,
658     guardid_t *puguard, user_ssize_t *retval)
659 {
660 	struct fileproc *fp = NULL;
661 	struct vfs_context context;
662 	int error;
663 
664 	if ((error = preparefilewrite(p, &fp, fd, flags & FOF_OFFSET, puguard))) {
665 		*retval = 0;
666 		return error;
667 	}
668 
669 	context = *(vfs_context_current());
670 	context.vc_ucred = fp->fp_glob->fg_cred;
671 
672 	error = fp_writev(&context, fp, uio, flags, retval);
673 
674 	fp_drop(p, fd, fp, 0);
675 	return error;
676 }
677 
678 int
write_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,guardid_t * puguard,user_ssize_t * retval)679 write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
680     off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval)
681 {
682 	UIO_STACKBUF(uio_buf, 1);
683 	uio_t uio;
684 	int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
685 
686 	if (nbyte > INT_MAX) {
687 		*retval = 0;
688 		return EINVAL;
689 	}
690 
691 	uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE,
692 	    &uio_buf[0], sizeof(uio_buf));
693 
694 	if (uio_addiov(uio, buf, nbyte) != 0) {
695 		*retval = 0;
696 		return EINVAL;
697 	}
698 
699 	return writev_internal(p, fd, uio, flags, puguard, retval);
700 }
701 
702 int
write_nocancel(struct proc * p,struct write_nocancel_args * uap,user_ssize_t * retval)703 write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
704 {
705 	return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
706 	           NULL, retval);
707 }
708 
709 /*
710  * Write system call
711  *
712  * Returns:	0			Success
713  *		EBADF
714  *	fp_lookup:EBADF
715  *	dofilewrite:???
716  */
717 int
write(struct proc * p,struct write_args * uap,user_ssize_t * retval)718 write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
719 {
720 	__pthread_testcancel(1);
721 	return write_nocancel(p, (struct write_nocancel_args *)uap, retval);
722 }
723 
/*
 * Non-cancellable pwrite(2): positional write at uap->offset.
 */
int
pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace fd, size, and the 64-bit offset split into two 32-bit halves. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	/* XXX: Should be < 0 instead? (See man page + pwritev) */
	if (uap->offset == (off_t)-1) {
		return EINVAL;
	}

	/* FOF_OFFSET: do not disturb the file's current offset. */
	return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
738 
739 /*
740  * pwrite system call
741  *
742  * Returns:	0			Success
743  *		EBADF
744  *		ESPIPE
745  *		ENXIO
746  *		EINVAL
747  *	fp_lookup:EBADF
748  *	dofilewrite:???
749  */
750 int
pwrite(struct proc * p,struct pwrite_args * uap,user_ssize_t * retval)751 pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
752 {
753 	__pthread_testcancel(1);
754 	return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval);
755 }
756 
757 int
writev_uio(struct proc * p,int fd,user_addr_t user_iovp,int iovcnt,off_t offset,int flags,guardid_t * puguard,user_ssize_t * retval)758 writev_uio(struct proc *p, int fd,
759     user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
760     guardid_t *puguard, user_ssize_t *retval)
761 {
762 	uio_t uio = NULL;
763 	int error;
764 	struct user_iovec *iovp;
765 
766 	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) {
767 		error = EINVAL;
768 		goto out;
769 	}
770 
771 	uio = uio_create(iovcnt, offset,
772 	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
773 	    UIO_WRITE);
774 
775 	iovp = uio_iovsaddr_user(uio);
776 	if (iovp == NULL) {
777 		error = ENOMEM;
778 		goto out;
779 	}
780 
781 	error = copyin_user_iovec_array(user_iovp,
782 	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
783 	    iovcnt, iovp);
784 
785 	if (error) {
786 		goto out;
787 	}
788 
789 	error = writev_internal(p, fd, uio, flags, puguard, retval);
790 
791 out:
792 	if (uio != NULL) {
793 		uio_free(uio);
794 	}
795 
796 	return error;
797 }
798 
799 int
writev_nocancel(struct proc * p,struct writev_nocancel_args * uap,user_ssize_t * retval)800 writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
801 {
802 	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, NULL, retval);
803 }
804 
805 /*
806  * Gather write system call
807  */
808 int
writev(struct proc * p,struct writev_args * uap,user_ssize_t * retval)809 writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
810 {
811 	__pthread_testcancel(1);
812 	return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval);
813 }
814 
815 int
sys_pwritev_nocancel(struct proc * p,struct pwritev_nocancel_args * uap,user_ssize_t * retval)816 sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval)
817 {
818 	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
819 	           FOF_OFFSET, NULL, retval);
820 }
821 
822 /*
823  * Pwritev system call
824  */
825 int
sys_pwritev(struct proc * p,struct pwritev_args * uap,user_ssize_t * retval)826 sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval)
827 {
828 	__pthread_testcancel(1);
829 	return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval);
830 }
831 
/*
 * Ioctl system call
 *
 * Returns:	0			Success
 *		EBADF
 *		ENOTTY
 *		ENOMEM
 *		ESRCH
 *	copyin:EFAULT
 *	copyoutEFAULT
 *	fp_lookup:EBADF			Bad file descriptor
 *	fo_ioctl:???
 */
int
ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
{
	struct fileproc *fp = NULL;
	int error = 0;
	u_int size = 0;
	caddr_t datap = NULL, memp = NULL;
	boolean_t is64bit = FALSE;
	int tmp = 0;
#define STK_PARAMS      128
	/* Small argument payloads are staged on the stack to avoid allocation. */
	char stkbuf[STK_PARAMS] = {};
	int fd = uap->fd;
	u_long com = uap->com;
	struct vfs_context context = *vfs_context_current();

	AUDIT_ARG(fd, uap->fd);
	AUDIT_ARG(addr, uap->data);

	is64bit = proc_is64bit(p);
#if CONFIG_AUDIT
	if (is64bit) {
		AUDIT_ARG(value64, com);
	} else {
		AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
	}
#endif /* CONFIG_AUDIT */

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		return ENOTTY;
	}
	if (size > sizeof(stkbuf)) {
		/* Argument too large for the stack buffer; heap-allocate it. */
		memp = (caddr_t)kalloc_data(size, Z_WAITOK);
		if (memp == 0) {
			return ENOMEM;
		}
		datap = memp;
	} else {
		datap = &stkbuf[0];
	}
	if (com & IOC_IN) {
		if (size) {
			/* Fetch the input argument from user space. */
			error = copyin(uap->data, datap, size);
			if (error) {
				goto out_nofp;
			}
		} else {
			/* XXX - IOC_IN and no size?  we should proably return an error here!! */
			/* Zero-sized IOC_IN: pass the user pointer itself as the argument. */
			if (is64bit) {
				*(user_addr_t *)datap = uap->data;
			} else {
				*(uint32_t *)datap = (uint32_t)uap->data;
			}
		}
	} else if ((com & IOC_OUT) && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(datap, size);
	} else if (com & IOC_VOID) {
		/* XXX - this is odd since IOC_VOID means no parameters */
		if (is64bit) {
			*(user_addr_t *)datap = uap->data;
		} else {
			*(uint32_t *)datap = (uint32_t)uap->data;
		}
	}

	proc_fdlock(p);
	error = fp_lookup(p, fd, &fp, 1);
	if (error) {
		proc_fdunlock(p);
		goto out_nofp;
	}

	AUDIT_ARG(file, p, fp);

	/* ioctl requires the fd to be open for reading or writing. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	/* Issue the ioctl with the credential the file was opened with. */
	context.vc_ucred = fp->fp_glob->fg_cred;

#if CONFIG_MACF
	error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com);
	if (error) {
		goto out;
	}
#endif

	switch (com) {
	case FIONCLEX:
		/* Clear close-on-exec on the descriptor. */
		fp->fp_flags &= ~FP_CLOEXEC;
		break;

	case FIOCLEX:
		/* Set close-on-exec on the descriptor. */
		fp->fp_flags |= FP_CLOEXEC;
		break;

	case FIONBIO:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fnctl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
		}
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
		break;

	case FIOASYNC:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fnctl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FASYNC, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FASYNC, relaxed);
		}
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
		break;

	case FIOSETOWN:
		tmp = *(int *)datap;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the owning pgid directly. */
			((struct socket *)fp_get_data(fp))->so_pgid = tmp;
			break;
		}
		if (fp->f_type == DTYPE_PIPE) {
			error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
			break;
		}
		/*
		 * FIOSETOWN encodes a process group as a negative value;
		 * a positive value names a process, which is translated
		 * to its process group for TIOCSPGRP.
		 */
		if (tmp <= 0) {
			tmp = -tmp;
		} else {
			struct proc *p1 = proc_find(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrpid;
			proc_rele(p1);
		}
		error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			*(int *)datap = ((struct socket *)fp_get_data(fp))->so_pgid;
			break;
		}
		error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
		/* Negate: FIOGETOWN reports process groups as negative values. */
		*(int *)datap = -*(int *)datap;
		break;

	default:
		error = fo_ioctl(fp, com, datap, &context);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) && size) {
			error = copyout(datap, uap->data, (u_int)size);
		}
		break;
	}
out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

out_nofp:
	if (memp) {
		kfree_data(memp, size);
	}
	return error;
}
1030 
1031 int     selwait;
1032 #define SEL_FIRSTPASS 1
1033 #define SEL_SECONDPASS 2
1034 static int selprocess(struct proc *p, int error, int sel_pass);
1035 static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
1036     int nfd, int32_t *retval, int sel_pass, struct select_set *selset);
1037 static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
1038 static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup);
1039 static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim);
1040 static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);
1041 
1042 /*
1043  * This is used for the special device nodes that do not implement
1044  * a proper kevent filter (see filt_specattach).
1045  *
1046  * In order to enable kevents on those, the spec_filtops will pretend
1047  * to call select, and try to sniff the selrecord(), if it observes one,
1048  * the knote is attached, which pairs with selwakeup() or selthreadclear().
1049  *
1050  * The last issue remaining, is that we need to serialize filt_specdetach()
1051  * with this, but it really can't know the "selinfo" or any locking domain.
1052  * To make up for this, We protect knote list operations with a global lock,
1053  * which give us a safe shared locking domain.
1054  *
1055  * Note: It is a little distasteful, but we really have very few of those.
1056  *       The big problem here is that sharing a lock domain without
1057  *       any kind of shared knowledge is a little complicated.
1058  *
1059  *       1. filters can really implement their own kqueue integration
1060  *          to side step this,
1061  *
1062  *       2. There's an opportunity to pick a private lock in selspec_attach()
1063  *          because both the selinfo and the knote are locked at that time.
1064  *          The cleanup story is however a little complicated.
1065  */
1066 static LCK_GRP_DECLARE(selspec_grp, "spec_filtops");
1067 static LCK_SPIN_DECLARE(selspec_lock, &selspec_grp);
1068 
1069 /*
1070  * The "primitive" lock is held.
1071  * The knote lock is held.
1072  */
1073 void
selspec_attach(struct knote * kn,struct selinfo * si)1074 selspec_attach(struct knote *kn, struct selinfo *si)
1075 {
1076 	struct selinfo *cur = knote_kn_hook_get_raw(kn);
1077 
1078 	if (cur == NULL) {
1079 		si->si_flags |= SI_SELSPEC;
1080 		lck_spin_lock(&selspec_lock);
1081 		knote_kn_hook_set_raw(kn, (void *) si);
1082 		KNOTE_ATTACH(&si->si_note, kn);
1083 		lck_spin_unlock(&selspec_lock);
1084 	} else {
1085 		/*
1086 		 * selspec_attach() can be called from e.g. filt_spectouch()
1087 		 * which might be called before any event was dequeued.
1088 		 *
1089 		 * It is hence not impossible for the knote already be hooked.
1090 		 *
1091 		 * Note that selwakeup_internal() could possibly
1092 		 * already have cleared this pointer. This is a race
1093 		 * that filt_specprocess will debounce.
1094 		 */
1095 		assert(si->si_flags & SI_SELSPEC);
1096 		assert(cur == si);
1097 	}
1098 }
1099 
1100 /*
1101  * The "primitive" lock is _not_ held.
1102  *
1103  * knote "lock" is held
1104  */
1105 void
selspec_detach(struct knote * kn)1106 selspec_detach(struct knote *kn)
1107 {
1108 	lck_spin_lock(&selspec_lock);
1109 
1110 	if (!KNOTE_IS_AUTODETACHED(kn)) {
1111 		struct selinfo *sip = knote_kn_hook_get_raw(kn);
1112 		if (sip) {
1113 			KNOTE_DETACH(&sip->si_note, kn);
1114 		}
1115 	}
1116 
1117 	knote_kn_hook_set_raw(kn, NULL);
1118 
1119 	lck_spin_unlock(&selspec_lock);
1120 }
1121 
1122 /*
1123  * Select system call.
1124  *
1125  * Returns:	0			Success
1126  *		EINVAL			Invalid argument
1127  *		EAGAIN			Nonconformant error if allocation fails
1128  */
1129 int
select(struct proc * p,struct select_args * uap,int32_t * retval)1130 select(struct proc *p, struct select_args *uap, int32_t *retval)
1131 {
1132 	__pthread_testcancel(1);
1133 	return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
1134 }
1135 
1136 int
select_nocancel(struct proc * p,struct select_nocancel_args * uap,int32_t * retval)1137 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
1138 {
1139 	uint64_t timeout = 0;
1140 
1141 	if (uap->tv) {
1142 		int err;
1143 		struct timeval atv;
1144 		if (IS_64BIT_PROCESS(p)) {
1145 			struct user64_timeval atv64;
1146 			err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1147 			/* Loses resolution - assume timeout < 68 years */
1148 			atv.tv_sec = (__darwin_time_t)atv64.tv_sec;
1149 			atv.tv_usec = atv64.tv_usec;
1150 		} else {
1151 			struct user32_timeval atv32;
1152 			err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1153 			atv.tv_sec = atv32.tv_sec;
1154 			atv.tv_usec = atv32.tv_usec;
1155 		}
1156 		if (err) {
1157 			return err;
1158 		}
1159 
1160 		if (itimerfix(&atv)) {
1161 			err = EINVAL;
1162 			return err;
1163 		}
1164 
1165 		clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
1166 	}
1167 
1168 	return select_internal(p, uap, timeout, retval);
1169 }
1170 
/*
 * pselect system call: cancellable entry point, mirroring select().
 */
int
pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
{
	/* cancellation point: may not return if the thread is cancelled */
	__pthread_testcancel(1);
	return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
}
1177 
/*
 * pselect_nocancel
 *
 * Like select_nocancel(), but takes a timespec instead of a timeval and
 * optionally installs a temporary signal mask for the duration of the
 * wait.  The previous mask is stashed in the uthread (uu_oldmask /
 * UT_SAS_OLDMASK) so it can be restored either here on direct return,
 * or in selprocess() when the call resumes via the continuation path.
 */
int
pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
{
	int err;
	struct uthread *ut;
	uint64_t timeout = 0;

	if (uap->ts) {
		struct timespec ts;

		/* copy in the 64- or 32-bit user timespec layout */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
			ts.tv_sec = (__darwin_time_t)ts64.tv_sec;
			ts.tv_nsec = (long)ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}
		if (err) {
			return err;
		}

		if (!timespec_is_valid(&ts)) {
			return EINVAL;
		}
		/* convert the relative timespec into an absolute deadline */
		clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
	}

	ut = current_uthread();

	if (uap->mask != USER_ADDR_NULL) {
		/* save current mask, then copyin and set new mask */
		sigset_t newset;
		err = copyin(uap->mask, &newset, sizeof(sigset_t));
		if (err) {
			return err;
		}
		ut->uu_oldmask = ut->uu_sigmask;
		ut->uu_flag |= UT_SAS_OLDMASK;
		/* never allow unblockable signals (sigcantmask) to be masked */
		ut->uu_sigmask = (newset & ~sigcantmask);
	}

	err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);

	if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
		/*
		 * Restore old mask (direct return case). NOTE: EINTR can also be returned
		 * if the thread is cancelled. In that case, we don't reset the signal
		 * mask to its original value (which usually happens in the signal
		 * delivery path). This behavior is permitted by POSIX.
		 */
		ut->uu_sigmask = ut->uu_oldmask;
		ut->uu_oldmask = 0;
		ut->uu_flag &= ~UT_SAS_OLDMASK;
	}

	return err;
}
1239 
1240 void
select_cleanup_uthread(struct _select * sel)1241 select_cleanup_uthread(struct _select *sel)
1242 {
1243 	kfree_data(sel->ibits, 2 * sel->nbytes);
1244 	sel->ibits = sel->obits = NULL;
1245 	sel->nbytes = 0;
1246 }
1247 
1248 static int
select_grow_uthread_cache(struct _select * sel,uint32_t nbytes)1249 select_grow_uthread_cache(struct _select *sel, uint32_t nbytes)
1250 {
1251 	uint32_t *buf;
1252 
1253 	buf = kalloc_data(2 * nbytes, Z_WAITOK | Z_ZERO);
1254 	if (buf) {
1255 		select_cleanup_uthread(sel);
1256 		sel->ibits = buf;
1257 		sel->obits = buf + nbytes / sizeof(uint32_t);
1258 		sel->nbytes = nbytes;
1259 		return true;
1260 	}
1261 	return false;
1262 }
1263 
1264 static void
select_bzero_uthread_cache(struct _select * sel)1265 select_bzero_uthread_cache(struct _select *sel)
1266 {
1267 	bzero(sel->ibits, sel->nbytes * 2);
1268 }
1269 
1270 /*
1271  * Generic implementation of {,p}select. Care: we type-pun uap across the two
1272  * syscalls, which differ slightly. The first 4 arguments (nfds and the fd sets)
1273  * are identical. The 5th (timeout) argument points to different types, so we
1274  * unpack in the syscall-specific code, but the generic code still does a null
1275  * check on this argument to determine if a timeout was specified.
1276  */
1277 static int
select_internal(struct proc * p,struct select_nocancel_args * uap,uint64_t timeout,int32_t * retval)1278 select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
1279 {
1280 	struct uthread *uth = current_uthread();
1281 	struct _select *sel = &uth->uu_select;
1282 	struct _select_data *seldata = &uth->uu_save.uus_select_data;
1283 	int error = 0;
1284 	u_int ni, nw;
1285 
1286 	*retval = 0;
1287 
1288 	seldata->abstime = timeout;
1289 	seldata->args = uap;
1290 	seldata->retval = retval;
1291 	seldata->count = 0;
1292 
1293 	if (uap->nd < 0) {
1294 		return EINVAL;
1295 	}
1296 
1297 	if (uap->nd > p->p_fd.fd_nfiles) {
1298 		uap->nd = p->p_fd.fd_nfiles; /* forgiving; slightly wrong */
1299 	}
1300 	nw = howmany(uap->nd, NFDBITS);
1301 	ni = nw * sizeof(fd_mask);
1302 
1303 	/*
1304 	 * if the previously allocated space for the bits is smaller than
1305 	 * what is requested or no space has yet been allocated for this
1306 	 * thread, allocate enough space now.
1307 	 *
1308 	 * Note: If this process fails, select() will return EAGAIN; this
1309 	 * is the same thing pool() returns in a no-memory situation, but
1310 	 * it is not a POSIX compliant error code for select().
1311 	 */
1312 	if (sel->nbytes >= (3 * ni)) {
1313 		select_bzero_uthread_cache(sel);
1314 	} else if (!select_grow_uthread_cache(sel, 3 * ni)) {
1315 		return EAGAIN;
1316 	}
1317 
1318 	/*
1319 	 * get the bits from the user address space
1320 	 */
1321 #define getbits(name, x) \
1322 	(uap->name ? copyin(uap->name, &sel->ibits[(x) * nw], ni) : 0)
1323 
1324 	if ((error = getbits(in, 0))) {
1325 		return error;
1326 	}
1327 	if ((error = getbits(ou, 1))) {
1328 		return error;
1329 	}
1330 	if ((error = getbits(ex, 2))) {
1331 		return error;
1332 	}
1333 #undef  getbits
1334 
1335 	if ((error = selcount(p, sel->ibits, uap->nd, &seldata->count))) {
1336 		return error;
1337 	}
1338 
1339 	if (uth->uu_selset == NULL) {
1340 		uth->uu_selset = select_set_alloc();
1341 	}
1342 	return selprocess(p, 0, SEL_FIRSTPASS);
1343 }
1344 
/*
 * Continuation entry point: when selprocess() blocks in tsleep1(), the
 * thread resumes here (on a fresh kernel stack) and re-enters
 * selprocess() as the second pass, recovering its state from the
 * uthread save area.
 */
static int
selcontinue(int error)
{
	return selprocess(current_proc(), error, SEL_SECONDPASS);
}
1350 
1351 
1352 /*
1353  * selprocess
1354  *
1355  * Parameters:	error			The error code from our caller
1356  *		sel_pass		The pass we are on
1357  */
1358 int
selprocess(struct proc * p,int error,int sel_pass)1359 selprocess(struct proc *p, int error, int sel_pass)
1360 {
1361 	struct uthread *uth = current_uthread();
1362 	struct _select *sel = &uth->uu_select;
1363 	struct _select_data *seldata = &uth->uu_save.uus_select_data;
1364 	struct select_nocancel_args *uap = seldata->args;
1365 	int *retval = seldata->retval;
1366 
1367 	int unwind = 1;
1368 	int prepost = 0;
1369 	int somewakeup = 0;
1370 	int doretry = 0;
1371 	wait_result_t wait_result;
1372 
1373 	if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) {
1374 		unwind = 0;
1375 	}
1376 	if (seldata->count == 0) {
1377 		unwind = 0;
1378 	}
1379 retry:
1380 	if (error != 0) {
1381 		goto done;
1382 	}
1383 
1384 	OSBitOrAtomic(P_SELECT, &p->p_flag);
1385 
1386 	/* skip scans if the select is just for timeouts */
1387 	if (seldata->count) {
1388 		error = selscan(p, sel, seldata, uap->nd, retval, sel_pass,
1389 		    uth->uu_selset);
1390 		if (error || *retval) {
1391 			goto done;
1392 		}
1393 		if (prepost || somewakeup) {
1394 			/*
1395 			 * if the select of log, then we can wakeup and
1396 			 * discover some one else already read the data;
1397 			 * go to select again if time permits
1398 			 */
1399 			prepost = 0;
1400 			somewakeup = 0;
1401 			doretry = 1;
1402 		}
1403 	}
1404 
1405 	if (uap->tv) {
1406 		uint64_t        now;
1407 
1408 		clock_get_uptime(&now);
1409 		if (now >= seldata->abstime) {
1410 			goto done;
1411 		}
1412 	}
1413 
1414 	if (doretry) {
1415 		/* cleanup obits and try again */
1416 		doretry = 0;
1417 		sel_pass = SEL_FIRSTPASS;
1418 		goto retry;
1419 	}
1420 
1421 	/*
1422 	 * To effect a poll, the timeout argument should be
1423 	 * non-nil, pointing to a zero-valued timeval structure.
1424 	 */
1425 	if (uap->tv && seldata->abstime == 0) {
1426 		goto done;
1427 	}
1428 
1429 	/* No spurious wakeups due to colls,no need to check for them */
1430 	if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
1431 		sel_pass = SEL_FIRSTPASS;
1432 		goto retry;
1433 	}
1434 
1435 	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1436 
1437 	/* if the select is just for timeout skip check */
1438 	if (seldata->count && (sel_pass == SEL_SECONDPASS)) {
1439 		panic("selprocess: 2nd pass assertwaiting");
1440 	}
1441 
1442 	wait_result = waitq_assert_wait64_leeway(uth->uu_selset,
1443 	    NO_EVENT64, THREAD_ABORTSAFE,
1444 	    TIMEOUT_URGENCY_USER_NORMAL,
1445 	    seldata->abstime,
1446 	    TIMEOUT_NO_LEEWAY);
1447 	if (wait_result != THREAD_AWAKENED) {
1448 		/* there are no preposted events */
1449 		error = tsleep1(NULL, PSOCK | PCATCH,
1450 		    "select", 0, selcontinue);
1451 	} else {
1452 		prepost = 1;
1453 		error = 0;
1454 	}
1455 
1456 	if (error == 0) {
1457 		sel_pass = SEL_SECONDPASS;
1458 		if (!prepost) {
1459 			somewakeup = 1;
1460 		}
1461 		goto retry;
1462 	}
1463 done:
1464 	if (unwind) {
1465 		seldrop(p, sel->ibits, uap->nd, seldata->count);
1466 		select_set_reset(uth->uu_selset);
1467 	}
1468 	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1469 	/* select is not restarted after signals... */
1470 	if (error == ERESTART) {
1471 		error = EINTR;
1472 	}
1473 	if (error == EWOULDBLOCK) {
1474 		error = 0;
1475 	}
1476 
1477 	if (error == 0) {
1478 		uint32_t nw = howmany(uap->nd, NFDBITS);
1479 		uint32_t ni = nw * sizeof(fd_mask);
1480 
1481 #define putbits(name, x) \
1482 	(uap->name ? copyout(&sel->obits[(x) * nw], uap->name, ni) : 0)
1483 		int e0 = putbits(in, 0);
1484 		int e1 = putbits(ou, 1);
1485 		int e2 = putbits(ex, 2);
1486 
1487 		error = e0 ?: e1 ?: e2;
1488 #undef putbits
1489 	}
1490 
1491 	if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
1492 		/* restore signal mask - continuation case */
1493 		uth->uu_sigmask = uth->uu_oldmask;
1494 		uth->uu_oldmask = 0;
1495 		uth->uu_flag &= ~UT_SAS_OLDMASK;
1496 	}
1497 
1498 	return error;
1499 }
1500 
1501 
1502 /**
1503  * remove the fileproc's underlying waitq from the supplied waitq set;
1504  * clear FP_INSELECT when appropriate
1505  *
1506  * Parameters:
1507  *		fp	File proc that is potentially currently in select
1508  *		selset	Waitq set to which the fileproc may belong
1509  *			(usually this is the thread's private waitq set)
1510  * Conditions:
1511  *		proc_fdlock is held
1512  */
1513 static void
selunlinkfp(struct fileproc * fp,struct select_set * selset)1514 selunlinkfp(struct fileproc *fp, struct select_set *selset)
1515 {
1516 	if (fp->fp_flags & FP_INSELECT) {
1517 		if (fp->fp_guard_attrs) {
1518 			if (fp->fp_guard->fpg_wset == selset) {
1519 				fp->fp_guard->fpg_wset = NULL;
1520 				fp->fp_flags &= ~FP_INSELECT;
1521 			}
1522 		} else {
1523 			if (fp->fp_wset == selset) {
1524 				fp->fp_wset = NULL;
1525 				fp->fp_flags &= ~FP_INSELECT;
1526 			}
1527 		}
1528 	}
1529 }
1530 
1531 /**
1532  * connect a fileproc to the given selset, potentially bridging to a waitq
1533  * pointed to indirectly by wq_data
1534  *
1535  * Parameters:
1536  *		fp	File proc potentially currently in select
1537  *		selset	Waitq set to which the fileproc should now belong
1538  *			(usually this is the thread's private waitq set)
1539  *
1540  * Conditions:
1541  *		proc_fdlock is held
1542  */
1543 static void
sellinkfp(struct fileproc * fp,struct select_set * selset,waitq_link_t * linkp)1544 sellinkfp(struct fileproc *fp, struct select_set *selset, waitq_link_t *linkp)
1545 {
1546 	if ((fp->fp_flags & FP_INSELECT) == 0) {
1547 		if (fp->fp_guard_attrs) {
1548 			fp->fp_guard->fpg_wset = selset;
1549 		} else {
1550 			fp->fp_wset = selset;
1551 		}
1552 		fp->fp_flags |= FP_INSELECT;
1553 	} else {
1554 		fp->fp_flags |= FP_SELCONFLICT;
1555 		if (linkp->wqlh == NULL) {
1556 			*linkp = waitq_link_alloc(WQT_SELECT_SET);
1557 		}
1558 		select_set_link(&select_conflict_queue, selset, linkp);
1559 	}
1560 }
1561 
1562 
1563 /*
1564  * selscan
1565  *
1566  * Parameters:	p			Process performing the select
1567  *		sel			The per-thread select context structure
1568  *		nfd			The number of file descriptors to scan
1569  *		retval			The per thread system call return area
1570  *		sel_pass		Which pass this is; allowed values are
1571  *						SEL_FIRSTPASS and SEL_SECONDPASS
1572  *		selset			The per thread wait queue set
1573  *
1574  * Returns:	0			Success
1575  *		EIO			Invalid p->p_fd field XXX Obsolete?
1576  *		EBADF			One of the files in the bit vector is
1577  *						invalid.
1578  */
1579 static int
selscan(struct proc * p,struct _select * sel,struct _select_data * seldata,int nfd,int32_t * retval,int sel_pass,struct select_set * selset)1580 selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
1581     int nfd, int32_t *retval, int sel_pass, struct select_set *selset)
1582 {
1583 	int msk, i, j, fd;
1584 	u_int32_t bits;
1585 	struct fileproc *fp;
1586 	int n = 0;              /* count of bits */
1587 	int nc = 0;             /* bit vector offset (nc'th bit) */
1588 	static int flag[3] = { FREAD, FWRITE, 0 };
1589 	u_int32_t *iptr, *optr;
1590 	u_int nw;
1591 	u_int32_t *ibits, *obits;
1592 	int count;
1593 	struct vfs_context context = {
1594 		.vc_thread = current_thread(),
1595 	};
1596 	waitq_link_t link = WQL_NULL;
1597 	void *s_data;
1598 
1599 	ibits = sel->ibits;
1600 	obits = sel->obits;
1601 
1602 	nw = howmany(nfd, NFDBITS);
1603 
1604 	count = seldata->count;
1605 
1606 	nc = 0;
1607 	if (!count) {
1608 		*retval = 0;
1609 		return 0;
1610 	}
1611 
1612 	if (sel_pass == SEL_FIRSTPASS) {
1613 		/*
1614 		 * Make sure the waitq-set is all clean:
1615 		 *
1616 		 * select loops until it finds at least one event, however it
1617 		 * doesn't mean that the event that woke up select is still
1618 		 * fired by the time the second pass runs, and then
1619 		 * select_internal will loop back to a first pass.
1620 		 */
1621 		select_set_reset(selset);
1622 		s_data = &link;
1623 	} else {
1624 		s_data = NULL;
1625 	}
1626 
1627 	proc_fdlock(p);
1628 	for (msk = 0; msk < 3; msk++) {
1629 		iptr = (u_int32_t *)&ibits[msk * nw];
1630 		optr = (u_int32_t *)&obits[msk * nw];
1631 
1632 		for (i = 0; i < nfd; i += NFDBITS) {
1633 			bits = iptr[i / NFDBITS];
1634 
1635 			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1636 				bits &= ~(1U << j);
1637 
1638 				fp = fp_get_noref_locked(p, fd);
1639 				if (fp == NULL) {
1640 					/*
1641 					 * If we abort because of a bad
1642 					 * fd, let the caller unwind...
1643 					 */
1644 					proc_fdunlock(p);
1645 					return EBADF;
1646 				}
1647 				if (sel_pass == SEL_SECONDPASS) {
1648 					selunlinkfp(fp, selset);
1649 				} else if (link.wqlh == NULL) {
1650 					link = waitq_link_alloc(WQT_SELECT_SET);
1651 				}
1652 
1653 				context.vc_ucred = fp->f_cred;
1654 
1655 				/* The select; set the bit, if true */
1656 				if (fo_select(fp, flag[msk], s_data, &context)) {
1657 					optr[fd / NFDBITS] |= (1U << (fd % NFDBITS));
1658 					n++;
1659 				}
1660 				if (sel_pass == SEL_FIRSTPASS) {
1661 					/*
1662 					 * Hook up the thread's waitq set either to
1663 					 * the fileproc structure, or to the global
1664 					 * conflict queue: but only on the first
1665 					 * select pass.
1666 					 */
1667 					sellinkfp(fp, selset, &link);
1668 				}
1669 				nc++;
1670 			}
1671 		}
1672 	}
1673 	proc_fdunlock(p);
1674 
1675 	if (link.wqlh) {
1676 		waitq_link_free(WQT_SELECT_SET, link);
1677 	}
1678 
1679 	*retval = n;
1680 	return 0;
1681 }
1682 
1683 static int poll_callback(struct kevent_qos_s *, kevent_ctx_t);
1684 
/*
 * poll system call: cancellable entry point for poll_nocancel().
 */
int
poll(struct proc *p, struct poll_args *uap, int32_t *retval)
{
	/* cancellation point: may not return if the thread is cancelled */
	__pthread_testcancel(1);
	return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval);
}
1691 
1692 
/*
 * poll_nocancel
 *
 * Implement poll(2) on top of kqueue: each pollfd entry is translated
 * into one or more EV_ONESHOT kevents registered on a private kqueue,
 * kqueue_scan() waits for them, and poll_callback() translates fired
 * kevents back into revents bits.
 *
 * Returns:	0		Success (*retval = count of fds with events)
 *		EINVAL		nfds exceeds the allowed limits
 *		EAGAIN		kqueue or pollfd array allocation failed
 *		EINTR		Interrupted by a signal
 *			plus any copyin/copyout/kqueue_scan error
 */
int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
	struct pollfd *fds = NULL;
	struct kqueue *kq = NULL;
	int error = 0;
	u_int nfds = uap->nfds;
	u_int rfds = 0;   /* fds with revents set (incl. registration failures) */
	rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE);
	size_t ni = nfds * sizeof(struct pollfd);

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	if (nfds > OPEN_MAX ||
	    (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) {
		return EINVAL;
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		return EAGAIN;
	}

	if (nfds) {
		fds = (struct pollfd *)kalloc_data(ni, Z_WAITOK);
		if (NULL == fds) {
			error = EAGAIN;
			goto out;
		}

		error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
		if (error) {
			goto out;
		}
	}

	/* JMM - all this P_SELECT stuff is bogus */
	OSBitOrAtomic(P_SELECT, &p->p_flag);
	for (u_int i = 0; i < nfds; i++) {
		short events = fds[i].events;
		__assert_only int rc;

		/* per spec, ignore fd values below zero */
		if (fds[i].fd < 0) {
			fds[i].revents = 0;
			continue;
		}

		/* convert the poll event into a kqueue kevent */
		struct kevent_qos_s kev = {
			.ident = fds[i].fd,
			.flags = EV_ADD | EV_ONESHOT | EV_POLL,
			.udata = i, /* Index into pollfd array */
		};

		/* Handle input events */
		if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) {
			kev.filter = EVFILT_READ;
			if (events & (POLLPRI | POLLRDBAND)) {
				kev.flags |= EV_OOBAND;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle output events (skipped if READ registration failed:
		 * kevent_register reports failure via EV_ERROR in kev.flags) */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) {
			kev.filter = EVFILT_WRITE;
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle BSD extension vnode events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) {
			kev.filter = EVFILT_VNODE;
			kev.fflags = 0;
			if (events & POLLEXTEND) {
				kev.fflags |= NOTE_EXTEND;
			}
			if (events & POLLATTRIB) {
				kev.fflags |= NOTE_ATTRIB;
			}
			if (events & POLLNLINK) {
				kev.fflags |= NOTE_LINK;
			}
			if (events & POLLWRITE) {
				kev.fflags |= NOTE_WRITE;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		if (kev.flags & EV_ERROR) {
			/* registration failed entirely: report POLLNVAL */
			fds[i].revents = POLLNVAL;
			rfds++;
		} else {
			fds[i].revents = 0;
		}
	}

	/*
	 * Did we have any trouble registering?
	 * If user space passed 0 FDs, then respect any timeout value passed.
	 * This is an extremely inefficient sleep. If user space passed one or
	 * more FDs, and we had trouble registering _all_ of them, then bail
	 * out. If a subset of the provided FDs failed to register, then we
	 * will still call the kqueue_scan function.
	 */
	if (nfds && (rfds == nfds)) {
		goto done;
	}

	/* scan for, and possibly wait for, the kevents to trigger */
	kevent_ctx_t kectx = kevent_get_context(current_thread());
	*kectx = (struct kevent_ctx_s){
		.kec_process_noutputs = rfds,
		.kec_process_flags    = KEVENT_FLAG_POLL,
		.kec_deadline         = 0, /* wait forever */
		.kec_poll_fds         = fds,
	};

	/*
	 * If any events have trouble registering, an event has fired and we
	 * shouldn't wait for events in kqueue_scan.
	 */
	if (rfds) {
		kectx->kec_process_flags |= KEVENT_FLAG_IMMEDIATE;
	} else if (uap->timeout != -1) {
		/* poll timeout is in milliseconds; -1 means wait forever */
		clock_interval_to_deadline(uap->timeout, NSEC_PER_MSEC,
		    &kectx->kec_deadline);
	}

	error = kqueue_scan(kq, kectx->kec_process_flags, kectx, poll_callback);
	rfds = kectx->kec_process_noutputs;

done:
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* poll is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == 0) {
		error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
		*retval = rfds;
	}

out:
	kfree_data(fds, ni);

	kqueue_dealloc(kq);
	return error;
}
1852 
1853 static int
poll_callback(struct kevent_qos_s * kevp,kevent_ctx_t kectx)1854 poll_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
1855 {
1856 	assert(kectx->kec_process_flags & KEVENT_FLAG_POLL);
1857 	struct pollfd *fds = &kectx->kec_poll_fds[kevp->udata];
1858 
1859 	short prev_revents = fds->revents;
1860 	short mask = 0;
1861 
1862 	/* convert the results back into revents */
1863 	if (kevp->flags & EV_EOF) {
1864 		fds->revents |= POLLHUP;
1865 	}
1866 	if (kevp->flags & EV_ERROR) {
1867 		fds->revents |= POLLERR;
1868 	}
1869 
1870 	switch (kevp->filter) {
1871 	case EVFILT_READ:
1872 		if (fds->revents & POLLHUP) {
1873 			mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND);
1874 		} else {
1875 			mask = (POLLIN | POLLRDNORM);
1876 			if (kevp->flags & EV_OOBAND) {
1877 				mask |= (POLLPRI | POLLRDBAND);
1878 			}
1879 		}
1880 		fds->revents |= (fds->events & mask);
1881 		break;
1882 
1883 	case EVFILT_WRITE:
1884 		if (!(fds->revents & POLLHUP)) {
1885 			fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND));
1886 		}
1887 		break;
1888 
1889 	case EVFILT_VNODE:
1890 		if (kevp->fflags & NOTE_EXTEND) {
1891 			fds->revents |= (fds->events & POLLEXTEND);
1892 		}
1893 		if (kevp->fflags & NOTE_ATTRIB) {
1894 			fds->revents |= (fds->events & POLLATTRIB);
1895 		}
1896 		if (kevp->fflags & NOTE_LINK) {
1897 			fds->revents |= (fds->events & POLLNLINK);
1898 		}
1899 		if (kevp->fflags & NOTE_WRITE) {
1900 			fds->revents |= (fds->events & POLLWRITE);
1901 		}
1902 		break;
1903 	}
1904 
1905 	if (fds->revents != 0 && prev_revents == 0) {
1906 		kectx->kec_process_noutputs++;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
/*
 * Trivial device select routine: unconditionally reports the device as
 * ready for I/O.
 */
int
seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
{
	return 1;
}
1917 
1918 /*
1919  * selcount
1920  *
1921  * Count the number of bits set in the input bit vector, and establish an
1922  * outstanding fp->fp_iocount for each of the descriptors which will be in
1923  * use in the select operation.
1924  *
1925  * Parameters:	p			The process doing the select
1926  *		ibits			The input bit vector
1927  *		nfd			The number of fd's in the vector
1928  *		countp			Pointer to where to store the bit count
1929  *
1930  * Returns:	0			Success
1931  *		EIO			Bad per process open file table
1932  *		EBADF			One of the bits in the input bit vector
1933  *						references an invalid fd
1934  *
1935  * Implicit:	*countp (modified)	Count of fd's
1936  *
1937  * Notes:	This function is the first pass under the proc_fdlock() that
1938  *		permits us to recognize invalid descriptors in the bit vector;
1939  *		the may, however, not remain valid through the drop and
1940  *		later reacquisition of the proc_fdlock().
1941  */
1942 static int
selcount(struct proc * p,u_int32_t * ibits,int nfd,int * countp)1943 selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
1944 {
1945 	int msk, i, j, fd;
1946 	u_int32_t bits;
1947 	struct fileproc *fp;
1948 	int n = 0;
1949 	u_int32_t *iptr;
1950 	u_int nw;
1951 	int error = 0;
1952 	int need_wakeup = 0;
1953 
1954 	nw = howmany(nfd, NFDBITS);
1955 
1956 	proc_fdlock(p);
1957 	for (msk = 0; msk < 3; msk++) {
1958 		iptr = (u_int32_t *)&ibits[msk * nw];
1959 		for (i = 0; i < nfd; i += NFDBITS) {
1960 			bits = iptr[i / NFDBITS];
1961 			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1962 				bits &= ~(1U << j);
1963 
1964 				fp = fp_get_noref_locked(p, fd);
1965 				if (fp == NULL) {
1966 					*countp = 0;
1967 					error = EBADF;
1968 					goto bad;
1969 				}
1970 				os_ref_retain_locked(&fp->fp_iocount);
1971 				n++;
1972 			}
1973 		}
1974 	}
1975 	proc_fdunlock(p);
1976 
1977 	*countp = n;
1978 	return 0;
1979 
1980 bad:
1981 	if (n == 0) {
1982 		goto out;
1983 	}
1984 	/* Ignore error return; it's already EBADF */
1985 	(void)seldrop_locked(p, ibits, nfd, n, &need_wakeup);
1986 
1987 out:
1988 	proc_fdunlock(p);
1989 	if (need_wakeup) {
1990 		wakeup(&p->p_fd.fd_fpdrainwait);
1991 	}
1992 	return error;
1993 }
1994 
1995 
1996 /*
1997  * seldrop_locked
1998  *
1999  * Drop outstanding wait queue references set up during selscan(); drop the
2000  * outstanding per fileproc fp_iocount picked up during the selcount().
2001  *
2002  * Parameters:	p			Process performing the select
2003  *		ibits			Input bit bector of fd's
2004  *		nfd			Number of fd's
2005  *		lim			Limit to number of vector entries to
2006  *						consider, or -1 for "all"
2007  *		inselect		True if
2008  *		need_wakeup		Pointer to flag to set to do a wakeup
2009  *					if f_iocont on any descriptor goes to 0
2010  *
2011  * Returns:	0			Success
2012  *		EBADF			One or more fds in the bit vector
2013  *						were invalid, but the rest
2014  *						were successfully dropped
2015  *
2016  * Notes:	An fd make become bad while the proc_fdlock() is not held,
2017  *		if a multithreaded application closes the fd out from under
2018  *		the in progress select.  In this case, we still have to
2019  *		clean up after the set up on the remaining fds.
2020  */
/*
 * seldrop_locked
 *
 * Drop the I/O references taken by selcount() on up to 'lim' of the file
 * descriptors named in 'ibits', unlinking each fileproc from the calling
 * thread's select set as it goes.
 *
 * Called with the proc_fdlock held; returns with it still held.
 *
 * Parameters:	p		process whose descriptors were referenced
 *		ibits		input descriptor bit vectors (read/write/except)
 *		nfd		number of descriptors covered by the vectors
 *		lim		number of references previously taken; stop
 *				after dropping this many
 *		need_wakeup	out: set to 1 when the caller must wakeup()
 *				threads waiting for fd iocounts to drain
 *
 * Returns:	0		(always, in the visible code; kept for
 *				symmetry with the selcount() error contract)
 */
static int
seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup)
{
	int msk, i, j, nc, fd;
	u_int32_t bits;
	struct fileproc *fp;
	u_int32_t *iptr;
	u_int nw;
	int error = 0;
	uthread_t uth = current_uthread();
	struct _select_data *seldata;

	*need_wakeup = 0;

	/* number of 32-bit words needed to hold nfd bits */
	nw = howmany(nfd, NFDBITS);
	/* NOTE(review): seldata is assigned but never read below — confirm
	 * whether it is still needed. */
	seldata = &uth->uu_save.uus_select_data;

	nc = 0;
	/* walk the three bit vectors: read, write, exception */
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];
			/* visit each set bit; ffs() is 1-based, hence --j */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);
				/*
				 * If we've already dropped as many as were
				 * counted/scanned, then we are done.
				 */
				if (nc >= lim) {
					goto done;
				}

				/*
				 * We took an I/O reference in selcount,
				 * so the fp can't possibly be NULL.
				 */
				fp = fp_get_noref_locked_with_iocount(p, fd);
				selunlinkfp(fp, uth->uu_selset);

				nc++;

				const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount);
				if (0 == refc) {
					panic("fp_iocount overdecrement!");
				}

				if (1 == refc) {
					/*
					 * The last iocount is responsible for clearing
					 * selconfict flag - even if we didn't set it -
					 * and is also responsible for waking up anyone
					 * waiting on iocounts to drain.
					 */
					if (fp->fp_flags & FP_SELCONFLICT) {
						fp->fp_flags &= ~FP_SELCONFLICT;
					}
					if (p->p_fd.fd_fpdrainwait) {
						p->p_fd.fd_fpdrainwait = 0;
						*need_wakeup = 1;
					}
				}
			}
		}
	}
done:
	return error;
}
2088 
2089 
2090 static int
seldrop(struct proc * p,u_int32_t * ibits,int nfd,int lim)2091 seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim)
2092 {
2093 	int error;
2094 	int need_wakeup = 0;
2095 
2096 	proc_fdlock(p);
2097 	error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup);
2098 	proc_fdunlock(p);
2099 	if (need_wakeup) {
2100 		wakeup(&p->p_fd.fd_fpdrainwait);
2101 	}
2102 	return error;
2103 }
2104 
2105 /*
2106  * Record a select request.
2107  */
2108 void
selrecord(__unused struct proc * selector,struct selinfo * sip,void * s_data)2109 selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
2110 {
2111 	struct select_set *selset = current_uthread()->uu_selset;
2112 
2113 	/* do not record if this is second pass of select */
2114 	if (!s_data) {
2115 		return;
2116 	}
2117 
2118 	if (selset == SELSPEC_RECORD_MARKER) {
2119 		/*
2120 		 * The kevent subsystem is trying to sniff
2121 		 * the selinfo::si_note to attach to.
2122 		 */
2123 		((selspec_record_hook_t)s_data)(sip);
2124 	} else {
2125 		waitq_link_t *linkp = s_data;
2126 
2127 		if (!waitq_is_valid(&sip->si_waitq)) {
2128 			waitq_init(&sip->si_waitq, WQT_SELECT, SYNC_POLICY_FIFO);
2129 		}
2130 
2131 		/* note: this checks for pre-existing linkage */
2132 		select_set_link(&sip->si_waitq, selset, linkp);
2133 	}
2134 }
2135 
/*
 * selwakeup_internal
 *
 * Common wakeup path for selwakeup() and selthreadclear().
 *
 * Delivers 'hint' to any knotes attached through the SI_SELSPEC
 * mechanism, then wakes (and unlinks) every select waiter on the
 * selinfo's waitq with wait result 'wr'.
 */
static void
selwakeup_internal(struct selinfo *sip, long hint, wait_result_t wr)
{
	if (sip->si_flags & SI_SELSPEC) {
		/*
		 * The "primitive" lock is held.
		 * The knote lock is not held.
		 *
		 * All knotes will transition their kn_hook to NULL and we will
		 * reeinitialize the primitive's klist
		 */
		lck_spin_lock(&selspec_lock);
		knote(&sip->si_note, hint, /*autodetach=*/ true);
		lck_spin_unlock(&selspec_lock);
		sip->si_flags &= ~SI_SELSPEC;
	}

	/*
	 * After selrecord() has been called, selinfo owners must call
	 * at least one of selwakeup() or selthreadclear().
	 *
	 * Use this opportunity to deinit the waitq
	 * so that all linkages are garbage collected
	 * in a combined wakeup-all + unlink + deinit call.
	 */
	select_waitq_wakeup_and_deinit(&sip->si_waitq, NO_EVENT64, wr);
}
2163 
2164 
/*
 * selwakeup
 *
 * Wake all threads selecting on 'sip', with a normal THREAD_AWAKENED
 * wait result and no knote hint.
 */
void
selwakeup(struct selinfo *sip)
{
	selwakeup_internal(sip, 0, THREAD_AWAKENED);
}
2170 
/*
 * selthreadclear
 *
 * Revoke select registrations on 'sip': attached knotes receive
 * NOTE_REVOKE and waiting threads are released with THREAD_RESTART.
 */
void
selthreadclear(struct selinfo *sip)
{
	selwakeup_internal(sip, NOTE_REVOKE, THREAD_RESTART);
}
2176 
2177 
2178 /*
2179  * gethostuuid
2180  *
2181  * Description:	Get the host UUID from IOKit and return it to user space.
2182  *
2183  * Parameters:	uuid_buf		Pointer to buffer to receive UUID
2184  *		timeout			Timespec for timout
2185  *
2186  * Returns:	0			Success
2187  *		EWOULDBLOCK		Timeout is too short
2188  *		copyout:EFAULT		Bad user buffer
2189  *		mac_system_check_info:EPERM		Client not allowed to perform this operation
2190  *
2191  * Notes:	A timeout seems redundant, since if it's tolerable to not
2192  *		have a system UUID in hand, then why ask for one?
2193  */
int
gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
{
	kern_return_t kret;
	int error;
	mach_timespec_t mach_ts;        /* for IOKit call */
	__darwin_uuid_t uuid_kern = {}; /* for IOKit call */

	/*
	 * Check entitlement: holders of the private entitlement bypass the
	 * MAC policy check below (which is only compiled in on non-macOS
	 * CONFIG_MACF builds).
	 */
	if (!IOCurrentTaskHasEntitlement("com.apple.private.getprivatesysid")) {
#if !defined(XNU_TARGET_OS_OSX)
#if CONFIG_MACF
		if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
			/* EPERM invokes userspace upcall if present */
			return error;
		}
#endif
#endif
	}

	/* Convert the 32/64 bit timespec into a mach_timespec_t */
	if (proc_is64bit(p)) {
		struct user64_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		/* user64 fields are 64-bit; narrow to the mach_timespec widths */
		mach_ts.tv_sec = (unsigned int)ts.tv_sec;
		mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec;
	} else {
		struct user32_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		mach_ts.tv_sec = ts.tv_sec;
		mach_ts.tv_nsec = ts.tv_nsec;
	}

	/* Call IOKit with the stack buffer to get the UUID */
	kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);

	/*
	 * If we get it, copy out the data to the user buffer; note that a
	 * uuid_t is an array of characters, so this is size invariant for
	 * 32 vs. 64 bit.
	 */
	if (kret == KERN_SUCCESS) {
		error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
	} else {
		/* UUID not produced in time: reported as a too-short timeout */
		error = EWOULDBLOCK;
	}

	return error;
}
2249 
2250 /*
2251  * ledger
2252  *
2253  * Description:	Omnibus system call for ledger operations
2254  */
int
ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
{
#if !CONFIG_MACF
#pragma unused(p)
#endif
	int rval, pid, len, error;
#ifdef LEDGER_DEBUG
	struct ledger_limit_args lla;
#endif
	task_t task;
	proc_t proc;

	/* Finish copying in the necessary args before taking the proc lock */
	error = 0;
	len = 0;
	if (args->cmd == LEDGER_ENTRY_INFO || args->cmd == LEDGER_ENTRY_INFO_V2) {
		error = copyin(args->arg3, (char *)&len, sizeof(len));
	} else if (args->cmd == LEDGER_TEMPLATE_INFO) {
		error = copyin(args->arg2, (char *)&len, sizeof(len));
	} else if (args->cmd == LEDGER_LIMIT)
	/* LEDGER_LIMIT is only supported on LEDGER_DEBUG kernels */
#ifdef LEDGER_DEBUG
	{ error = copyin(args->arg2, (char *)&lla, sizeof(lla));}
#else
	{ return EINVAL; }
#endif
	else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) {
		return EINVAL;
	}

	if (error) {
		return error;
	}
	if (len < 0) {
		return EINVAL;
	}

	rval = 0;
	if (args->cmd != LEDGER_TEMPLATE_INFO) {
		/*
		 * All commands except LEDGER_TEMPLATE_INFO target a specific
		 * process; 'proc' and 'task' are only initialized on this
		 * path, and each case below owns the proc_rele().
		 */
		pid = (int)args->arg1;
		proc = proc_find(pid);
		if (proc == NULL) {
			return ESRCH;
		}

#if CONFIG_MACF
		error = mac_proc_check_ledger(p, proc, args->cmd);
		if (error) {
			proc_rele(proc);
			return error;
		}
#endif

		task = proc_task(proc);
	}

	switch (args->cmd) {
#ifdef LEDGER_DEBUG
	case LEDGER_LIMIT: {
		/*
		 * NOTE(review): the EPERM assigned here is immediately
		 * overwritten by the ledger_limit() result, so the superuser
		 * check has no effect — likely a missing break/return.
		 * LEDGER_DEBUG-only code; confirm intent.
		 */
		if (!kauth_cred_issuser(kauth_cred_get())) {
			rval = EPERM;
		}
		rval = ledger_limit(task, &lla);
		proc_rele(proc);
		break;
	}
#endif
	case LEDGER_INFO: {
		struct ledger_info info = {};

		rval = ledger_info(task, &info);
		proc_rele(proc);
		if (rval == 0) {
			rval = copyout(&info, args->arg2,
			    sizeof(info));
		}
		break;
	}

	case LEDGER_ENTRY_INFO:
	case LEDGER_ENTRY_INFO_V2: {
		bool v2 = (args->cmd == LEDGER_ENTRY_INFO_V2);
		int entry_size = (v2) ? sizeof(struct ledger_entry_info_v2) : sizeof(struct ledger_entry_info);
		void *buf;
		int sz;

		/* Settle ledger entries for memorystatus and pages grabbed */
		task_ledger_settle(task);

		/* on success, 'buf' is a kernel allocation we must free */
		rval = ledger_get_task_entry_info_multiple(task, &buf, &len, v2);
		proc_rele(proc);
		if ((rval == 0) && (len >= 0)) {
			sz = len * entry_size;
			rval = copyout(buf, args->arg2, sz);
			kfree_data(buf, sz);
		}
		if (rval == 0) {
			/* report the entry count back to the caller */
			rval = copyout(&len, args->arg3, sizeof(len));
		}
		break;
	}

	case LEDGER_TEMPLATE_INFO: {
		void *buf;
		int sz;

		rval = ledger_template_info(&buf, &len);
		if ((rval == 0) && (len >= 0)) {
			sz = len * sizeof(struct ledger_template_info);
			rval = copyout(buf, args->arg1, sz);
			kfree_data(buf, sz);
		}
		if (rval == 0) {
			rval = copyout(&len, args->arg2, sizeof(len));
		}
		break;
	}

	default:
		/* cmd was range-checked above; panic() does not return, so
		 * the statements after it are dead. */
		panic("ledger syscall logic error -- command type %d", args->cmd);
		proc_rele(proc);
		rval = EINVAL;
	}

	return rval;
}
2381 
2382 int
telemetry(__unused struct proc * p,struct telemetry_args * args,__unused int32_t * retval)2383 telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
2384 {
2385 	int error = 0;
2386 
2387 	switch (args->cmd) {
2388 #if CONFIG_TELEMETRY
2389 	case TELEMETRY_CMD_TIMER_EVENT:
2390 		error = ENOTSUP;
2391 		break;
2392 	case TELEMETRY_CMD_PMI_SETUP:
2393 		error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval);
2394 		break;
2395 #endif /* CONFIG_TELEMETRY */
2396 	case TELEMETRY_CMD_VOUCHER_NAME:
2397 		if (thread_set_voucher_name((mach_port_name_t)args->deadline)) {
2398 			error = EINVAL;
2399 		}
2400 		break;
2401 
2402 	default:
2403 		error = EINVAL;
2404 		break;
2405 	}
2406 
2407 	return error;
2408 }
2409 
2410 /*
2411  * Logging
2412  *
2413  * Description: syscall to access kernel logging from userspace
2414  *
2415  * Args:
2416  *	tag - used for syncing with userspace on the version.
2417  *	flags - flags used by the syscall.
2418  *	buffer - userspace address of string to copy.
2419  *	size - size of buffer.
2420  */
2421 int
log_data(__unused struct proc * p,struct log_data_args * args,int * retval)2422 log_data(__unused struct proc *p, struct log_data_args *args, int *retval)
2423 {
2424 	unsigned int tag = args->tag;
2425 	unsigned int flags = args->flags;
2426 	user_addr_t buffer = args->buffer;
2427 	unsigned int size = args->size;
2428 	int ret = 0;
2429 	*retval = 0;
2430 
2431 	/* Only DEXTs are suppose to use this syscall. */
2432 	if (!task_is_driver(current_task())) {
2433 		return EPERM;
2434 	}
2435 
2436 	/*
2437 	 * Tag synchronize the syscall version with userspace.
2438 	 * Tag == 0 => flags == OS_LOG_TYPE
2439 	 */
2440 	if (tag != 0) {
2441 		return EINVAL;
2442 	}
2443 
2444 	/*
2445 	 * OS_LOG_TYPE are defined in libkern/os/log.h
2446 	 * In userspace they are defined in libtrace/os/log.h
2447 	 */
2448 	if (flags != OS_LOG_TYPE_DEFAULT &&
2449 	    flags != OS_LOG_TYPE_INFO &&
2450 	    flags != OS_LOG_TYPE_DEBUG &&
2451 	    flags != OS_LOG_TYPE_ERROR &&
2452 	    flags != OS_LOG_TYPE_FAULT) {
2453 		return EINVAL;
2454 	}
2455 
2456 	if (size == 0) {
2457 		return EINVAL;
2458 	}
2459 
2460 	/* truncate to OS_LOG_DATA_MAX_SIZE */
2461 	if (size > OS_LOG_DATA_MAX_SIZE) {
2462 		size = OS_LOG_DATA_MAX_SIZE;
2463 	}
2464 
2465 	char *log_msg = (char *)kalloc_data(size, Z_WAITOK);
2466 	if (!log_msg) {
2467 		return ENOMEM;
2468 	}
2469 
2470 	if (copyin(buffer, log_msg, size) != 0) {
2471 		ret = EFAULT;
2472 		goto out;
2473 	}
2474 	log_msg[size - 1] = '\0';
2475 
2476 	/*
2477 	 * This will log to dmesg and logd.
2478 	 * The call will fail if the current
2479 	 * process is not a driverKit process.
2480 	 */
2481 	os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg);
2482 
2483 out:
2484 	if (log_msg != NULL) {
2485 		kfree_data(log_msg, size);
2486 	}
2487 
2488 	return ret;
2489 }
2490 
2491 /*
2492  * Coprocessor logging
2493  *
2494  * Description: syscall to access kernel coprocessor logging from userspace
2495  *
2496  * Args:
2497  *	buff - userspace address of string to copy.
2498  *	buff_len - size of buffer.
2499  *	type - log type/level
2500  *	uuid - log source identifier
2501  *	timestamp - log timestamp
2502  *	offset - log format offset
2503  *	stream_log - flag indicating stream
2504  */
2505 int
oslog_coproc(__unused struct proc * p,struct oslog_coproc_args * args,int * retval)2506 oslog_coproc(__unused struct proc *p, struct oslog_coproc_args *args, int *retval)
2507 {
2508 	user_addr_t buff = args->buff;
2509 	uint64_t buff_len = args->buff_len;
2510 	uint32_t type = args->type;
2511 	user_addr_t uuid = args->uuid;
2512 	uint64_t timestamp = args->timestamp;
2513 	uint32_t offset = args->offset;
2514 	uint32_t stream_log = args->stream_log;
2515 	char *log_buff = NULL;
2516 	uuid_t log_uuid;
2517 
2518 	int ret = 0;
2519 	*retval = 0;
2520 
2521 	const task_t __single task = proc_task(p);
2522 	if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.coprocessor-logging")) {
2523 		return EPERM;
2524 	}
2525 
2526 	/* Only DEXTs are supposed to use this syscall. */
2527 	if (!task_is_driver(current_task())) {
2528 		return EPERM;
2529 	}
2530 
2531 	// the full message contains a 32 bit offset value, 16 byte uuid and then the provided buffer
2532 	// entire message needs to fit within OS_LOG_BUFFER_MAX_SIZE
2533 	// this is reflected in `log.c:os_log_coprocessor`
2534 	uint64_t full_len;
2535 	if (os_add_overflow(buff_len, sizeof(uuid_t) + sizeof(uint32_t), &full_len) || full_len > OS_LOG_BUFFER_MAX_SIZE) {
2536 		return ERANGE;
2537 	}
2538 
2539 	log_buff = (char *)kalloc_data(buff_len, Z_WAITOK);
2540 	if (!log_buff) {
2541 		return ENOMEM;
2542 	}
2543 
2544 	ret = copyin(buff, log_buff, buff_len);
2545 	if (ret) {
2546 		goto out;
2547 	}
2548 
2549 	ret = copyin(uuid, &log_uuid, sizeof(uuid_t));
2550 	if (ret) {
2551 		goto out;
2552 	}
2553 
2554 	os_log_coprocessor(log_buff, buff_len, (os_log_type_t)type, (char *)log_uuid, timestamp, offset, stream_log);
2555 
2556 out:
2557 	if (log_buff != NULL) {
2558 		kfree_data(log_buff, buff_len);
2559 	}
2560 
2561 	return ret;
2562 }
2563 
2564 /*
2565  * Coprocessor logging registration
2566  *
2567  * Description: syscall to access kernel coprocessor logging registration from userspace
2568  *
2569  * Args:
2570  *	uuid - coprocessor fw uuid to harvest
2571  *	file_path - file name for logd to harvest from
2572  *	file_path_len - file name length
2573  */
2574 int
oslog_coproc_reg(__unused struct proc * p,struct oslog_coproc_reg_args * args,int * retval)2575 oslog_coproc_reg(__unused struct proc *p, struct oslog_coproc_reg_args *args, int *retval)
2576 {
2577 	user_addr_t uuid = args->uuid;
2578 	user_addr_t file_path = args->file_path;
2579 	size_t file_path_len = args->file_path_len;
2580 	char *file_path_buf = NULL;
2581 	uuid_t uuid_buf;
2582 
2583 	int ret = 0;
2584 	*retval = 0;
2585 
2586 	const task_t __single task = proc_task(p);
2587 	if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.coprocessor-logging")) {
2588 		return EPERM;
2589 	}
2590 
2591 	/* Only DEXTs are supposed to use this syscall. */
2592 	if (!task_is_driver(current_task())) {
2593 		return EPERM;
2594 	}
2595 
2596 	if (file_path_len > PATH_MAX) {
2597 		return EINVAL;
2598 	}
2599 
2600 	file_path_buf = (char *)kalloc_data(file_path_len, Z_WAITOK);
2601 	if (!file_path_buf) {
2602 		return ENOMEM;
2603 	}
2604 
2605 	ret = copyin(file_path, file_path_buf, file_path_len);
2606 	if (ret) {
2607 		goto out;
2608 	}
2609 
2610 	ret = copyin(uuid, &uuid_buf, sizeof(uuid_t));
2611 	if (ret) {
2612 		goto out;
2613 	}
2614 
2615 	os_log_coprocessor_register_with_type((char *)uuid_buf, file_path_buf, os_log_coproc_register_harvest_fs_ftab);
2616 
2617 out:
2618 	if (file_path_buf != NULL) {
2619 		kfree_data(file_path_buf, file_path_len);
2620 	}
2621 
2622 	return ret;
2623 }
2624 
2625 #if DEVELOPMENT || DEBUG
2626 
2627 static int
2628 sysctl_mpsc_test_pingpong SYSCTL_HANDLER_ARGS
2629 {
2630 #pragma unused(oidp, arg1, arg2)
2631 	uint64_t value = 0;
2632 	int error;
2633 
2634 	error = SYSCTL_IN(req, &value, sizeof(value));
2635 	if (error) {
2636 		return error;
2637 	}
2638 
2639 	if (error == 0 && req->newptr) {
2640 		error = mpsc_test_pingpong(value, &value);
2641 		if (error == 0) {
2642 			error = SYSCTL_OUT(req, &value, sizeof(value));
2643 		}
2644 	}
2645 
2646 	return error;
2647 }
2648 SYSCTL_PROC(_kern, OID_AUTO, mpsc_test_pingpong, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2649     0, 0, sysctl_mpsc_test_pingpong, "Q", "MPSC tests: pingpong");
2650 
2651 #endif /* DEVELOPMENT || DEBUG */
2652 
/* Telemetry, microstackshots */

SYSCTL_NODE(_kern, OID_AUTO, microstackshot, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    "microstackshot info");

#if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES)

/* Read-only exports of the PMI period and counter used for
 * microstackshot sampling. */
extern uint64_t mt_microstackshot_period;
SYSCTL_QUAD(_kern_microstackshot, OID_AUTO, pmi_sample_period,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_period,
    "PMI sampling rate");
extern unsigned int mt_microstackshot_ctr;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, pmi_sample_counter,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_ctr, 0,
    "PMI counter");

#endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */

/* Remote Time api */
SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api");
2673 
2674 #if DEVELOPMENT || DEBUG
2675 #if CONFIG_MACH_BRIDGE_SEND_TIME
extern _Atomic uint32_t bt_init_flag;
extern uint32_t mach_bridge_timer_enable(uint32_t, int);

/* Read-only: whether the bridge timer has been initialized. */
SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag,
    CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, "");

/*
 * machdep.remotetime.bridge_timer_enable
 *
 * Read: current bridge-timer enable state.  Write (0 or 1 only): change
 * it.  Until bt_init_flag is set, reads return 0 and writes are ignored.
 */
static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t value = 0;
	int error = 0;
	/* User is querying buffer size */
	if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(value);
		return 0;
	}
	if (os_atomic_load(&bt_init_flag, acquire)) {
		if (req->newptr) {
			int new_value = 0;
			error = SYSCTL_IN(req, &new_value, sizeof(new_value));
			if (error) {
				return error;
			}
			/* only 0 (disable) and 1 (enable) are accepted */
			if (new_value == 0 || new_value == 1) {
				value = mach_bridge_timer_enable(new_value, 1);
			} else {
				return EPERM;
			}
		} else {
			value = mach_bridge_timer_enable(0, 0);
		}
	}
	error = SYSCTL_OUT(req, &value, sizeof(value));
	return error;
}

SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_bridge_timer_enable, "I", "");
2715 
2716 #endif /* CONFIG_MACH_BRIDGE_SEND_TIME */
2717 
2718 static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS
2719 {
2720 #pragma unused(oidp, arg1, arg2)
2721 	uint64_t ltime = 0, rtime = 0;
2722 	if (req->oldptr == USER_ADDR_NULL) {
2723 		req->oldidx = sizeof(rtime);
2724 		return 0;
2725 	}
2726 	if (req->newptr) {
2727 		int error = SYSCTL_IN(req, &ltime, sizeof(ltime));
2728 		if (error) {
2729 			return error;
2730 		}
2731 	}
2732 	rtime = mach_bridge_remote_time(ltime);
2733 	return SYSCTL_OUT(req, &rtime, sizeof(rtime));
2734 }
2735 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time,
2736     CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2737     0, 0, sysctl_mach_bridge_remote_time, "Q", "");
2738 
2739 #endif /* DEVELOPMENT || DEBUG */
2740 
2741 #if CONFIG_MACH_BRIDGE_RECV_TIME
2742 extern struct bt_params bt_params_get_latest(void);
2743 
2744 static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS
2745 {
2746 #pragma unused(oidp, arg1, arg2)
2747 	struct bt_params params = {};
2748 	if (req->oldptr == USER_ADDR_NULL) {
2749 		req->oldidx = sizeof(struct bt_params);
2750 		return 0;
2751 	}
2752 	if (req->newptr) {
2753 		return EPERM;
2754 	}
2755 	params = bt_params_get_latest();
2756 	return SYSCTL_OUT(req, &params, MIN(sizeof(params), req->oldlen));
2757 }
2758 
2759 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params,
2760     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0,
2761     0, sysctl_mach_bridge_conversion_params, "S,bt_params", "");
2762 
2763 #endif /* CONFIG_MACH_BRIDGE_RECV_TIME */
2764 
2765 #if DEVELOPMENT || DEBUG
2766 
#include <pexpert/pexpert.h>
extern int32_t sysctl_get_bound_cpuid(void);
extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
/*
 * kern.sched_thread_bind_cpu
 *
 * Read: the CPU id the calling thread is bound to.  Write: bind the
 * calling thread to the given CPU id.  Gated on the enable_skstb
 * boot-arg.
 */
static int
sysctl_kern_sched_thread_bind_cpu SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	/*
	 * DO NOT remove this bootarg guard or make this non-development.
	 * This kind of binding should only be used for tests and
	 * experiments in a custom configuration, never shipping code.
	 */

	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	int32_t cpuid = sysctl_get_bound_cpuid();

	int32_t new_value;
	int changed;
	int error = sysctl_io_number(req, cpuid, sizeof(cpuid), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		kern_return_t kr = sysctl_thread_bind_cpuid(new_value);

		if (kr == KERN_NOT_SUPPORTED) {
			return ENOTSUP;
		}

		if (kr == KERN_INVALID_VALUE) {
			return ERANGE;
		}
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cpu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cpu, "I", "");
2811 
2812 #if __AMP__
2813 
errno_t mach_to_bsd_errno(kern_return_t mach_err);

extern char sysctl_get_bound_cluster_type(void);
/*
 * kern.sched_thread_bind_cluster_type
 *
 * Read: the calling thread's bound cluster type as a single character.
 * Write: soft-bind the calling thread to the cluster type given by the
 * first character written.  Gated on the enable_skstb boot-arg.
 */
static int
sysctl_kern_sched_thread_bind_cluster_type SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	/* copies in at most one byte; a no-op when there is no new value */
	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	/* NOTE(review): buff[0] is uninitialized when req->newptr is NULL;
	 * harmless today because that path jumps to 'out' without using
	 * cluster_type — confirm. */
	char cluster_type = buff[0];

	if (!req->newptr) {
		goto out;
	}

	kern_return_t kr = thread_soft_bind_cluster_type(current_thread(), cluster_type);
	if (kr != KERN_SUCCESS) {
		return mach_to_bsd_errno(kr);
	}

out:
	buff[0] = sysctl_get_bound_cluster_type();

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cluster_type, "A", "");
2850 
extern char sysctl_get_task_cluster_type(void);
extern kern_return_t sysctl_task_set_cluster_type(char cluster_type);
/*
 * kern.sched_task_set_cluster_type
 *
 * Read: the current task's cluster type as a single character.  Write:
 * set it from the first character written.  Gated on the enable_skstsct
 * boot-arg.
 */
static int
sysctl_kern_sched_task_set_cluster_type SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	if (!PE_parse_boot_argn("enable_skstsct", NULL, 0)) {
		return ENOENT;
	}

	/* copies in at most one byte; a no-op when there is no new value */
	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	/* NOTE(review): buff[0] is uninitialized when req->newptr is NULL;
	 * harmless today because that path jumps to 'out' without using
	 * cluster_type — confirm. */
	char cluster_type = buff[0];

	if (!req->newptr) {
		goto out;
	}

	kern_return_t kr = sysctl_task_set_cluster_type(cluster_type);
	if (kr != KERN_SUCCESS) {
		return mach_to_bsd_errno(kr);
	}

out:
	cluster_type = sysctl_get_task_cluster_type();
	buff[0] = cluster_type;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_task_set_cluster_type, "A", "");
2887 
extern kern_return_t thread_soft_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options);
extern uint32_t thread_bound_cluster_id(thread_t);
/*
 * kern.sched_thread_bind_cluster_id
 *
 * Read: the cluster id the calling thread is bound to.  Write:
 * soft-bind the calling thread to the given cluster id.  Gated on the
 * enable_skstb boot-arg.
 */
static int
sysctl_kern_sched_thread_bind_cluster_id SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	thread_t self = current_thread();
	int32_t cluster_id = thread_bound_cluster_id(self);
	int32_t new_value;
	int changed;
	int error = sysctl_io_number(req, cluster_id, sizeof(cluster_id), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		/*
		 * Note, this binds the thread to the cluster without passing the
		 * THREAD_BIND_ELIGIBLE_ONLY option, which means we won't check
		 * whether the thread is otherwise eligible to run on that cluster--
		 * we will send it there regardless.
		 */
		kern_return_t kr = thread_soft_bind_cluster_id(self, new_value, 0);
		if (kr == KERN_INVALID_VALUE) {
			return ERANGE;
		}

		if (kr != KERN_SUCCESS) {
			return EINVAL;
		}
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_id, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cluster_id, "I", "");
2929 
#if CONFIG_SCHED_EDGE

/* Edge scheduler tunable: use immediate (rather than deferred) IPIs for
 * migration events based on execution latency. */
extern int sched_edge_migrate_ipi_immediate;
SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency");

#endif /* CONFIG_SCHED_EDGE */

#endif /* __AMP__ */

#if DEVELOPMENT || DEBUG
/* Experiment factor: whether kernel timeouts panic (1) or emit telemetry (0). */
extern int timeouts_are_fatal;
EXPERIMENT_FACTOR_INT(timeouts_are_fatal, &timeouts_are_fatal, 0, 1,
    "Do timeouts panic or emit telemetry (0: telemetry, 1: panic)");
#endif
2944 
2945 #if SCHED_HYGIENE_DEBUG
2946 
/*
 * Scheduler hygiene: thresholds (in mach timebase units) and modes for
 * detecting overly long interrupt-masked and preemption-disabled windows.
 */
SYSCTL_QUAD(_kern, OID_AUTO, interrupt_masked_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_LEGACY_EXPERIMENT,
    &interrupt_masked_timeout,
    "Interrupt masked duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, interrupt_masked_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_debug_mode, 0,
    "Enable interrupt masked tracing or panic (0: off, 1: trace, 2: panic)");

SYSCTL_QUAD(_kern, OID_AUTO, sched_preemption_disable_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_LEGACY_EXPERIMENT,
    &sched_preemption_disable_threshold_mt,
    "Preemption disablement duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, sched_preemption_disable_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_debug_mode, 0,
    "Enable preemption disablement tracing or panic (0: off, 1: trace, 2: panic)");
2962 
/*
 * kern.sched_preemption_disable_stats
 *
 * Read: per-CPU maximum preemption-disablement durations as an opaque
 * array of uint64_t (one per CPU).  Any write resets the recorded
 * maxima; a combined read/write returns the pre-reset values.
 */
static int
sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern unsigned int preemption_disable_get_max_durations(uint64_t *durations, size_t count);
	extern void preemption_disable_reset_max_durations(void);

	uint64_t stats[MAX_CPUS]; // maximum per CPU

	/* snapshot before any reset so the caller sees the old maxima */
	unsigned int ncpus = preemption_disable_get_max_durations(stats, MAX_CPUS);
	if (req->newlen > 0) {
		/* Reset when attempting to write to the sysctl. */
		preemption_disable_reset_max_durations();
	}

	return sysctl_io_opaque(req, stats, ncpus * sizeof(uint64_t), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_preemption_disable_stats,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_sched_preemption_disable_stats, "I", "Preemption disablement statistics");
2983 
2984 #endif /* SCHED_HYGIENE_DEBUG */
2985 
/* used for testing by exception_tests */
extern uint32_t ipc_control_port_options;
SYSCTL_INT(_kern, OID_AUTO, ipc_control_port_options,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipc_control_port_options, 0, "");

#endif /* DEVELOPMENT || DEBUG */

/* Read-only export of the task_exc_guard default value. */
extern uint32_t task_exc_guard_default;

SYSCTL_INT(_kern, OID_AUTO, task_exc_guard_default,
    CTLFLAG_RD | CTLFLAG_LOCKED, &task_exc_guard_default, 0, "");
2997 
2998 
2999 static int
3000 sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS
3001 {
3002 #pragma unused(oidp, arg1, arg2)
3003 	uint32_t value = machine_csv(CPUVN_CI) ? 1 : 0;
3004 
3005 	if (req->newptr) {
3006 		return EINVAL;
3007 	}
3008 
3009 	return SYSCTL_OUT(req, &value, sizeof(value));
3010 }
3011 SYSCTL_PROC(_kern, OID_AUTO, tcsm_available,
3012     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
3013     0, 0, sysctl_kern_tcsm_available, "I", "");
3014 
3015 
/*
 * kern.tcsm_enable
 *
 * Writing a non-zero value calls machine_tecs() on the current thread
 * when machine_csv(CPUVN_CI) reports the CPU is affected; on
 * CONFIG_SCHED_SMT kernels it also marks the thread no-SMT.  Returns
 * the thread's previous no-SMT state (always 0 without SMT support).
 */
static int
sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t soflags = 0;
#if CONFIG_SCHED_SMT
	uint32_t old_value = thread_get_no_smt() ? 1 : 0;
#else /* CONFIG_SCHED_SMT */
	uint32_t old_value = 0;
#endif /* CONFIG_SCHED_SMT */

	int error = SYSCTL_IN(req, &soflags, sizeof(soflags));
	if (error) {
		return error;
	}

	/* only engage when requested and the CPU actually needs it */
	if (soflags && machine_csv(CPUVN_CI)) {
#if CONFIG_SCHED_SMT
		thread_set_no_smt(true);
#endif /* CONFIG_SCHED_SMT */
		machine_tecs(current_thread());
	}

	return SYSCTL_OUT(req, &old_value, sizeof(old_value));
}
SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_tcsm_enable, "I", "");
3044 
/*
 * kern.preoslog
 *
 * Returns the pre-oslog boot buffer and frees it once it has been read
 * (or found empty).  Writing a non-zero byte requests oneshot behaviour
 * (used by DumpPanic): only the first such request is allowed; later
 * ones fail with EPERM.
 */
static int
sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	static bool oneshot_executed = false;
	size_t preoslog_size = 0;
	const char *preoslog = NULL;
	int ret = 0;

	// DumpPanic passes a non-zero write value when it needs oneshot behaviour
	if (req->newptr != USER_ADDR_NULL) {
		uint8_t oneshot = 0;
		int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot));
		if (error) {
			return error;
		}

		if (oneshot) {
			/* atomically claim the single oneshot slot */
			if (!os_atomic_cmpxchg(&oneshot_executed, false, true, acq_rel)) {
				return EPERM;
			}
		}
	}

	preoslog = sysctl_debug_get_preoslog(&preoslog_size);
	if (preoslog != NULL && preoslog_size == 0) {
		/* non-NULL but empty: release the buffer, report nothing */
		sysctl_debug_free_preoslog();
		return 0;
	}

	if (preoslog == NULL || preoslog_size == 0) {
		return 0;
	}

	if (req->oldptr == USER_ADDR_NULL) {
		/* size probe: report length without consuming the buffer */
		req->oldidx = preoslog_size;
		return 0;
	}

	ret = SYSCTL_OUT(req, preoslog, preoslog_size);
	sysctl_debug_free_preoslog();
	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_debug_get_preoslog, "-", "");
3091 
3092 #if DEVELOPMENT || DEBUG
extern void sysctl_task_set_no_smt(char no_smt);
extern char sysctl_task_get_no_smt(void);

/*
 * kern.sched_task_set_no_smt
 *
 * Read: the current task's no-SMT flag as a character.  Write: set the
 * flag from the first character written.
 */
static int
sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	/* copies in at most one byte; a no-op when there is no new value */
	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	/* NOTE(review): buff[0] is uninitialized on the read-only path;
	 * harmless because that path jumps to 'out' without using no_smt —
	 * confirm. */
	char no_smt = buff[0];

	if (!req->newptr) {
		goto out;
	}

	sysctl_task_set_no_smt(no_smt);
out:
	no_smt = sysctl_task_get_no_smt();
	buff[0] = no_smt;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_sched_task_set_no_smt, "A", "");
3122 
3123 #if CONFIG_SCHED_SMT
3124 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3125 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3126 {
3127 	int new_value, changed;
3128 	int old_value = thread_get_no_smt() ? 1 : 0;
3129 	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3130 
3131 	if (changed) {
3132 		thread_set_no_smt(!!new_value);
3133 	}
3134 
3135 	return error;
3136 }
3137 #else /* CONFIG_SCHED_SMT */
3138 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)3139 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
3140 {
3141 	return 0;
3142 }
3143 #endif /* CONFIG_SCHED_SMT*/
3144 
/*
 * kern.sched_thread_set_no_smt: per-thread no-SMT flag as an int (0/1).
 * CTLFLAG_ANYBODY: any user may change it, since it only affects the
 * calling thread.
 */
SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_sched_thread_set_no_smt, "I", "");
3148 
3149 #if CONFIG_SCHED_RT_ALLOW
3150 
/*
 * The rt_allow_* tunables below are writable only on DEVELOPMENT/DEBUG
 * kernels; release kernels expose them read-only.
 */
#if DEVELOPMENT || DEBUG
#define RT_ALLOW_CTLFLAGS CTLFLAG_RW
#else
#define RT_ALLOW_CTLFLAGS CTLFLAG_RD
#endif /* DEVELOPMENT || DEBUG */
3156 
3157 static int
sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3158 sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid *oidp,
3159     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3160 {
3161 	extern uint8_t rt_allow_limit_percent;
3162 
3163 	int new_value = 0;
3164 	int old_value = rt_allow_limit_percent;
3165 	int changed = 0;
3166 
3167 	int error = sysctl_io_number(req, old_value, sizeof(old_value),
3168 	    &new_value, &changed);
3169 	if (error != 0) {
3170 		return error;
3171 	}
3172 
3173 	/* Only accept a percentage between 1 and 99 inclusive. */
3174 	if (changed) {
3175 		if (new_value >= 100 || new_value <= 0) {
3176 			return EINVAL;
3177 		}
3178 
3179 		rt_allow_limit_percent = (uint8_t)new_value;
3180 	}
3181 
3182 	return 0;
3183 }
3184 
/* kern.rt_allow_limit_percent: int percentage; writable only per RT_ALLOW_CTLFLAGS. */
SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_percent,
    RT_ALLOW_CTLFLAGS | CTLTYPE_INT | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_rt_allow_limit_percent, "I", "");
3188 
3189 static int
sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3190 sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid *oidp,
3191     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3192 {
3193 	extern uint16_t rt_allow_limit_interval_ms;
3194 
3195 	uint64_t new_value = 0;
3196 	uint64_t old_value = rt_allow_limit_interval_ms;
3197 	int changed = 0;
3198 
3199 	int error = sysctl_io_number(req, old_value, sizeof(old_value),
3200 	    &new_value, &changed);
3201 	if (error != 0) {
3202 		return error;
3203 	}
3204 
3205 	/* Value is in ns. Must be at least 1ms. */
3206 	if (changed) {
3207 		if (new_value < 1 || new_value > UINT16_MAX) {
3208 			return EINVAL;
3209 		}
3210 
3211 		rt_allow_limit_interval_ms = (uint16_t)new_value;
3212 	}
3213 
3214 	return 0;
3215 }
3216 
/* kern.rt_allow_limit_interval_ms: 64-bit value; writable only per RT_ALLOW_CTLFLAGS. */
SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_interval_ms,
    RT_ALLOW_CTLFLAGS | CTLTYPE_QUAD | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_rt_allow_limit_interval_ms, "Q", "");
3220 
3221 #endif /* CONFIG_SCHED_RT_ALLOW */
3222 
3223 
3224 static int
3225 sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS
3226 {
3227 #pragma unused(oidp, arg1, arg2)
3228 	int new_value, changed;
3229 	int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0;
3230 	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3231 
3232 	if (changed) {
3233 		task_set_filter_msg_flag(current_task(), !!new_value);
3234 	}
3235 
3236 	return error;
3237 }
3238 
/* kern.task_set_filter_msg_flag: int 0/1 toggle for the current task's message filter flag. */
SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_task_set_filter_msg_flag, "I", "");
3241 
3242 #if CONFIG_PROC_RESOURCE_LIMITS
3243 
3244 extern mach_port_name_t current_task_get_fatal_port_name(void);
3245 
3246 static int
3247 sysctl_kern_task_get_fatal_port SYSCTL_HANDLER_ARGS
3248 {
3249 #pragma unused(oidp, arg1, arg2)
3250 	int port = 0;
3251 	int flag = 0;
3252 
3253 	if (req->oldptr == USER_ADDR_NULL) {
3254 		req->oldidx = sizeof(mach_port_t);
3255 		return 0;
3256 	}
3257 
3258 	int error = SYSCTL_IN(req, &flag, sizeof(flag));
3259 	if (error) {
3260 		return error;
3261 	}
3262 
3263 	if (flag == 1) {
3264 		port = (int)current_task_get_fatal_port_name();
3265 	}
3266 	return SYSCTL_OUT(req, &port, sizeof(port));
3267 }
3268 
/* machdep.task_get_fatal_port: write flag 1 to retrieve the current task's fatal port name. */
SYSCTL_PROC(_machdep, OID_AUTO, task_get_fatal_port, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_task_get_fatal_port, "I", "");
3271 
3272 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
3273 
3274 extern unsigned int ipc_entry_table_count_max(void);
3275 
3276 static int
3277 sysctl_mach_max_port_table_size SYSCTL_HANDLER_ARGS
3278 {
3279 #pragma unused(oidp, arg1, arg2)
3280 	int old_value = ipc_entry_table_count_max();
3281 	int error = sysctl_io_number(req, old_value, sizeof(int), NULL, NULL);
3282 
3283 	return error;
3284 }
3285 
/*
 * machdep.max_port_table_size: maximum IPC entry table size.
 * NOTE(review): declared CTLFLAG_RW, but the handler passes no new-value
 * pointer to sysctl_io_number, so it is effectively read-only — confirm
 * whether the flag should be CTLFLAG_RD.
 */
SYSCTL_PROC(_machdep, OID_AUTO, max_port_table_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_max_port_table_size, "I", "");
3288 
3289 #endif /* DEVELOPMENT || DEBUG */
3290 
3291 #if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(CONFIG_KDP_COREDUMP_ENCRYPTION)
3292 
3293 #define COREDUMP_ENCRYPTION_KEY_ENTITLEMENT "com.apple.private.coredump-encryption-key"
3294 
3295 static int
3296 sysctl_coredump_encryption_key_update SYSCTL_HANDLER_ARGS
3297 {
3298 	kern_return_t ret = KERN_SUCCESS;
3299 	int error = 0;
3300 	struct kdp_core_encryption_key_descriptor key_descriptor = {
3301 		.kcekd_format = MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256,
3302 	};
3303 
3304 	/* Need to be root and have entitlement */
3305 	if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(COREDUMP_ENCRYPTION_KEY_ENTITLEMENT)) {
3306 		return EPERM;
3307 	}
3308 
3309 	// Sanity-check the given key length
3310 	if (req->newlen > UINT16_MAX) {
3311 		return EINVAL;
3312 	}
3313 
3314 	// It is allowed for the caller to pass in a NULL buffer.
3315 	// This indicates that they want us to forget about any public key we might have.
3316 	if (req->newptr) {
3317 		key_descriptor.kcekd_size = (uint16_t) req->newlen;
3318 		key_descriptor.kcekd_key = kalloc_data(key_descriptor.kcekd_size, Z_WAITOK);
3319 
3320 		if (key_descriptor.kcekd_key == NULL) {
3321 			return ENOMEM;
3322 		}
3323 
3324 		error = SYSCTL_IN(req, key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3325 		if (error) {
3326 			goto out;
3327 		}
3328 	}
3329 
3330 	ret = IOProvideCoreFileAccess(kdp_core_handle_new_encryption_key, (void *)&key_descriptor);
3331 	if (KERN_SUCCESS != ret) {
3332 		printf("Failed to handle the new encryption key. Error 0x%x", ret);
3333 		error = EFAULT;
3334 	}
3335 
3336 out:
3337 	kfree_data(key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3338 	return 0;
3339 }
3340 
/* kern.coredump_encryption_key: write-only, masked; installs or clears the coredump encryption public key. */
SYSCTL_PROC(_kern, OID_AUTO, coredump_encryption_key, CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    0, 0, &sysctl_coredump_encryption_key_update, "-", "Set a new encryption key for coredumps");
3343 
3344 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING && CONFIG_KDP_COREDUMP_ENCRYPTION*/
3345