1 /*
2 * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99
100 #include <machine/smp.h>
101 #include <mach/mach_types.h>
102 #include <kern/kern_types.h>
103 #include <kern/assert.h>
104 #include <kern/kalloc.h>
105 #include <kern/thread.h>
106 #include <kern/clock.h>
107 #include <kern/ledger.h>
108 #include <kern/monotonic.h>
109 #include <kern/task.h>
110 #include <kern/telemetry.h>
111 #include <kern/waitq.h>
112 #include <kern/sched_hygiene.h>
113 #include <kern/sched_prim.h>
114 #include <kern/mpsc_queue.h>
115 #include <kern/debug.h>
116
117 #include <sys/mbuf.h>
118 #include <sys/domain.h>
119 #include <sys/socket.h>
120 #include <sys/socketvar.h>
121 #include <sys/errno.h>
122 #include <sys/syscall.h>
123 #include <sys/pipe.h>
124
125 #include <security/audit/audit.h>
126
127 #include <net/if.h>
128 #include <net/route.h>
129
130 #include <netinet/in.h>
131 #include <netinet/in_systm.h>
132 #include <netinet/ip.h>
133 #include <netinet/in_pcb.h>
134 #include <netinet/ip_var.h>
135 #include <netinet/ip6.h>
136 #include <netinet/tcp.h>
137 #include <netinet/tcp_fsm.h>
138 #include <netinet/tcp_seq.h>
139 #include <netinet/tcp_timer.h>
140 #include <netinet/tcp_var.h>
141 #include <netinet/tcpip.h>
142 /* for wait queue based select */
143 #include <kern/waitq.h>
144 #include <sys/vnode_internal.h>
145 /* for remote time api*/
146 #include <kern/remote_time.h>
147 #include <os/log.h>
148 #include <sys/log_data.h>
149
150 #include <machine/monotonic.h>
151
152 #if CONFIG_MACF
153 #include <security/mac_framework.h>
154 #endif
155
156 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
157 #include <mach_debug/mach_debug_types.h>
158 #endif
159
160 /* for entitlement check */
161 #include <IOKit/IOBSD.h>
162
163 /* XXX should be in a header file somewhere */
164 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
165
166 int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
167 __private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
168 user_addr_t bufp, user_size_t nbyte,
169 off_t offset, int flags, user_ssize_t *retval);
170 __private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
171 user_addr_t bufp, user_size_t nbyte,
172 off_t offset, int flags, user_ssize_t *retval);
173 static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
174
175 /* needed by guarded_writev, etc. */
176 int write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
177 off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval);
178 int writev_uio(struct proc *p, int fd, user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
179 guardid_t *puguard, user_ssize_t *retval);
180
181 #define f_flag fp_glob->fg_flag
182 #define f_type fp_glob->fg_ops->fo_type
183 #define f_cred fp_glob->fg_cred
184 #define f_ops fp_glob->fg_ops
185
186 /*
187 * Validate if the file can be used for random access (pread, pwrite, etc).
188 *
189 * Conditions:
190 * proc_fdlock is held
191 *
192 * Returns: 0 Success
193 * ESPIPE
194 * ENXIO
195 */
196 static int
valid_for_random_access(struct fileproc * fp)197 valid_for_random_access(struct fileproc *fp)
198 {
199 if (__improbable(fp->f_type != DTYPE_VNODE)) {
200 return ESPIPE;
201 }
202
203 vnode_t vp = (struct vnode *)fp_get_data(fp);
204 if (__improbable(vnode_isfifo(vp))) {
205 return ESPIPE;
206 }
207
208 if (__improbable(vp->v_flag & VISTTY)) {
209 return ENXIO;
210 }
211
212 return 0;
213 }
214
215 /*
216 * Returns: 0 Success
217 * EBADF
218 * ESPIPE
219 * ENXIO
220 * fp_lookup:EBADF
221 * valid_for_random_access:ESPIPE
222 * valid_for_random_access:ENXIO
223 */
static int
preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
{
	int error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	/* Spin variant: the lock is held only for short, bounded sections. */
	proc_fdlock_spin(p);

	/* Take an iocount reference on the fileproc (locked variant). */
	error = fp_lookup(p, fd, &fp, 1);

	if (error) {
		proc_fdunlock(p);
		return error;
	}
	/* The descriptor must have been opened for reading. */
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	/* For pread(2)/preadv(2) the file must support random access. */
	if (check_for_pread) {
		if ((error = valid_for_random_access(fp))) {
			goto out;
		}
	}

	/* Success: caller inherits the iocount reference; release with fp_drop(). */
	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
260
261 static int
fp_readv(vfs_context_t ctx,struct fileproc * fp,uio_t uio,int flags,user_ssize_t * retval)262 fp_readv(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
263 user_ssize_t *retval)
264 {
265 int error;
266 user_ssize_t count;
267
268 if ((error = uio_calculateresid_user(uio))) {
269 *retval = 0;
270 return error;
271 }
272
273 count = uio_resid(uio);
274 error = fo_read(fp, uio, flags, ctx);
275
276 switch (error) {
277 case ERESTART:
278 case EINTR:
279 case EWOULDBLOCK:
280 if (uio_resid(uio) != count) {
281 error = 0;
282 }
283 break;
284
285 default:
286 break;
287 }
288
289 *retval = count - uio_resid(uio);
290 return error;
291 }
292
293 /*
294 * Returns: 0 Success
295 * EINVAL
296 * fo_read:???
297 */
298 __private_extern__ int
dofileread(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)299 dofileread(vfs_context_t ctx, struct fileproc *fp,
300 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
301 user_ssize_t *retval)
302 {
303 UIO_STACKBUF(uio_buf, 1);
304 uio_t uio;
305 int spacetype;
306
307 if (nbyte > INT_MAX) {
308 *retval = 0;
309 return EINVAL;
310 }
311
312 spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
313 uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ, &uio_buf[0],
314 sizeof(uio_buf));
315
316 if (uio_addiov(uio, bufp, nbyte) != 0) {
317 *retval = 0;
318 return EINVAL;
319 }
320
321 return fp_readv(ctx, fp, uio, flags, retval);
322 }
323
324 static int
readv_internal(struct proc * p,int fd,uio_t uio,int flags,user_ssize_t * retval)325 readv_internal(struct proc *p, int fd, uio_t uio, int flags,
326 user_ssize_t *retval)
327 {
328 struct fileproc *fp = NULL;
329 struct vfs_context context;
330 int error;
331
332 if ((error = preparefileread(p, &fp, fd, flags & FOF_OFFSET))) {
333 *retval = 0;
334 return error;
335 }
336
337 context = *(vfs_context_current());
338 context.vc_ucred = fp->fp_glob->fg_cred;
339
340 error = fp_readv(&context, fp, uio, flags, retval);
341
342 fp_drop(p, fd, fp, 0);
343 return error;
344 }
345
346 static int
read_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)347 read_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
348 off_t offset, int flags, user_ssize_t *retval)
349 {
350 UIO_STACKBUF(uio_buf, 1);
351 uio_t uio;
352 int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
353
354 if (nbyte > INT_MAX) {
355 *retval = 0;
356 return EINVAL;
357 }
358
359 uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ,
360 &uio_buf[0], sizeof(uio_buf));
361
362 if (uio_addiov(uio, buf, nbyte) != 0) {
363 *retval = 0;
364 return EINVAL;
365 }
366
367 return readv_internal(p, fd, uio, flags, retval);
368 }
369
370 int
read_nocancel(struct proc * p,struct read_nocancel_args * uap,user_ssize_t * retval)371 read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
372 {
373 return read_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
374 retval);
375 }
376
377 /*
378 * Read system call.
379 *
380 * Returns: 0 Success
381 * preparefileread:EBADF
382 * preparefileread:ESPIPE
383 * preparefileread:ENXIO
384 * preparefileread:EBADF
385 * dofileread:???
386 */
387 int
read(struct proc * p,struct read_args * uap,user_ssize_t * retval)388 read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
389 {
390 __pthread_testcancel(1);
391 return read_nocancel(p, (struct read_nocancel_args *)uap, retval);
392 }
393
394 int
pread_nocancel(struct proc * p,struct pread_nocancel_args * uap,user_ssize_t * retval)395 pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
396 {
397 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
398 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
399
400 return read_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
401 FOF_OFFSET, retval);
402 }
403
404 /*
405 * Pread system call
406 *
407 * Returns: 0 Success
408 * preparefileread:EBADF
409 * preparefileread:ESPIPE
410 * preparefileread:ENXIO
411 * preparefileread:EBADF
412 * dofileread:???
413 */
414 int
pread(struct proc * p,struct pread_args * uap,user_ssize_t * retval)415 pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
416 {
417 __pthread_testcancel(1);
418 return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval);
419 }
420
421 /*
422 * Vector read.
423 *
424 * Returns: 0 Success
425 * EINVAL
426 * ENOMEM
427 * preparefileread:EBADF
428 * preparefileread:ESPIPE
429 * preparefileread:ENXIO
430 * preparefileread:EBADF
431 * copyin:EFAULT
432 * rd_uio:???
433 */
static int
readv_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    user_ssize_t *retval)
{
	uio_t uio = NULL;
	int error;
	struct user_iovec *iovp;

	/* iovcnt must be in (0, UIO_MAXIOV]. */
	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) {
		error = EINVAL;
		goto out;
	}
	/*
	 * NOTE(review): unlike writev_uio(), there is no "offset < 0" check
	 * here — confirm whether sys_preadv should reject negative offsets.
	 */

	uio = uio_create(iovcnt, offset,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_READ);

	/* Address of the iovec array embedded in the uio. */
	iovp = uio_iovsaddr_user(uio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Copy the user's iovec array straight into the uio's storage. */
	error = copyin_user_iovec_array(user_iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    iovcnt, iovp);

	if (error) {
		goto out;
	}

	error = readv_internal(p, fd, uio, flags, retval);

out:
	if (uio != NULL) {
		uio_free(uio);
	}

	return error;
}
475
476 int
readv_nocancel(struct proc * p,struct readv_nocancel_args * uap,user_ssize_t * retval)477 readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
478 {
479 return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval);
480 }
481
482 /*
483 * Scatter read system call.
484 */
485 int
readv(struct proc * p,struct readv_args * uap,user_ssize_t * retval)486 readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
487 {
488 __pthread_testcancel(1);
489 return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval);
490 }
491
492 int
sys_preadv_nocancel(struct proc * p,struct preadv_nocancel_args * uap,user_ssize_t * retval)493 sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval)
494 {
495 return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
496 FOF_OFFSET, retval);
497 }
498
499 /*
500 * Preadv system call
501 */
502 int
sys_preadv(struct proc * p,struct preadv_args * uap,user_ssize_t * retval)503 sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval)
504 {
505 __pthread_testcancel(1);
506 return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval);
507 }
508
509 /*
510 * Returns: 0 Success
511 * EBADF
512 * ESPIPE
513 * ENXIO
514 * fp_lookup:EBADF
515 * fp_guard_exception:???
516 * valid_for_random_access:ESPIPE
517 * valid_for_random_access:ENXIO
518 */
static int
preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite,
    guardid_t *puguard)
{
	int error;
	struct fileproc *fp;

	AUDIT_ARG(fd, fd);

	/* Spin variant: the lock is held only for short, bounded sections. */
	proc_fdlock_spin(p);

	if (puguard) {
		/* Guarded path: the supplied guard must match the fd's guard. */
		error = fp_lookup_guarded(p, fd, *puguard, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}
	} else {
		error = fp_lookup(p, fd, &fp, 1);
		if (error) {
			proc_fdunlock(p);
			return error;
		}

		/* Allow EBADF first. */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}

		/* An unguarded write on a write-guarded fd raises a guard exception. */
		if (fp_isguarded(fp, GUARD_WRITE)) {
			error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
			goto out;
		}
	}

	/* For pwrite(2)/pwritev(2) the file must support random access. */
	if (check_for_pwrite) {
		if ((error = valid_for_random_access(fp))) {
			goto out;
		}
	}

	/* Success: caller inherits the iocount reference; release with fp_drop(). */
	*fp_ret = fp;

	proc_fdunlock(p);
	return 0;

out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
576
static int
fp_writev(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
	int error;
	user_ssize_t count;

	/* Validate the iovecs and compute the total transfer size. */
	if ((error = uio_calculateresid_user(uio))) {
		*retval = 0;
		return error;
	}

	count = uio_resid(uio);
	error = fo_write(fp, uio, flags, ctx);

	switch (error) {
	case ERESTART:
	case EINTR:
	case EWOULDBLOCK:
		/* A partial transfer before interruption counts as success. */
		if (uio_resid(uio) != count) {
			error = 0;
		}
		break;

	case EPIPE:
		/*
		 * Deliver SIGPIPE on a broken pipe, unless the descriptor is
		 * a socket or the fileglob opted out via FG_NOSIGPIPE.
		 */
		if (fp->f_type != DTYPE_SOCKET &&
		    (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) {
			/* XXX Raise the signal on the thread? */
			psignal(vfs_context_proc(ctx), SIGPIPE);
		}
		break;

	default:
		break;
	}

	/* If any bytes were transferred, mark the fileglob as written. */
	if ((*retval = count - uio_resid(uio))) {
		os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed);
	}

	return error;
}
619
620 /*
621 * Returns: 0 Success
622 * EINVAL
623 * <fo_write>:EPIPE
624 * <fo_write>:??? [indirect through struct fileops]
625 */
626 __private_extern__ int
dofilewrite(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)627 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
628 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
629 user_ssize_t *retval)
630 {
631 UIO_STACKBUF(uio_buf, 1);
632 uio_t uio;
633 int spacetype;
634
635 if (nbyte > INT_MAX) {
636 *retval = 0;
637 return EINVAL;
638 }
639
640 spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
641 uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE, &uio_buf[0],
642 sizeof(uio_buf));
643
644 if (uio_addiov(uio, bufp, nbyte) != 0) {
645 *retval = 0;
646 return EINVAL;
647 }
648
649 return fp_writev(ctx, fp, uio, flags, retval);
650 }
651
652 static int
writev_internal(struct proc * p,int fd,uio_t uio,int flags,guardid_t * puguard,user_ssize_t * retval)653 writev_internal(struct proc *p, int fd, uio_t uio, int flags,
654 guardid_t *puguard, user_ssize_t *retval)
655 {
656 struct fileproc *fp = NULL;
657 struct vfs_context context;
658 int error;
659
660 if ((error = preparefilewrite(p, &fp, fd, flags & FOF_OFFSET, puguard))) {
661 *retval = 0;
662 return error;
663 }
664
665 context = *(vfs_context_current());
666 context.vc_ucred = fp->fp_glob->fg_cred;
667
668 error = fp_writev(&context, fp, uio, flags, retval);
669
670 fp_drop(p, fd, fp, 0);
671 return error;
672 }
673
674 int
write_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,guardid_t * puguard,user_ssize_t * retval)675 write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
676 off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval)
677 {
678 UIO_STACKBUF(uio_buf, 1);
679 uio_t uio;
680 int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
681
682 if (nbyte > INT_MAX) {
683 *retval = 0;
684 return EINVAL;
685 }
686
687 uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE,
688 &uio_buf[0], sizeof(uio_buf));
689
690 if (uio_addiov(uio, buf, nbyte) != 0) {
691 *retval = 0;
692 return EINVAL;
693 }
694
695 return writev_internal(p, fd, uio, flags, puguard, retval);
696 }
697
698 int
write_nocancel(struct proc * p,struct write_nocancel_args * uap,user_ssize_t * retval)699 write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
700 {
701 return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
702 NULL, retval);
703 }
704
705 /*
706 * Write system call
707 *
708 * Returns: 0 Success
709 * EBADF
710 * fp_lookup:EBADF
711 * dofilewrite:???
712 */
713 int
write(struct proc * p,struct write_args * uap,user_ssize_t * retval)714 write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
715 {
716 __pthread_testcancel(1);
717 return write_nocancel(p, (struct write_nocancel_args *)uap, retval);
718 }
719
720 int
pwrite_nocancel(struct proc * p,struct pwrite_nocancel_args * uap,user_ssize_t * retval)721 pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
722 {
723 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
724 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
725
726 /* XXX: Should be < 0 instead? (See man page + pwritev) */
727 if (uap->offset == (off_t)-1) {
728 return EINVAL;
729 }
730
731 return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
732 FOF_OFFSET, NULL, retval);
733 }
734
735 /*
736 * pwrite system call
737 *
738 * Returns: 0 Success
739 * EBADF
740 * ESPIPE
741 * ENXIO
742 * EINVAL
743 * fp_lookup:EBADF
744 * dofilewrite:???
745 */
746 int
pwrite(struct proc * p,struct pwrite_args * uap,user_ssize_t * retval)747 pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
748 {
749 __pthread_testcancel(1);
750 return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval);
751 }
752
int
writev_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	uio_t uio = NULL;
	int error;
	struct user_iovec *iovp;

	/* iovcnt must be in (0, UIO_MAXIOV] and the offset non-negative. */
	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) {
		error = EINVAL;
		goto out;
	}

	uio = uio_create(iovcnt, offset,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);

	/* Address of the iovec array embedded in the uio. */
	iovp = uio_iovsaddr_user(uio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Copy the user's iovec array straight into the uio's storage. */
	error = copyin_user_iovec_array(user_iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    iovcnt, iovp);

	if (error) {
		goto out;
	}

	error = writev_internal(p, fd, uio, flags, puguard, retval);

out:
	if (uio != NULL) {
		uio_free(uio);
	}

	return error;
}
794
795 int
writev_nocancel(struct proc * p,struct writev_nocancel_args * uap,user_ssize_t * retval)796 writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
797 {
798 return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, NULL, retval);
799 }
800
801 /*
802 * Gather write system call
803 */
804 int
writev(struct proc * p,struct writev_args * uap,user_ssize_t * retval)805 writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
806 {
807 __pthread_testcancel(1);
808 return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval);
809 }
810
811 int
sys_pwritev_nocancel(struct proc * p,struct pwritev_nocancel_args * uap,user_ssize_t * retval)812 sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval)
813 {
814 return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
815 FOF_OFFSET, NULL, retval);
816 }
817
818 /*
819 * Pwritev system call
820 */
821 int
sys_pwritev(struct proc * p,struct pwritev_args * uap,user_ssize_t * retval)822 sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval)
823 {
824 __pthread_testcancel(1);
825 return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval);
826 }
827
828 /*
829 * Ioctl system call
830 *
831 * Returns: 0 Success
832 * EBADF
833 * ENOTTY
834 * ENOMEM
835 * ESRCH
836 * copyin:EFAULT
837 * copyoutEFAULT
838 * fp_lookup:EBADF Bad file descriptor
839 * fo_ioctl:???
840 */
int
ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
{
	struct fileproc *fp = NULL;
	int error = 0;
	u_int size = 0;
	caddr_t datap = NULL, memp = NULL;
	boolean_t is64bit = FALSE;
	int tmp = 0;
#define STK_PARAMS 128
	/* Small argument buffers live on the stack; larger ones are allocated. */
	char stkbuf[STK_PARAMS] = {};
	int fd = uap->fd;
	u_long com = uap->com;
	struct vfs_context context = *vfs_context_current();

	AUDIT_ARG(fd, uap->fd);
	AUDIT_ARG(addr, uap->data);

	is64bit = proc_is64bit(p);
#if CONFIG_AUDIT
	if (is64bit) {
		AUDIT_ARG(value64, com);
	} else {
		AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
	}
#endif /* CONFIG_AUDIT */

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		return ENOTTY;
	}
	if (size > sizeof(stkbuf)) {
		memp = (caddr_t)kalloc_data(size, Z_WAITOK);
		if (memp == 0) {
			return ENOMEM;
		}
		datap = memp;
	} else {
		datap = &stkbuf[0];
	}
	if (com & IOC_IN) {
		if (size) {
			/* Copy the input argument in from user space. */
			error = copyin(uap->data, datap, size);
			if (error) {
				goto out_nofp;
			}
		} else {
			/* XXX - IOC_IN and no size? we should proably return an error here!! */
			/* Pass the user pointer itself as the argument. */
			if (is64bit) {
				*(user_addr_t *)datap = uap->data;
			} else {
				*(uint32_t *)datap = (uint32_t)uap->data;
			}
		}
	} else if ((com & IOC_OUT) && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(datap, size);
	} else if (com & IOC_VOID) {
		/* XXX - this is odd since IOC_VOID means no parameters */
		if (is64bit) {
			*(user_addr_t *)datap = uap->data;
		} else {
			*(uint32_t *)datap = (uint32_t)uap->data;
		}
	}

	proc_fdlock(p);
	error = fp_lookup(p, fd, &fp, 1);
	if (error) {
		proc_fdunlock(p);
		goto out_nofp;
	}

	AUDIT_ARG(file, p, fp);

	/* The descriptor must be open for reading or writing. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	/* Use the credential the file was opened with, not the caller's. */
	context.vc_ucred = fp->fp_glob->fg_cred;

#if CONFIG_MACF
	error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com);
	if (error) {
		goto out;
	}
#endif

	switch (com) {
	case FIONCLEX:
		/* Clear close-on-exec. */
		fp->fp_flags &= ~FP_CLOEXEC;
		break;

	case FIOCLEX:
		/* Set close-on-exec. */
		fp->fp_flags |= FP_CLOEXEC;
		break;

	case FIONBIO:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fnctl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
		}
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
		break;

	case FIOASYNC:
		// FIXME (rdar://54898652)
		//
		// this code is broken if fnctl(F_SETFL), ioctl() are
		// called concurrently for the same fileglob.
		if ((tmp = *(int *)datap)) {
			os_atomic_or(&fp->f_flag, FASYNC, relaxed);
		} else {
			os_atomic_andnot(&fp->f_flag, FASYNC, relaxed);
		}
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
		break;

	case FIOSETOWN:
		tmp = *(int *)datap;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the pgid directly on the socket. */
			((struct socket *)fp_get_data(fp))->so_pgid = tmp;
			break;
		}
		if (fp->f_type == DTYPE_PIPE) {
			error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
			break;
		}
		if (tmp <= 0) {
			tmp = -tmp;
		} else {
			/* A positive value is a pid: translate to its process group. */
			struct proc *p1 = proc_find(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrpid;
			proc_rele(p1);
		}
		error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			*(int *)datap = ((struct socket *)fp_get_data(fp))->so_pgid;
			break;
		}
		error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
		*(int *)datap = -*(int *)datap;
		break;

	default:
		error = fo_ioctl(fp, com, datap, &context);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) && size) {
			error = copyout(datap, uap->data, (u_int)size);
		}
		break;
	}
out:
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

out_nofp:
	if (memp) {
		kfree_data(memp, size);
	}
	return error;
}
1026
/* NOTE(review): presumably the historic BSD select sleep channel — usage not visible in this chunk. */
int selwait;
#define SEL_FIRSTPASS 1
#define SEL_SECONDPASS 2
/* Forward declarations for the select(2) implementation below. */
static int selprocess(struct proc *p, int error, int sel_pass);
static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct select_set *selset);
static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup);
static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim);
static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);
1037
1038 /*
1039 * This is used for the special device nodes that do not implement
1040 * a proper kevent filter (see filt_specattach).
1041 *
1042 * In order to enable kevents on those, the spec_filtops will pretend
1043 * to call select, and try to sniff the selrecord(), if it observes one,
1044 * the knote is attached, which pairs with selwakeup() or selthreadclear().
1045 *
1046 * The last issue remaining, is that we need to serialize filt_specdetach()
1047 * with this, but it really can't know the "selinfo" or any locking domain.
1048 * To make up for this, We protect knote list operations with a global lock,
1049 * which give us a safe shared locking domain.
1050 *
1051 * Note: It is a little distasteful, but we really have very few of those.
1052 * The big problem here is that sharing a lock domain without
1053 * any kind of shared knowledge is a little complicated.
1054 *
1055 * 1. filters can really implement their own kqueue integration
1056 * to side step this,
1057 *
1058 * 2. There's an opportunity to pick a private lock in selspec_attach()
1059 * because both the selinfo and the knote are locked at that time.
1060 * The cleanup story is however a little complicated.
1061 */
/* Global lock group/spinlock protecting selspec knote list operations (see comment above). */
static LCK_GRP_DECLARE(selspec_grp, "spec_filtops");
static LCK_SPIN_DECLARE(selspec_lock, &selspec_grp);
1064
1065 /*
1066 * The "primitive" lock is held.
1067 * The knote lock is held.
1068 */
void
selspec_attach(struct knote *kn, struct selinfo *si)
{
	/* Current selinfo this knote is hooked to, if any. */
	struct selinfo *cur = knote_kn_hook_get_raw(kn);

	if (cur == NULL) {
		/* First attach: mark the selinfo and hook the knote under the global lock. */
		si->si_flags |= SI_SELSPEC;
		lck_spin_lock(&selspec_lock);
		knote_kn_hook_set_raw(kn, (void *) si);
		KNOTE_ATTACH(&si->si_note, kn);
		lck_spin_unlock(&selspec_lock);
	} else {
		/*
		 * selspec_attach() can be called from e.g. filt_spectouch()
		 * which might be called before any event was dequeued.
		 *
		 * It is hence not impossible for the knote already be hooked.
		 *
		 * Note that selwakeup_internal() could possibly
		 * already have cleared this pointer. This is a race
		 * that filt_specprocess will debounce.
		 */
		assert(si->si_flags & SI_SELSPEC);
		assert(cur == si);
	}
}
1095
1096 /*
1097 * The "primitive" lock is _not_ held.
1098 *
1099 * knote "lock" is held
1100 */
1101 void
selspec_detach(struct knote * kn)1102 selspec_detach(struct knote *kn)
1103 {
1104 lck_spin_lock(&selspec_lock);
1105
1106 if (!KNOTE_IS_AUTODETACHED(kn)) {
1107 struct selinfo *sip = knote_kn_hook_get_raw(kn);
1108 if (sip) {
1109 KNOTE_DETACH(&sip->si_note, kn);
1110 }
1111 }
1112
1113 knote_kn_hook_set_raw(kn, NULL);
1114
1115 lck_spin_unlock(&selspec_lock);
1116 }
1117
1118 /*
1119 * Select system call.
1120 *
1121 * Returns: 0 Success
1122 * EINVAL Invalid argument
1123 * EAGAIN Nonconformant error if allocation fails
1124 */
1125 int
select(struct proc * p,struct select_args * uap,int32_t * retval)1126 select(struct proc *p, struct select_args *uap, int32_t *retval)
1127 {
1128 __pthread_testcancel(1);
1129 return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
1130 }
1131
1132 int
select_nocancel(struct proc * p,struct select_nocancel_args * uap,int32_t * retval)1133 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
1134 {
1135 uint64_t timeout = 0;
1136
1137 if (uap->tv) {
1138 int err;
1139 struct timeval atv;
1140 if (IS_64BIT_PROCESS(p)) {
1141 struct user64_timeval atv64;
1142 err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1143 /* Loses resolution - assume timeout < 68 years */
1144 atv.tv_sec = (__darwin_time_t)atv64.tv_sec;
1145 atv.tv_usec = atv64.tv_usec;
1146 } else {
1147 struct user32_timeval atv32;
1148 err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1149 atv.tv_sec = atv32.tv_sec;
1150 atv.tv_usec = atv32.tv_usec;
1151 }
1152 if (err) {
1153 return err;
1154 }
1155
1156 if (itimerfix(&atv)) {
1157 err = EINVAL;
1158 return err;
1159 }
1160
1161 clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
1162 }
1163
1164 return select_internal(p, uap, timeout, retval);
1165 }
1166
/*
 * pselect system call.
 *
 * Cancellation point: checks for a pending pthread cancellation, then
 * forwards to the non-cancellable implementation.
 */
int
pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
}
1173
1174 int
pselect_nocancel(struct proc * p,struct pselect_nocancel_args * uap,int32_t * retval)1175 pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
1176 {
1177 int err;
1178 struct uthread *ut;
1179 uint64_t timeout = 0;
1180
1181 if (uap->ts) {
1182 struct timespec ts;
1183
1184 if (IS_64BIT_PROCESS(p)) {
1185 struct user64_timespec ts64;
1186 err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
1187 ts.tv_sec = (__darwin_time_t)ts64.tv_sec;
1188 ts.tv_nsec = (long)ts64.tv_nsec;
1189 } else {
1190 struct user32_timespec ts32;
1191 err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
1192 ts.tv_sec = ts32.tv_sec;
1193 ts.tv_nsec = ts32.tv_nsec;
1194 }
1195 if (err) {
1196 return err;
1197 }
1198
1199 if (!timespec_is_valid(&ts)) {
1200 return EINVAL;
1201 }
1202 clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
1203 }
1204
1205 ut = current_uthread();
1206
1207 if (uap->mask != USER_ADDR_NULL) {
1208 /* save current mask, then copyin and set new mask */
1209 sigset_t newset;
1210 err = copyin(uap->mask, &newset, sizeof(sigset_t));
1211 if (err) {
1212 return err;
1213 }
1214 ut->uu_oldmask = ut->uu_sigmask;
1215 ut->uu_flag |= UT_SAS_OLDMASK;
1216 ut->uu_sigmask = (newset & ~sigcantmask);
1217 }
1218
1219 err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);
1220
1221 if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
1222 /*
1223 * Restore old mask (direct return case). NOTE: EINTR can also be returned
1224 * if the thread is cancelled. In that case, we don't reset the signal
1225 * mask to its original value (which usually happens in the signal
1226 * delivery path). This behavior is permitted by POSIX.
1227 */
1228 ut->uu_sigmask = ut->uu_oldmask;
1229 ut->uu_oldmask = 0;
1230 ut->uu_flag &= ~UT_SAS_OLDMASK;
1231 }
1232
1233 return err;
1234 }
1235
1236 void
select_cleanup_uthread(struct _select * sel)1237 select_cleanup_uthread(struct _select *sel)
1238 {
1239 kfree_data(sel->ibits, 2 * sel->nbytes);
1240 sel->ibits = sel->obits = NULL;
1241 sel->nbytes = 0;
1242 }
1243
1244 static int
select_grow_uthread_cache(struct _select * sel,uint32_t nbytes)1245 select_grow_uthread_cache(struct _select *sel, uint32_t nbytes)
1246 {
1247 uint32_t *buf;
1248
1249 buf = kalloc_data(2 * nbytes, Z_WAITOK | Z_ZERO);
1250 if (buf) {
1251 select_cleanup_uthread(sel);
1252 sel->ibits = buf;
1253 sel->obits = buf + nbytes / sizeof(uint32_t);
1254 sel->nbytes = nbytes;
1255 return true;
1256 }
1257 return false;
1258 }
1259
1260 static void
select_bzero_uthread_cache(struct _select * sel)1261 select_bzero_uthread_cache(struct _select *sel)
1262 {
1263 bzero(sel->ibits, sel->nbytes * 2);
1264 }
1265
1266 /*
1267 * Generic implementation of {,p}select. Care: we type-pun uap across the two
1268 * syscalls, which differ slightly. The first 4 arguments (nfds and the fd sets)
1269 * are identical. The 5th (timeout) argument points to different types, so we
1270 * unpack in the syscall-specific code, but the generic code still does a null
1271 * check on this argument to determine if a timeout was specified.
1272 */
1273 static int
select_internal(struct proc * p,struct select_nocancel_args * uap,uint64_t timeout,int32_t * retval)1274 select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
1275 {
1276 struct uthread *uth = current_uthread();
1277 struct _select *sel = &uth->uu_select;
1278 struct _select_data *seldata = &uth->uu_save.uus_select_data;
1279 int error = 0;
1280 u_int ni, nw;
1281
1282 *retval = 0;
1283
1284 seldata->abstime = timeout;
1285 seldata->args = uap;
1286 seldata->retval = retval;
1287 seldata->count = 0;
1288
1289 if (uap->nd < 0) {
1290 return EINVAL;
1291 }
1292
1293 if (uap->nd > p->p_fd.fd_nfiles) {
1294 uap->nd = p->p_fd.fd_nfiles; /* forgiving; slightly wrong */
1295 }
1296 nw = howmany(uap->nd, NFDBITS);
1297 ni = nw * sizeof(fd_mask);
1298
1299 /*
1300 * if the previously allocated space for the bits is smaller than
1301 * what is requested or no space has yet been allocated for this
1302 * thread, allocate enough space now.
1303 *
1304 * Note: If this process fails, select() will return EAGAIN; this
1305 * is the same thing pool() returns in a no-memory situation, but
1306 * it is not a POSIX compliant error code for select().
1307 */
1308 if (sel->nbytes >= (3 * ni)) {
1309 select_bzero_uthread_cache(sel);
1310 } else if (!select_grow_uthread_cache(sel, 3 * ni)) {
1311 return EAGAIN;
1312 }
1313
1314 /*
1315 * get the bits from the user address space
1316 */
1317 #define getbits(name, x) \
1318 (uap->name ? copyin(uap->name, &sel->ibits[(x) * nw], ni) : 0)
1319
1320 if ((error = getbits(in, 0))) {
1321 return error;
1322 }
1323 if ((error = getbits(ou, 1))) {
1324 return error;
1325 }
1326 if ((error = getbits(ex, 2))) {
1327 return error;
1328 }
1329 #undef getbits
1330
1331 if ((error = selcount(p, sel->ibits, uap->nd, &seldata->count))) {
1332 return error;
1333 }
1334
1335 if (uth->uu_selset == NULL) {
1336 uth->uu_selset = select_set_alloc();
1337 }
1338 return selprocess(p, 0, SEL_FIRSTPASS);
1339 }
1340
/*
 * Continuation entry point: tsleep1() resumes a blocked select here,
 * which re-enters selprocess() for the second pass.
 */
static int
selcontinue(int error)
{
	return selprocess(current_proc(), error, SEL_SECONDPASS);
}
1346
1347
1348 /*
1349 * selprocess
1350 *
1351 * Parameters: error The error code from our caller
1352 * sel_pass The pass we are on
1353 */
1354 int
selprocess(struct proc * p,int error,int sel_pass)1355 selprocess(struct proc *p, int error, int sel_pass)
1356 {
1357 struct uthread *uth = current_uthread();
1358 struct _select *sel = &uth->uu_select;
1359 struct _select_data *seldata = &uth->uu_save.uus_select_data;
1360 struct select_nocancel_args *uap = seldata->args;
1361 int *retval = seldata->retval;
1362
1363 int unwind = 1;
1364 int prepost = 0;
1365 int somewakeup = 0;
1366 int doretry = 0;
1367 wait_result_t wait_result;
1368
1369 if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) {
1370 unwind = 0;
1371 }
1372 if (seldata->count == 0) {
1373 unwind = 0;
1374 }
1375 retry:
1376 if (error != 0) {
1377 goto done;
1378 }
1379
1380 OSBitOrAtomic(P_SELECT, &p->p_flag);
1381
1382 /* skip scans if the select is just for timeouts */
1383 if (seldata->count) {
1384 error = selscan(p, sel, seldata, uap->nd, retval, sel_pass,
1385 uth->uu_selset);
1386 if (error || *retval) {
1387 goto done;
1388 }
1389 if (prepost || somewakeup) {
1390 /*
1391 * if the select of log, then we can wakeup and
1392 * discover some one else already read the data;
1393 * go to select again if time permits
1394 */
1395 prepost = 0;
1396 somewakeup = 0;
1397 doretry = 1;
1398 }
1399 }
1400
1401 if (uap->tv) {
1402 uint64_t now;
1403
1404 clock_get_uptime(&now);
1405 if (now >= seldata->abstime) {
1406 goto done;
1407 }
1408 }
1409
1410 if (doretry) {
1411 /* cleanup obits and try again */
1412 doretry = 0;
1413 sel_pass = SEL_FIRSTPASS;
1414 goto retry;
1415 }
1416
1417 /*
1418 * To effect a poll, the timeout argument should be
1419 * non-nil, pointing to a zero-valued timeval structure.
1420 */
1421 if (uap->tv && seldata->abstime == 0) {
1422 goto done;
1423 }
1424
1425 /* No spurious wakeups due to colls,no need to check for them */
1426 if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
1427 sel_pass = SEL_FIRSTPASS;
1428 goto retry;
1429 }
1430
1431 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1432
1433 /* if the select is just for timeout skip check */
1434 if (seldata->count && (sel_pass == SEL_SECONDPASS)) {
1435 panic("selprocess: 2nd pass assertwaiting");
1436 }
1437
1438 wait_result = waitq_assert_wait64_leeway(uth->uu_selset,
1439 NO_EVENT64, THREAD_ABORTSAFE,
1440 TIMEOUT_URGENCY_USER_NORMAL,
1441 seldata->abstime,
1442 TIMEOUT_NO_LEEWAY);
1443 if (wait_result != THREAD_AWAKENED) {
1444 /* there are no preposted events */
1445 error = tsleep1(NULL, PSOCK | PCATCH,
1446 "select", 0, selcontinue);
1447 } else {
1448 prepost = 1;
1449 error = 0;
1450 }
1451
1452 if (error == 0) {
1453 sel_pass = SEL_SECONDPASS;
1454 if (!prepost) {
1455 somewakeup = 1;
1456 }
1457 goto retry;
1458 }
1459 done:
1460 if (unwind) {
1461 seldrop(p, sel->ibits, uap->nd, seldata->count);
1462 select_set_reset(uth->uu_selset);
1463 }
1464 OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
1465 /* select is not restarted after signals... */
1466 if (error == ERESTART) {
1467 error = EINTR;
1468 }
1469 if (error == EWOULDBLOCK) {
1470 error = 0;
1471 }
1472
1473 if (error == 0) {
1474 uint32_t nw = howmany(uap->nd, NFDBITS);
1475 uint32_t ni = nw * sizeof(fd_mask);
1476
1477 #define putbits(name, x) \
1478 (uap->name ? copyout(&sel->obits[(x) * nw], uap->name, ni) : 0)
1479 int e0 = putbits(in, 0);
1480 int e1 = putbits(ou, 1);
1481 int e2 = putbits(ex, 2);
1482
1483 error = e0 ?: e1 ?: e2;
1484 #undef putbits
1485 }
1486
1487 if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
1488 /* restore signal mask - continuation case */
1489 uth->uu_sigmask = uth->uu_oldmask;
1490 uth->uu_oldmask = 0;
1491 uth->uu_flag &= ~UT_SAS_OLDMASK;
1492 }
1493
1494 return error;
1495 }
1496
1497
1498 /**
1499 * remove the fileproc's underlying waitq from the supplied waitq set;
1500 * clear FP_INSELECT when appropriate
1501 *
1502 * Parameters:
1503 * fp File proc that is potentially currently in select
1504 * selset Waitq set to which the fileproc may belong
1505 * (usually this is the thread's private waitq set)
1506 * Conditions:
1507 * proc_fdlock is held
1508 */
1509 static void
selunlinkfp(struct fileproc * fp,struct select_set * selset)1510 selunlinkfp(struct fileproc *fp, struct select_set *selset)
1511 {
1512 if (fp->fp_flags & FP_INSELECT) {
1513 if (fp->fp_guard_attrs) {
1514 if (fp->fp_guard->fpg_wset == selset) {
1515 fp->fp_guard->fpg_wset = NULL;
1516 fp->fp_flags &= ~FP_INSELECT;
1517 }
1518 } else {
1519 if (fp->fp_wset == selset) {
1520 fp->fp_wset = NULL;
1521 fp->fp_flags &= ~FP_INSELECT;
1522 }
1523 }
1524 }
1525 }
1526
1527 /**
1528 * connect a fileproc to the given selset, potentially bridging to a waitq
1529 * pointed to indirectly by wq_data
1530 *
1531 * Parameters:
1532 * fp File proc potentially currently in select
1533 * selset Waitq set to which the fileproc should now belong
1534 * (usually this is the thread's private waitq set)
1535 *
1536 * Conditions:
1537 * proc_fdlock is held
1538 */
1539 static void
sellinkfp(struct fileproc * fp,struct select_set * selset,waitq_link_t * linkp)1540 sellinkfp(struct fileproc *fp, struct select_set *selset, waitq_link_t *linkp)
1541 {
1542 if ((fp->fp_flags & FP_INSELECT) == 0) {
1543 if (fp->fp_guard_attrs) {
1544 fp->fp_guard->fpg_wset = selset;
1545 } else {
1546 fp->fp_wset = selset;
1547 }
1548 fp->fp_flags |= FP_INSELECT;
1549 } else {
1550 fp->fp_flags |= FP_SELCONFLICT;
1551 if (linkp->wqlh == NULL) {
1552 *linkp = waitq_link_alloc(WQT_SELECT_SET);
1553 }
1554 select_set_link(&select_conflict_queue, selset, linkp);
1555 }
1556 }
1557
1558
1559 /*
1560 * selscan
1561 *
1562 * Parameters: p Process performing the select
1563 * sel The per-thread select context structure
1564 * nfd The number of file descriptors to scan
1565 * retval The per thread system call return area
1566 * sel_pass Which pass this is; allowed values are
1567 * SEL_FIRSTPASS and SEL_SECONDPASS
1568 * selset The per thread wait queue set
1569 *
1570 * Returns: 0 Success
1571 * EIO Invalid p->p_fd field XXX Obsolete?
1572 * EBADF One of the files in the bit vector is
1573 * invalid.
1574 */
1575 static int
selscan(struct proc * p,struct _select * sel,struct _select_data * seldata,int nfd,int32_t * retval,int sel_pass,struct select_set * selset)1576 selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
1577 int nfd, int32_t *retval, int sel_pass, struct select_set *selset)
1578 {
1579 int msk, i, j, fd;
1580 u_int32_t bits;
1581 struct fileproc *fp;
1582 int n = 0; /* count of bits */
1583 int nc = 0; /* bit vector offset (nc'th bit) */
1584 static int flag[3] = { FREAD, FWRITE, 0 };
1585 u_int32_t *iptr, *optr;
1586 u_int nw;
1587 u_int32_t *ibits, *obits;
1588 int count;
1589 struct vfs_context context = {
1590 .vc_thread = current_thread(),
1591 };
1592 waitq_link_t link = WQL_NULL;
1593 void *s_data;
1594
1595 ibits = sel->ibits;
1596 obits = sel->obits;
1597
1598 nw = howmany(nfd, NFDBITS);
1599
1600 count = seldata->count;
1601
1602 nc = 0;
1603 if (!count) {
1604 *retval = 0;
1605 return 0;
1606 }
1607
1608 if (sel_pass == SEL_FIRSTPASS) {
1609 /*
1610 * Make sure the waitq-set is all clean:
1611 *
1612 * select loops until it finds at least one event, however it
1613 * doesn't mean that the event that woke up select is still
1614 * fired by the time the second pass runs, and then
1615 * select_internal will loop back to a first pass.
1616 */
1617 select_set_reset(selset);
1618 s_data = &link;
1619 } else {
1620 s_data = NULL;
1621 }
1622
1623 proc_fdlock(p);
1624 for (msk = 0; msk < 3; msk++) {
1625 iptr = (u_int32_t *)&ibits[msk * nw];
1626 optr = (u_int32_t *)&obits[msk * nw];
1627
1628 for (i = 0; i < nfd; i += NFDBITS) {
1629 bits = iptr[i / NFDBITS];
1630
1631 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1632 bits &= ~(1U << j);
1633
1634 fp = fp_get_noref_locked(p, fd);
1635 if (fp == NULL) {
1636 /*
1637 * If we abort because of a bad
1638 * fd, let the caller unwind...
1639 */
1640 proc_fdunlock(p);
1641 return EBADF;
1642 }
1643 if (sel_pass == SEL_SECONDPASS) {
1644 selunlinkfp(fp, selset);
1645 } else if (link.wqlh == NULL) {
1646 link = waitq_link_alloc(WQT_SELECT_SET);
1647 }
1648
1649 context.vc_ucred = fp->f_cred;
1650
1651 /* The select; set the bit, if true */
1652 if (fo_select(fp, flag[msk], s_data, &context)) {
1653 optr[fd / NFDBITS] |= (1U << (fd % NFDBITS));
1654 n++;
1655 }
1656 if (sel_pass == SEL_FIRSTPASS) {
1657 /*
1658 * Hook up the thread's waitq set either to
1659 * the fileproc structure, or to the global
1660 * conflict queue: but only on the first
1661 * select pass.
1662 */
1663 sellinkfp(fp, selset, &link);
1664 }
1665 nc++;
1666 }
1667 }
1668 }
1669 proc_fdunlock(p);
1670
1671 if (link.wqlh) {
1672 waitq_link_free(WQT_SELECT_SET, link);
1673 }
1674
1675 *retval = n;
1676 return 0;
1677 }
1678
1679 static int poll_callback(struct kevent_qos_s *, kevent_ctx_t);
1680
/*
 * Poll system call.
 *
 * Cancellation point: checks for a pending pthread cancellation, then
 * forwards to the non-cancellable implementation.
 */
int
poll(struct proc *p, struct poll_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval);
}
1687
1688
/*
 * poll_nocancel
 *
 * Non-cancellable core of poll(): translates each pollfd entry into
 * one-shot kevents registered on a private kqueue, scans the kqueue
 * (possibly blocking until the timeout), then copies the pollfd array
 * back out with revents filled in by poll_callback().
 */
int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
	struct pollfd *fds = NULL;
	struct kqueue *kq = NULL;
	int error = 0;
	u_int nfds = uap->nfds;
	u_int rfds = 0;
	rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE);
	size_t ni = nfds * sizeof(struct pollfd);

	/*
	 * This is kinda bogus. We have fd limits, but that is not
	 * really related to the size of the pollfd array. Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits. We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	if (nfds > OPEN_MAX ||
	    (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) {
		return EINVAL;
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		return EAGAIN;
	}

	if (nfds) {
		fds = (struct pollfd *)kalloc_data(ni, Z_WAITOK);
		if (NULL == fds) {
			error = EAGAIN;
			goto out;
		}

		error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
		if (error) {
			goto out;
		}
	}

	/* JMM - all this P_SELECT stuff is bogus */
	OSBitOrAtomic(P_SELECT, &p->p_flag);
	for (u_int i = 0; i < nfds; i++) {
		short events = fds[i].events;
		__assert_only int rc;

		/* per spec, ignore fd values below zero */
		if (fds[i].fd < 0) {
			fds[i].revents = 0;
			continue;
		}

		/* convert the poll event into a kqueue kevent */
		struct kevent_qos_s kev = {
			.ident = fds[i].fd,
			.flags = EV_ADD | EV_ONESHOT | EV_POLL,
			.udata = i, /* Index into pollfd array */
		};

		/* Handle input events */
		if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) {
			kev.filter = EVFILT_READ;
			if (events & (POLLPRI | POLLRDBAND)) {
				kev.flags |= EV_OOBAND;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle output events (skipped if registration already failed) */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) {
			kev.filter = EVFILT_WRITE;
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle BSD extension vnode events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) {
			kev.filter = EVFILT_VNODE;
			kev.fflags = 0;
			if (events & POLLEXTEND) {
				kev.fflags |= NOTE_EXTEND;
			}
			if (events & POLLATTRIB) {
				kev.fflags |= NOTE_ATTRIB;
			}
			if (events & POLLNLINK) {
				kev.fflags |= NOTE_LINK;
			}
			if (events & POLLWRITE) {
				kev.fflags |= NOTE_WRITE;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* registration failure surfaces as POLLNVAL on this entry */
		if (kev.flags & EV_ERROR) {
			fds[i].revents = POLLNVAL;
			rfds++;
		} else {
			fds[i].revents = 0;
		}
	}

	/*
	 * Did we have any trouble registering?
	 * If user space passed 0 FDs, then respect any timeout value passed.
	 * This is an extremely inefficient sleep. If user space passed one or
	 * more FDs, and we had trouble registering _all_ of them, then bail
	 * out. If a subset of the provided FDs failed to register, then we
	 * will still call the kqueue_scan function.
	 */
	if (nfds && (rfds == nfds)) {
		goto done;
	}

	/* scan for, and possibly wait for, the kevents to trigger */
	kevent_ctx_t kectx = kevent_get_context(current_thread());
	*kectx = (struct kevent_ctx_s){
		.kec_process_noutputs = rfds,
		.kec_process_flags = KEVENT_FLAG_POLL,
		.kec_deadline = 0, /* wait forever */
		.kec_poll_fds = fds,
	};

	/*
	 * If any events have trouble registering, an event has fired and we
	 * shouldn't wait for events in kqueue_scan.
	 */
	if (rfds) {
		kectx->kec_process_flags |= KEVENT_FLAG_IMMEDIATE;
	} else if (uap->timeout != -1) {
		clock_interval_to_deadline(uap->timeout, NSEC_PER_MSEC,
		    &kectx->kec_deadline);
	}

	error = kqueue_scan(kq, kectx->kec_process_flags, kectx, poll_callback);
	rfds = kectx->kec_process_noutputs;

done:
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* poll is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == 0) {
		error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
		*retval = rfds;
	}

out:
	kfree_data(fds, ni);

	kqueue_dealloc(kq);
	return error;
}
1848
1849 static int
poll_callback(struct kevent_qos_s * kevp,kevent_ctx_t kectx)1850 poll_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
1851 {
1852 assert(kectx->kec_process_flags & KEVENT_FLAG_POLL);
1853 struct pollfd *fds = &kectx->kec_poll_fds[kevp->udata];
1854
1855 short prev_revents = fds->revents;
1856 short mask = 0;
1857
1858 /* convert the results back into revents */
1859 if (kevp->flags & EV_EOF) {
1860 fds->revents |= POLLHUP;
1861 }
1862 if (kevp->flags & EV_ERROR) {
1863 fds->revents |= POLLERR;
1864 }
1865
1866 switch (kevp->filter) {
1867 case EVFILT_READ:
1868 if (fds->revents & POLLHUP) {
1869 mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND);
1870 } else {
1871 mask = (POLLIN | POLLRDNORM);
1872 if (kevp->flags & EV_OOBAND) {
1873 mask |= (POLLPRI | POLLRDBAND);
1874 }
1875 }
1876 fds->revents |= (fds->events & mask);
1877 break;
1878
1879 case EVFILT_WRITE:
1880 if (!(fds->revents & POLLHUP)) {
1881 fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND));
1882 }
1883 break;
1884
1885 case EVFILT_VNODE:
1886 if (kevp->fflags & NOTE_EXTEND) {
1887 fds->revents |= (fds->events & POLLEXTEND);
1888 }
1889 if (kevp->fflags & NOTE_ATTRIB) {
1890 fds->revents |= (fds->events & POLLATTRIB);
1891 }
1892 if (kevp->fflags & NOTE_LINK) {
1893 fds->revents |= (fds->events & POLLNLINK);
1894 }
1895 if (kevp->fflags & NOTE_WRITE) {
1896 fds->revents |= (fds->events & POLLWRITE);
1897 }
1898 break;
1899 }
1900
1901 if (fds->revents != 0 && prev_revents == 0) {
1902 kectx->kec_process_noutputs++;
1903 }
1904
1905 return 0;
1906 }
1907
/*
 * Generic select routine for devices that are always ready: always
 * reports the descriptor as selectable.
 */
int
seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
{
	return 1;
}
1913
1914 /*
1915 * selcount
1916 *
1917 * Count the number of bits set in the input bit vector, and establish an
1918 * outstanding fp->fp_iocount for each of the descriptors which will be in
1919 * use in the select operation.
1920 *
1921 * Parameters: p The process doing the select
1922 * ibits The input bit vector
1923 * nfd The number of fd's in the vector
1924 * countp Pointer to where to store the bit count
1925 *
1926 * Returns: 0 Success
1927 * EIO Bad per process open file table
1928 * EBADF One of the bits in the input bit vector
1929 * references an invalid fd
1930 *
1931 * Implicit: *countp (modified) Count of fd's
1932 *
1933 * Notes: This function is the first pass under the proc_fdlock() that
1934 * permits us to recognize invalid descriptors in the bit vector;
1935 * the may, however, not remain valid through the drop and
1936 * later reacquisition of the proc_fdlock().
1937 */
1938 static int
selcount(struct proc * p,u_int32_t * ibits,int nfd,int * countp)1939 selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
1940 {
1941 int msk, i, j, fd;
1942 u_int32_t bits;
1943 struct fileproc *fp;
1944 int n = 0;
1945 u_int32_t *iptr;
1946 u_int nw;
1947 int error = 0;
1948 int need_wakeup = 0;
1949
1950 nw = howmany(nfd, NFDBITS);
1951
1952 proc_fdlock(p);
1953 for (msk = 0; msk < 3; msk++) {
1954 iptr = (u_int32_t *)&ibits[msk * nw];
1955 for (i = 0; i < nfd; i += NFDBITS) {
1956 bits = iptr[i / NFDBITS];
1957 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1958 bits &= ~(1U << j);
1959
1960 fp = fp_get_noref_locked(p, fd);
1961 if (fp == NULL) {
1962 *countp = 0;
1963 error = EBADF;
1964 goto bad;
1965 }
1966 os_ref_retain_locked(&fp->fp_iocount);
1967 n++;
1968 }
1969 }
1970 }
1971 proc_fdunlock(p);
1972
1973 *countp = n;
1974 return 0;
1975
1976 bad:
1977 if (n == 0) {
1978 goto out;
1979 }
1980 /* Ignore error return; it's already EBADF */
1981 (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup);
1982
1983 out:
1984 proc_fdunlock(p);
1985 if (need_wakeup) {
1986 wakeup(&p->p_fd.fd_fpdrainwait);
1987 }
1988 return error;
1989 }
1990
1991
1992 /*
1993 * seldrop_locked
1994 *
1995 * Drop outstanding wait queue references set up during selscan(); drop the
1996 * outstanding per fileproc fp_iocount picked up during the selcount().
1997 *
1998 * Parameters: p Process performing the select
1999 * ibits Input bit bector of fd's
2000 * nfd Number of fd's
2001 * lim Limit to number of vector entries to
2002 * consider, or -1 for "all"
2003 * inselect True if
2004 * need_wakeup Pointer to flag to set to do a wakeup
2005 * if f_iocont on any descriptor goes to 0
2006 *
2007 * Returns: 0 Success
2008 * EBADF One or more fds in the bit vector
2009 * were invalid, but the rest
2010 * were successfully dropped
2011 *
2012 * Notes: An fd make become bad while the proc_fdlock() is not held,
2013 * if a multithreaded application closes the fd out from under
2014 * the in progress select. In this case, we still have to
2015 * clean up after the set up on the remaining fds.
2016 */
2017 static int
seldrop_locked(struct proc * p,u_int32_t * ibits,int nfd,int lim,int * need_wakeup)2018 seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup)
2019 {
2020 int msk, i, j, nc, fd;
2021 u_int32_t bits;
2022 struct fileproc *fp;
2023 u_int32_t *iptr;
2024 u_int nw;
2025 int error = 0;
2026 uthread_t uth = current_uthread();
2027 struct _select_data *seldata;
2028
2029 *need_wakeup = 0;
2030
2031 nw = howmany(nfd, NFDBITS);
2032 seldata = &uth->uu_save.uus_select_data;
2033
2034 nc = 0;
2035 for (msk = 0; msk < 3; msk++) {
2036 iptr = (u_int32_t *)&ibits[msk * nw];
2037 for (i = 0; i < nfd; i += NFDBITS) {
2038 bits = iptr[i / NFDBITS];
2039 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
2040 bits &= ~(1U << j);
2041 /*
2042 * If we've already dropped as many as were
2043 * counted/scanned, then we are done.
2044 */
2045 if (nc >= lim) {
2046 goto done;
2047 }
2048
2049 /*
2050 * We took an I/O reference in selcount,
2051 * so the fp can't possibly be NULL.
2052 */
2053 fp = fp_get_noref_locked_with_iocount(p, fd);
2054 selunlinkfp(fp, uth->uu_selset);
2055
2056 nc++;
2057
2058 const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount);
2059 if (0 == refc) {
2060 panic("fp_iocount overdecrement!");
2061 }
2062
2063 if (1 == refc) {
2064 /*
2065 * The last iocount is responsible for clearing
2066 * selconfict flag - even if we didn't set it -
2067 * and is also responsible for waking up anyone
2068 * waiting on iocounts to drain.
2069 */
2070 if (fp->fp_flags & FP_SELCONFLICT) {
2071 fp->fp_flags &= ~FP_SELCONFLICT;
2072 }
2073 if (p->p_fd.fd_fpdrainwait) {
2074 p->p_fd.fd_fpdrainwait = 0;
2075 *need_wakeup = 1;
2076 }
2077 }
2078 }
2079 }
2080 }
2081 done:
2082 return error;
2083 }
2084
2085
2086 static int
seldrop(struct proc * p,u_int32_t * ibits,int nfd,int lim)2087 seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim)
2088 {
2089 int error;
2090 int need_wakeup = 0;
2091
2092 proc_fdlock(p);
2093 error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup);
2094 proc_fdunlock(p);
2095 if (need_wakeup) {
2096 wakeup(&p->p_fd.fd_fpdrainwait);
2097 }
2098 return error;
2099 }
2100
2101 /*
2102 * Record a select request.
2103 */
2104 void
selrecord(__unused struct proc * selector,struct selinfo * sip,void * s_data)2105 selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
2106 {
2107 struct select_set *selset = current_uthread()->uu_selset;
2108
2109 /* do not record if this is second pass of select */
2110 if (!s_data) {
2111 return;
2112 }
2113
2114 if (selset == SELSPEC_RECORD_MARKER) {
2115 /*
2116 * The kevent subsystem is trying to sniff
2117 * the selinfo::si_note to attach to.
2118 */
2119 ((selspec_record_hook_t)s_data)(sip);
2120 } else {
2121 waitq_link_t *linkp = s_data;
2122
2123 if (!waitq_is_valid(&sip->si_waitq)) {
2124 waitq_init(&sip->si_waitq, WQT_SELECT, SYNC_POLICY_FIFO);
2125 }
2126
2127 /* note: this checks for pre-existing linkage */
2128 select_set_link(&sip->si_waitq, selset, linkp);
2129 }
2130 }
2131
/*
 * Common tail of selwakeup() and selthreadclear(): fire any spec-filter
 * knotes, then wake and unlink everything recorded on the waitq.
 */
static void
selwakeup_internal(struct selinfo *sip, long hint, wait_result_t wr)
{
	if (sip->si_flags & SI_SELSPEC) {
		/*
		 * The "primitive" lock is held.
		 * The knote lock is not held.
		 *
		 * All knotes will transition their kn_hook to NULL and we will
		 * reinitialize the primitive's klist
		 */
		lck_spin_lock(&selspec_lock);
		knote(&sip->si_note, hint, /*autodetach=*/ true);
		lck_spin_unlock(&selspec_lock);
		sip->si_flags &= ~SI_SELSPEC;
	}

	/*
	 * After selrecord() has been called, selinfo owners must call
	 * at least one of selwakeup() or selthreadclear().
	 *
	 * Use this opportunity to deinit the waitq
	 * so that all linkages are garbage collected
	 * in a combined wakeup-all + unlink + deinit call.
	 */
	select_waitq_wakeup_and_deinit(&sip->si_waitq, NO_EVENT64, wr);
}
2159
2160
/*
 * Wake up anything recorded against this selinfo via selrecord()
 * (normal data-ready notification: waiters get THREAD_AWAKENED).
 */
void
selwakeup(struct selinfo *sip)
{
	selwakeup_internal(sip, 0, THREAD_AWAKENED);
}
2166
/*
 * Tear-down variant of selwakeup(): wakes waiters with THREAD_RESTART
 * and fires knotes with NOTE_REVOKE (e.g. when the object goes away).
 */
void
selthreadclear(struct selinfo *sip)
{
	selwakeup_internal(sip, NOTE_REVOKE, THREAD_RESTART);
}
2172
2173
2174 /*
2175 * gethostuuid
2176 *
2177 * Description: Get the host UUID from IOKit and return it to user space.
2178 *
2179 * Parameters: uuid_buf Pointer to buffer to receive UUID
2180 * timeout Timespec for timout
2181 *
2182 * Returns: 0 Success
2183 * EWOULDBLOCK Timeout is too short
2184 * copyout:EFAULT Bad user buffer
2185 * mac_system_check_info:EPERM Client not allowed to perform this operation
2186 *
2187 * Notes: A timeout seems redundant, since if it's tolerable to not
2188 * have a system UUID in hand, then why ask for one?
2189 */
2190 int
gethostuuid(struct proc * p,struct gethostuuid_args * uap,__unused int32_t * retval)2191 gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
2192 {
2193 kern_return_t kret;
2194 int error;
2195 mach_timespec_t mach_ts; /* for IOKit call */
2196 __darwin_uuid_t uuid_kern = {}; /* for IOKit call */
2197
2198 /* Check entitlement */
2199 if (!IOCurrentTaskHasEntitlement("com.apple.private.getprivatesysid")) {
2200 #if !defined(XNU_TARGET_OS_OSX)
2201 #if CONFIG_MACF
2202 if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
2203 /* EPERM invokes userspace upcall if present */
2204 return error;
2205 }
2206 #endif
2207 #endif
2208 }
2209
2210 /* Convert the 32/64 bit timespec into a mach_timespec_t */
2211 if (proc_is64bit(p)) {
2212 struct user64_timespec ts;
2213 error = copyin(uap->timeoutp, &ts, sizeof(ts));
2214 if (error) {
2215 return error;
2216 }
2217 mach_ts.tv_sec = (unsigned int)ts.tv_sec;
2218 mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec;
2219 } else {
2220 struct user32_timespec ts;
2221 error = copyin(uap->timeoutp, &ts, sizeof(ts));
2222 if (error) {
2223 return error;
2224 }
2225 mach_ts.tv_sec = ts.tv_sec;
2226 mach_ts.tv_nsec = ts.tv_nsec;
2227 }
2228
2229 /* Call IOKit with the stack buffer to get the UUID */
2230 kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);
2231
2232 /*
2233 * If we get it, copy out the data to the user buffer; note that a
2234 * uuid_t is an array of characters, so this is size invariant for
2235 * 32 vs. 64 bit.
2236 */
2237 if (kret == KERN_SUCCESS) {
2238 error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
2239 } else {
2240 error = EWOULDBLOCK;
2241 }
2242
2243 return error;
2244 }
2245
2246 /*
2247 * ledger
2248 *
2249 * Description: Omnibus system call for ledger operations
2250 */
2251 int
ledger(struct proc * p,struct ledger_args * args,__unused int32_t * retval)2252 ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
2253 {
2254 #if !CONFIG_MACF
2255 #pragma unused(p)
2256 #endif
2257 int rval, pid, len, error;
2258 #ifdef LEDGER_DEBUG
2259 struct ledger_limit_args lla;
2260 #endif
2261 task_t task;
2262 proc_t proc;
2263
2264 /* Finish copying in the necessary args before taking the proc lock */
2265 error = 0;
2266 len = 0;
2267 if (args->cmd == LEDGER_ENTRY_INFO) {
2268 error = copyin(args->arg3, (char *)&len, sizeof(len));
2269 } else if (args->cmd == LEDGER_TEMPLATE_INFO) {
2270 error = copyin(args->arg2, (char *)&len, sizeof(len));
2271 } else if (args->cmd == LEDGER_LIMIT)
2272 #ifdef LEDGER_DEBUG
2273 { error = copyin(args->arg2, (char *)&lla, sizeof(lla));}
2274 #else
2275 { return EINVAL; }
2276 #endif
2277 else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) {
2278 return EINVAL;
2279 }
2280
2281 if (error) {
2282 return error;
2283 }
2284 if (len < 0) {
2285 return EINVAL;
2286 }
2287
2288 rval = 0;
2289 if (args->cmd != LEDGER_TEMPLATE_INFO) {
2290 pid = (int)args->arg1;
2291 proc = proc_find(pid);
2292 if (proc == NULL) {
2293 return ESRCH;
2294 }
2295
2296 #if CONFIG_MACF
2297 error = mac_proc_check_ledger(p, proc, args->cmd);
2298 if (error) {
2299 proc_rele(proc);
2300 return error;
2301 }
2302 #endif
2303
2304 task = proc_task(proc);
2305 }
2306
2307 switch (args->cmd) {
2308 #ifdef LEDGER_DEBUG
2309 case LEDGER_LIMIT: {
2310 if (!kauth_cred_issuser(kauth_cred_get())) {
2311 rval = EPERM;
2312 }
2313 rval = ledger_limit(task, &lla);
2314 proc_rele(proc);
2315 break;
2316 }
2317 #endif
2318 case LEDGER_INFO: {
2319 struct ledger_info info = {};
2320
2321 rval = ledger_info(task, &info);
2322 proc_rele(proc);
2323 if (rval == 0) {
2324 rval = copyout(&info, args->arg2,
2325 sizeof(info));
2326 }
2327 break;
2328 }
2329
2330 case LEDGER_ENTRY_INFO: {
2331 void *buf;
2332 int sz;
2333
2334 /* Settle ledger entries for memorystatus and pages grabbed */
2335 task_ledger_settle(task);
2336
2337 rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
2338 proc_rele(proc);
2339 if ((rval == 0) && (len >= 0)) {
2340 sz = len * sizeof(struct ledger_entry_info);
2341 rval = copyout(buf, args->arg2, sz);
2342 kfree_data(buf, sz);
2343 }
2344 if (rval == 0) {
2345 rval = copyout(&len, args->arg3, sizeof(len));
2346 }
2347 break;
2348 }
2349
2350 case LEDGER_TEMPLATE_INFO: {
2351 void *buf;
2352 int sz;
2353
2354 rval = ledger_template_info(&buf, &len);
2355 if ((rval == 0) && (len >= 0)) {
2356 sz = len * sizeof(struct ledger_template_info);
2357 rval = copyout(buf, args->arg1, sz);
2358 kfree_data(buf, sz);
2359 }
2360 if (rval == 0) {
2361 rval = copyout(&len, args->arg2, sizeof(len));
2362 }
2363 break;
2364 }
2365
2366 default:
2367 panic("ledger syscall logic error -- command type %d", args->cmd);
2368 proc_rele(proc);
2369 rval = EINVAL;
2370 }
2371
2372 return rval;
2373 }
2374
2375 int
telemetry(__unused struct proc * p,struct telemetry_args * args,__unused int32_t * retval)2376 telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
2377 {
2378 int error = 0;
2379
2380 switch (args->cmd) {
2381 #if CONFIG_TELEMETRY
2382 case TELEMETRY_CMD_TIMER_EVENT:
2383 error = ENOTSUP;
2384 break;
2385 case TELEMETRY_CMD_PMI_SETUP:
2386 error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval);
2387 break;
2388 #endif /* CONFIG_TELEMETRY */
2389 case TELEMETRY_CMD_VOUCHER_NAME:
2390 if (thread_set_voucher_name((mach_port_name_t)args->deadline)) {
2391 error = EINVAL;
2392 }
2393 break;
2394
2395 default:
2396 error = EINVAL;
2397 break;
2398 }
2399
2400 return error;
2401 }
2402
2403 /*
2404 * Logging
2405 *
2406 * Description: syscall to access kernel logging from userspace
2407 *
2408 * Args:
2409 * tag - used for syncing with userspace on the version.
2410 * flags - flags used by the syscall.
2411 * buffer - userspace address of string to copy.
2412 * size - size of buffer.
2413 */
2414 int
log_data(__unused struct proc * p,struct log_data_args * args,int * retval)2415 log_data(__unused struct proc *p, struct log_data_args *args, int *retval)
2416 {
2417 unsigned int tag = args->tag;
2418 unsigned int flags = args->flags;
2419 user_addr_t buffer = args->buffer;
2420 unsigned int size = args->size;
2421 int ret = 0;
2422 *retval = 0;
2423
2424 /* Only DEXTs are suppose to use this syscall. */
2425 if (!task_is_driver(current_task())) {
2426 return EPERM;
2427 }
2428
2429 /*
2430 * Tag synchronize the syscall version with userspace.
2431 * Tag == 0 => flags == OS_LOG_TYPE
2432 */
2433 if (tag != 0) {
2434 return EINVAL;
2435 }
2436
2437 /*
2438 * OS_LOG_TYPE are defined in libkern/os/log.h
2439 * In userspace they are defined in libtrace/os/log.h
2440 */
2441 if (flags != OS_LOG_TYPE_DEFAULT &&
2442 flags != OS_LOG_TYPE_INFO &&
2443 flags != OS_LOG_TYPE_DEBUG &&
2444 flags != OS_LOG_TYPE_ERROR &&
2445 flags != OS_LOG_TYPE_FAULT) {
2446 return EINVAL;
2447 }
2448
2449 if (size == 0) {
2450 return EINVAL;
2451 }
2452
2453 /* truncate to OS_LOG_DATA_MAX_SIZE */
2454 if (size > OS_LOG_DATA_MAX_SIZE) {
2455 size = OS_LOG_DATA_MAX_SIZE;
2456 }
2457
2458 char *log_msg = (char *)kalloc_data(size, Z_WAITOK);
2459 if (!log_msg) {
2460 return ENOMEM;
2461 }
2462
2463 if (copyin(buffer, log_msg, size) != 0) {
2464 ret = EFAULT;
2465 goto out;
2466 }
2467 log_msg[size - 1] = '\0';
2468
2469 /*
2470 * This will log to dmesg and logd.
2471 * The call will fail if the current
2472 * process is not a driverKit process.
2473 */
2474 os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg);
2475
2476 out:
2477 if (log_msg != NULL) {
2478 kfree_data(log_msg, size);
2479 }
2480
2481 return ret;
2482 }
2483
2484 /*
2485 * Coprocessor logging
2486 *
2487 * Description: syscall to access kernel coprocessor logging from userspace
2488 *
2489 * Args:
2490 * buff - userspace address of string to copy.
2491 * buff_len - size of buffer.
2492 * type - log type/level
2493 * uuid - log source identifier
2494 * timestamp - log timestamp
2495 * offset - log format offset
2496 * stream_log - flag indicating stream
2497 */
2498 int
oslog_coproc(__unused struct proc * p,struct oslog_coproc_args * args,int * retval)2499 oslog_coproc(__unused struct proc *p, struct oslog_coproc_args *args, int *retval)
2500 {
2501 user_addr_t buff = args->buff;
2502 uint64_t buff_len = args->buff_len;
2503 uint32_t type = args->type;
2504 user_addr_t uuid = args->uuid;
2505 uint64_t timestamp = args->timestamp;
2506 uint32_t offset = args->offset;
2507 uint32_t stream_log = args->stream_log;
2508 char *log_buff = NULL;
2509 uuid_t log_uuid;
2510
2511 int ret = 0;
2512 *retval = 0;
2513
2514 const task_t __single task = proc_task(p);
2515 if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.coprocessor-logging")) {
2516 return EPERM;
2517 }
2518
2519 /* Only DEXTs are supposed to use this syscall. */
2520 if (!task_is_driver(current_task())) {
2521 return EPERM;
2522 }
2523
2524 // the full message contains a 32 bit offset value, 16 byte uuid and then the provided buffer
2525 // entire message needs to fit within OS_LOG_BUFFER_MAX_SIZE
2526 // this is reflected in `log.c:os_log_coprocessor`
2527 uint64_t full_len;
2528 if (os_add_overflow(buff_len, sizeof(uuid_t) + sizeof(uint32_t), &full_len) || full_len > OS_LOG_BUFFER_MAX_SIZE) {
2529 return ERANGE;
2530 }
2531
2532 log_buff = (char *)kalloc_data(buff_len, Z_WAITOK);
2533 if (!log_buff) {
2534 return ENOMEM;
2535 }
2536
2537 ret = copyin(buff, log_buff, buff_len);
2538 if (ret) {
2539 goto out;
2540 }
2541
2542 ret = copyin(uuid, &log_uuid, sizeof(uuid_t));
2543 if (ret) {
2544 goto out;
2545 }
2546
2547 os_log_coprocessor(log_buff, buff_len, (os_log_type_t)type, (char *)log_uuid, timestamp, offset, stream_log);
2548
2549 out:
2550 if (log_buff != NULL) {
2551 kfree_data(log_buff, buff_len);
2552 }
2553
2554 return ret;
2555 }
2556
2557 /*
2558 * Coprocessor logging registration
2559 *
2560 * Description: syscall to access kernel coprocessor logging registration from userspace
2561 *
2562 * Args:
2563 * uuid - coprocessor fw uuid to harvest
2564 * file_path - file name for logd to harvest from
2565 * file_path_len - file name length
2566 */
2567 int
oslog_coproc_reg(__unused struct proc * p,struct oslog_coproc_reg_args * args,int * retval)2568 oslog_coproc_reg(__unused struct proc *p, struct oslog_coproc_reg_args *args, int *retval)
2569 {
2570 user_addr_t uuid = args->uuid;
2571 user_addr_t file_path = args->file_path;
2572 size_t file_path_len = args->file_path_len;
2573 char *file_path_buf = NULL;
2574 uuid_t uuid_buf;
2575
2576 int ret = 0;
2577 *retval = 0;
2578
2579 const task_t __single task = proc_task(p);
2580 if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.coprocessor-logging")) {
2581 return EPERM;
2582 }
2583
2584 /* Only DEXTs are supposed to use this syscall. */
2585 if (!task_is_driver(current_task())) {
2586 return EPERM;
2587 }
2588
2589 if (file_path_len > PATH_MAX) {
2590 return EINVAL;
2591 }
2592
2593 file_path_buf = (char *)kalloc_data(file_path_len, Z_WAITOK);
2594 if (!file_path_buf) {
2595 return ENOMEM;
2596 }
2597
2598 ret = copyin(file_path, file_path_buf, file_path_len);
2599 if (ret) {
2600 goto out;
2601 }
2602
2603 ret = copyin(uuid, &uuid_buf, sizeof(uuid_t));
2604 if (ret) {
2605 goto out;
2606 }
2607
2608 os_log_coprocessor_register_with_type((char *)uuid_buf, file_path_buf, os_log_coproc_register_harvest_fs_ftab);
2609
2610 out:
2611 if (file_path_buf != NULL) {
2612 kfree_data(file_path_buf, file_path_len);
2613 }
2614
2615 return ret;
2616 }
2617
2618 #if DEVELOPMENT || DEBUG
2619
2620 static int
2621 sysctl_mpsc_test_pingpong SYSCTL_HANDLER_ARGS
2622 {
2623 #pragma unused(oidp, arg1, arg2)
2624 uint64_t value = 0;
2625 int error;
2626
2627 error = SYSCTL_IN(req, &value, sizeof(value));
2628 if (error) {
2629 return error;
2630 }
2631
2632 if (error == 0 && req->newptr) {
2633 error = mpsc_test_pingpong(value, &value);
2634 if (error == 0) {
2635 error = SYSCTL_OUT(req, &value, sizeof(value));
2636 }
2637 }
2638
2639 return error;
2640 }
2641 SYSCTL_PROC(_kern, OID_AUTO, mpsc_test_pingpong, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2642 0, 0, sysctl_mpsc_test_pingpong, "Q", "MPSC tests: pingpong");
2643
2644 #endif /* DEVELOPMENT || DEBUG */
2645
/* Telemetry, microstackshots */

/* Parent node for microstackshot-related sysctls (kern.microstackshot.*) */
SYSCTL_NODE(_kern, OID_AUTO, microstackshot, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    "microstackshot info");

#if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES)

/* Read-only: period between PMI-driven microstackshot samples */
extern uint64_t mt_microstackshot_period;
SYSCTL_QUAD(_kern_microstackshot, OID_AUTO, pmi_sample_period,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_period,
    "PMI sampling rate");
/* Read-only: counter associated with PMI-driven sampling */
extern unsigned int mt_microstackshot_ctr;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, pmi_sample_counter,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_ctr, 0,
    "PMI counter");

#endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */

/* Remote Time api: parent node for machdep.remotetime.* */
SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api");
2666
2667 #if DEVELOPMENT || DEBUG
2668 #if CONFIG_MACH_BRIDGE_SEND_TIME
2669 extern _Atomic uint32_t bt_init_flag;
2670 extern uint32_t mach_bridge_timer_enable(uint32_t, int);
2671
2672 SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag,
2673 CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, "");
2674
2675 static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS
2676 {
2677 #pragma unused(oidp, arg1, arg2)
2678 uint32_t value = 0;
2679 int error = 0;
2680 /* User is querying buffer size */
2681 if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) {
2682 req->oldidx = sizeof(value);
2683 return 0;
2684 }
2685 if (os_atomic_load(&bt_init_flag, acquire)) {
2686 if (req->newptr) {
2687 int new_value = 0;
2688 error = SYSCTL_IN(req, &new_value, sizeof(new_value));
2689 if (error) {
2690 return error;
2691 }
2692 if (new_value == 0 || new_value == 1) {
2693 value = mach_bridge_timer_enable(new_value, 1);
2694 } else {
2695 return EPERM;
2696 }
2697 } else {
2698 value = mach_bridge_timer_enable(0, 0);
2699 }
2700 }
2701 error = SYSCTL_OUT(req, &value, sizeof(value));
2702 return error;
2703 }
2704
2705 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable,
2706 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2707 0, 0, sysctl_mach_bridge_timer_enable, "I", "");
2708
2709 #endif /* CONFIG_MACH_BRIDGE_SEND_TIME */
2710
2711 static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS
2712 {
2713 #pragma unused(oidp, arg1, arg2)
2714 uint64_t ltime = 0, rtime = 0;
2715 if (req->oldptr == USER_ADDR_NULL) {
2716 req->oldidx = sizeof(rtime);
2717 return 0;
2718 }
2719 if (req->newptr) {
2720 int error = SYSCTL_IN(req, <ime, sizeof(ltime));
2721 if (error) {
2722 return error;
2723 }
2724 }
2725 rtime = mach_bridge_remote_time(ltime);
2726 return SYSCTL_OUT(req, &rtime, sizeof(rtime));
2727 }
2728 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time,
2729 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2730 0, 0, sysctl_mach_bridge_remote_time, "Q", "");
2731
2732 #endif /* DEVELOPMENT || DEBUG */
2733
2734 #if CONFIG_MACH_BRIDGE_RECV_TIME
2735 extern struct bt_params bt_params_get_latest(void);
2736
2737 static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS
2738 {
2739 #pragma unused(oidp, arg1, arg2)
2740 struct bt_params params = {};
2741 if (req->oldptr == USER_ADDR_NULL) {
2742 req->oldidx = sizeof(struct bt_params);
2743 return 0;
2744 }
2745 if (req->newptr) {
2746 return EPERM;
2747 }
2748 params = bt_params_get_latest();
2749 return SYSCTL_OUT(req, ¶ms, MIN(sizeof(params), req->oldlen));
2750 }
2751
2752 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params,
2753 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0,
2754 0, sysctl_mach_bridge_conversion_params, "S,bt_params", "");
2755
2756 #endif /* CONFIG_MACH_BRIDGE_RECV_TIME */
2757
2758 #if DEVELOPMENT || DEBUG
2759
2760 #include <pexpert/pexpert.h>
2761 extern int32_t sysctl_get_bound_cpuid(void);
2762 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
2763 static int
2764 sysctl_kern_sched_thread_bind_cpu SYSCTL_HANDLER_ARGS
2765 {
2766 #pragma unused(oidp, arg1, arg2)
2767
2768 /*
2769 * DO NOT remove this bootarg guard or make this non-development.
2770 * This kind of binding should only be used for tests and
2771 * experiments in a custom configuration, never shipping code.
2772 */
2773
2774 if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2775 return ENOENT;
2776 }
2777
2778 int32_t cpuid = sysctl_get_bound_cpuid();
2779
2780 int32_t new_value;
2781 int changed;
2782 int error = sysctl_io_number(req, cpuid, sizeof(cpuid), &new_value, &changed);
2783 if (error) {
2784 return error;
2785 }
2786
2787 if (changed) {
2788 kern_return_t kr = sysctl_thread_bind_cpuid(new_value);
2789
2790 if (kr == KERN_NOT_SUPPORTED) {
2791 return ENOTSUP;
2792 }
2793
2794 if (kr == KERN_INVALID_VALUE) {
2795 return ERANGE;
2796 }
2797 }
2798
2799 return error;
2800 }
2801
2802 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cpu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2803 0, 0, sysctl_kern_sched_thread_bind_cpu, "I", "");
2804
2805 #if __AMP__
2806
2807 extern char sysctl_get_bound_cluster_type(void);
2808 static int
2809 sysctl_kern_sched_thread_bind_cluster_type SYSCTL_HANDLER_ARGS
2810 {
2811 #pragma unused(oidp, arg1, arg2)
2812 char buff[4];
2813
2814 if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2815 return ENOENT;
2816 }
2817
2818 int error = SYSCTL_IN(req, buff, 1);
2819 if (error) {
2820 return error;
2821 }
2822 char cluster_type = buff[0];
2823
2824 if (!req->newptr) {
2825 goto out;
2826 }
2827
2828 if (cluster_type != 'P' &&
2829 cluster_type != 'p' &&
2830 cluster_type != 'E' &&
2831 cluster_type != 'e') {
2832 return EINVAL;
2833 }
2834
2835 thread_soft_bind_cluster_type(current_thread(), cluster_type);
2836
2837 out:
2838 buff[0] = sysctl_get_bound_cluster_type();
2839
2840 return SYSCTL_OUT(req, buff, 1);
2841 }
2842
2843 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2844 0, 0, sysctl_kern_sched_thread_bind_cluster_type, "A", "");
2845
2846 extern char sysctl_get_task_cluster_type(void);
2847 extern void sysctl_task_set_cluster_type(char cluster_type);
2848 static int
2849 sysctl_kern_sched_task_set_cluster_type SYSCTL_HANDLER_ARGS
2850 {
2851 #pragma unused(oidp, arg1, arg2)
2852 char buff[4];
2853
2854 if (!PE_parse_boot_argn("enable_skstsct", NULL, 0)) {
2855 return ENOENT;
2856 }
2857
2858 int error = SYSCTL_IN(req, buff, 1);
2859 if (error) {
2860 return error;
2861 }
2862 char cluster_type = buff[0];
2863
2864 if (!req->newptr) {
2865 goto out;
2866 }
2867
2868 if (cluster_type != 'E' &&
2869 cluster_type != 'e' &&
2870 cluster_type != 'P' &&
2871 cluster_type != 'p') {
2872 return EINVAL;
2873 }
2874
2875 sysctl_task_set_cluster_type(cluster_type);
2876 out:
2877 cluster_type = sysctl_get_task_cluster_type();
2878 buff[0] = cluster_type;
2879
2880 return SYSCTL_OUT(req, buff, 1);
2881 }
2882
2883 SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2884 0, 0, sysctl_kern_sched_task_set_cluster_type, "A", "");
2885
2886 extern kern_return_t thread_soft_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options);
2887 extern uint32_t thread_bound_cluster_id(thread_t);
2888 static int
2889 sysctl_kern_sched_thread_bind_cluster_id SYSCTL_HANDLER_ARGS
2890 {
2891 #pragma unused(oidp, arg1, arg2)
2892 if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2893 return ENOENT;
2894 }
2895
2896 thread_t self = current_thread();
2897 int32_t cluster_id = thread_bound_cluster_id(self);
2898 int32_t new_value;
2899 int changed;
2900 int error = sysctl_io_number(req, cluster_id, sizeof(cluster_id), &new_value, &changed);
2901 if (error) {
2902 return error;
2903 }
2904
2905 if (changed) {
2906 /*
2907 * Note, this binds the thread to the cluster without passing the
2908 * THREAD_BIND_ELIGIBLE_ONLY option, which means we won't check
2909 * whether the thread is otherwise eligible to run on that cluster--
2910 * we will send it there regardless.
2911 */
2912 kern_return_t kr = thread_soft_bind_cluster_id(self, new_value, 0);
2913 if (kr == KERN_INVALID_VALUE) {
2914 return ERANGE;
2915 }
2916
2917 if (kr != KERN_SUCCESS) {
2918 return EINVAL;
2919 }
2920 }
2921
2922 return error;
2923 }
2924
2925 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_id, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2926 0, 0, sysctl_kern_sched_thread_bind_cluster_id, "I", "");
2927
2928 #if CONFIG_SCHED_EDGE
2929
/* Tunable: when set, the Edge scheduler uses immediate IPIs for migration events */
extern int sched_edge_migrate_ipi_immediate;
SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency");
2932
2933 #endif /* CONFIG_SCHED_EDGE */
2934
2935 #endif /* __AMP__ */
2936
2937 #if SCHED_HYGIENE_DEBUG
2938
/* Scheduler-hygiene tunables: thresholds and debug modes for interrupt-masked
 * and preemption-disabled duration tracking. */
SYSCTL_QUAD(_kern, OID_AUTO, interrupt_masked_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_timeout,
    "Interrupt masked duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, interrupt_masked_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_debug_mode, 0,
    "Enable interrupt masked tracing or panic (0: off, 1: trace, 2: panic)");

SYSCTL_QUAD(_kern, OID_AUTO, sched_preemption_disable_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_threshold_mt,
    "Preemption disablement duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, sched_preemption_disable_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_debug_mode, 0,
    "Enable preemption disablement tracing or panic (0: off, 1: trace, 2: panic)");
2954
2955 static int
sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)2956 sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2957 {
2958 extern unsigned int preemption_disable_get_max_durations(uint64_t *durations, size_t count);
2959 extern void preemption_disable_reset_max_durations(void);
2960
2961 uint64_t stats[MAX_CPUS]; // maximum per CPU
2962
2963 unsigned int ncpus = preemption_disable_get_max_durations(stats, MAX_CPUS);
2964 if (req->newlen > 0) {
2965 /* Reset when attempting to write to the sysctl. */
2966 preemption_disable_reset_max_durations();
2967 }
2968
2969 return sysctl_io_opaque(req, stats, ncpus * sizeof(uint64_t), NULL);
2970 }
2971
2972 SYSCTL_PROC(_kern, OID_AUTO, sched_preemption_disable_stats,
2973 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
2974 0, 0, sysctl_sched_preemption_disable_stats, "I", "Preemption disablement statistics");
2975
2976 #endif /* SCHED_HYGIENE_DEBUG */
2977
/* used for testing by exception_tests */
extern uint32_t ipc_control_port_options;
SYSCTL_INT(_kern, OID_AUTO, ipc_control_port_options,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipc_control_port_options, 0, "");

#endif /* DEVELOPMENT || DEBUG */

/* Read-only: default EXC_GUARD configuration applied to new tasks */
extern uint32_t task_exc_guard_default;

SYSCTL_INT(_kern, OID_AUTO, task_exc_guard_default,
    CTLFLAG_RD | CTLFLAG_LOCKED, &task_exc_guard_default, 0, "");
2989
2990
2991 static int
2992 sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS
2993 {
2994 #pragma unused(oidp, arg1, arg2)
2995 uint32_t value = machine_csv(CPUVN_CI) ? 1 : 0;
2996
2997 if (req->newptr) {
2998 return EINVAL;
2999 }
3000
3001 return SYSCTL_OUT(req, &value, sizeof(value));
3002 }
3003 SYSCTL_PROC(_kern, OID_AUTO, tcsm_available,
3004 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
3005 0, 0, sysctl_kern_tcsm_available, "I", "");
3006
3007
3008 static int
3009 sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS
3010 {
3011 #pragma unused(oidp, arg1, arg2)
3012 uint32_t soflags = 0;
3013 #if CONFIG_SCHED_SMT
3014 uint32_t old_value = thread_get_no_smt() ? 1 : 0;
3015 #else /* CONFIG_SCHED_SMT */
3016 uint32_t old_value = 0;
3017 #endif /* CONFIG_SCHED_SMT */
3018
3019 int error = SYSCTL_IN(req, &soflags, sizeof(soflags));
3020 if (error) {
3021 return error;
3022 }
3023
3024 if (soflags && machine_csv(CPUVN_CI)) {
3025 #if CONFIG_SCHED_SMT
3026 thread_set_no_smt(true);
3027 #endif /* CONFIG_SCHED_SMT */
3028 machine_tecs(current_thread());
3029 }
3030
3031 return SYSCTL_OUT(req, &old_value, sizeof(old_value));
3032 }
3033 SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable,
3034 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
3035 0, 0, sysctl_kern_tcsm_enable, "I", "");
3036
3037 static int
3038 sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS
3039 {
3040 #pragma unused(oidp, arg1, arg2)
3041 static bool oneshot_executed = false;
3042 size_t preoslog_size = 0;
3043 const char *preoslog = NULL;
3044 int ret = 0;
3045
3046 // DumpPanic passes a non-zero write value when it needs oneshot behaviour
3047 if (req->newptr != USER_ADDR_NULL) {
3048 uint8_t oneshot = 0;
3049 int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot));
3050 if (error) {
3051 return error;
3052 }
3053
3054 if (oneshot) {
3055 if (!os_atomic_cmpxchg(&oneshot_executed, false, true, acq_rel)) {
3056 return EPERM;
3057 }
3058 }
3059 }
3060
3061 preoslog = sysctl_debug_get_preoslog(&preoslog_size);
3062 if (preoslog != NULL && preoslog_size == 0) {
3063 sysctl_debug_free_preoslog();
3064 return 0;
3065 }
3066
3067 if (preoslog == NULL || preoslog_size == 0) {
3068 return 0;
3069 }
3070
3071 if (req->oldptr == USER_ADDR_NULL) {
3072 req->oldidx = preoslog_size;
3073 return 0;
3074 }
3075
3076 ret = SYSCTL_OUT(req, preoslog, preoslog_size);
3077 sysctl_debug_free_preoslog();
3078 return ret;
3079 }
3080
3081 SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
3082 0, 0, sysctl_kern_debug_get_preoslog, "-", "");
3083
3084 #if DEVELOPMENT || DEBUG
3085 extern void sysctl_task_set_no_smt(char no_smt);
3086 extern char sysctl_task_get_no_smt(void);
3087
3088 static int
3089 sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS
3090 {
3091 #pragma unused(oidp, arg1, arg2)
3092 char buff[4];
3093
3094 int error = SYSCTL_IN(req, buff, 1);
3095 if (error) {
3096 return error;
3097 }
3098 char no_smt = buff[0];
3099
3100 if (!req->newptr) {
3101 goto out;
3102 }
3103
3104 sysctl_task_set_no_smt(no_smt);
3105 out:
3106 no_smt = sysctl_task_get_no_smt();
3107 buff[0] = no_smt;
3108
3109 return SYSCTL_OUT(req, buff, 1);
3110 }
3111
3112 SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
3113 0, 0, sysctl_kern_sched_task_set_no_smt, "A", "");
3114
3115 #if CONFIG_SCHED_SMT
3116 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3117 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3118 {
3119 int new_value, changed;
3120 int old_value = thread_get_no_smt() ? 1 : 0;
3121 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3122
3123 if (changed) {
3124 thread_set_no_smt(!!new_value);
3125 }
3126
3127 return error;
3128 }
3129 #else /* CONFIG_SCHED_SMT */
3130 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,__unused struct sysctl_req * req)3131 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
3132 {
3133 return 0;
3134 }
3135 #endif /* CONFIG_SCHED_SMT*/
3136
3137 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt,
3138 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
3139 0, 0, sysctl_kern_sched_thread_set_no_smt, "I", "");
3140
3141 #if CONFIG_SCHED_RT_ALLOW
3142
3143 #if DEVELOPMENT || DEBUG
3144 #define RT_ALLOW_CTLFLAGS CTLFLAG_RW
3145 #else
3146 #define RT_ALLOW_CTLFLAGS CTLFLAG_RD
3147 #endif /* DEVELOPMENT || DEBUG */
3148
3149 static int
sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3150 sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid *oidp,
3151 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3152 {
3153 extern uint8_t rt_allow_limit_percent;
3154
3155 int new_value = 0;
3156 int old_value = rt_allow_limit_percent;
3157 int changed = 0;
3158
3159 int error = sysctl_io_number(req, old_value, sizeof(old_value),
3160 &new_value, &changed);
3161 if (error != 0) {
3162 return error;
3163 }
3164
3165 /* Only accept a percentage between 1 and 99 inclusive. */
3166 if (changed) {
3167 if (new_value >= 100 || new_value <= 0) {
3168 return EINVAL;
3169 }
3170
3171 rt_allow_limit_percent = (uint8_t)new_value;
3172 }
3173
3174 return 0;
3175 }
3176
3177 SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_percent,
3178 RT_ALLOW_CTLFLAGS | CTLTYPE_INT | CTLFLAG_LOCKED,
3179 0, 0, sysctl_kern_rt_allow_limit_percent, "I", "");
3180
3181 static int
sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)3182 sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid *oidp,
3183 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3184 {
3185 extern uint16_t rt_allow_limit_interval_ms;
3186
3187 uint64_t new_value = 0;
3188 uint64_t old_value = rt_allow_limit_interval_ms;
3189 int changed = 0;
3190
3191 int error = sysctl_io_number(req, old_value, sizeof(old_value),
3192 &new_value, &changed);
3193 if (error != 0) {
3194 return error;
3195 }
3196
3197 /* Value is in ns. Must be at least 1ms. */
3198 if (changed) {
3199 if (new_value < 1 || new_value > UINT16_MAX) {
3200 return EINVAL;
3201 }
3202
3203 rt_allow_limit_interval_ms = (uint16_t)new_value;
3204 }
3205
3206 return 0;
3207 }
3208
3209 SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_interval_ms,
3210 RT_ALLOW_CTLFLAGS | CTLTYPE_QUAD | CTLFLAG_LOCKED,
3211 0, 0, sysctl_kern_rt_allow_limit_interval_ms, "Q", "");
3212
3213 #endif /* CONFIG_SCHED_RT_ALLOW */
3214
3215
3216 static int
3217 sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS
3218 {
3219 #pragma unused(oidp, arg1, arg2)
3220 int new_value, changed;
3221 int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0;
3222 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3223
3224 if (changed) {
3225 task_set_filter_msg_flag(current_task(), !!new_value);
3226 }
3227
3228 return error;
3229 }
3230
3231 SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3232 0, 0, sysctl_kern_task_set_filter_msg_flag, "I", "");
3233
3234 #if CONFIG_PROC_RESOURCE_LIMITS
3235
3236 extern mach_port_name_t current_task_get_fatal_port_name(void);
3237
3238 static int
3239 sysctl_kern_task_get_fatal_port SYSCTL_HANDLER_ARGS
3240 {
3241 #pragma unused(oidp, arg1, arg2)
3242 int port = 0;
3243 int flag = 0;
3244
3245 if (req->oldptr == USER_ADDR_NULL) {
3246 req->oldidx = sizeof(mach_port_t);
3247 return 0;
3248 }
3249
3250 int error = SYSCTL_IN(req, &flag, sizeof(flag));
3251 if (error) {
3252 return error;
3253 }
3254
3255 if (flag == 1) {
3256 port = (int)current_task_get_fatal_port_name();
3257 }
3258 return SYSCTL_OUT(req, &port, sizeof(port));
3259 }
3260
3261 SYSCTL_PROC(_machdep, OID_AUTO, task_get_fatal_port, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3262 0, 0, sysctl_kern_task_get_fatal_port, "I", "");
3263
3264 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
3265
3266 extern unsigned int ipc_entry_table_count_max(void);
3267
3268 static int
3269 sysctl_mach_max_port_table_size SYSCTL_HANDLER_ARGS
3270 {
3271 #pragma unused(oidp, arg1, arg2)
3272 int old_value = ipc_entry_table_count_max();
3273 int error = sysctl_io_number(req, old_value, sizeof(int), NULL, NULL);
3274
3275 return error;
3276 }
3277
3278 SYSCTL_PROC(_machdep, OID_AUTO, max_port_table_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3279 0, 0, sysctl_mach_max_port_table_size, "I", "");
3280
3281 #endif /* DEVELOPMENT || DEBUG */
3282
3283 #if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(CONFIG_KDP_COREDUMP_ENCRYPTION)
3284
3285 #define COREDUMP_ENCRYPTION_KEY_ENTITLEMENT "com.apple.private.coredump-encryption-key"
3286
3287 static int
3288 sysctl_coredump_encryption_key_update SYSCTL_HANDLER_ARGS
3289 {
3290 kern_return_t ret = KERN_SUCCESS;
3291 int error = 0;
3292 struct kdp_core_encryption_key_descriptor key_descriptor = {
3293 .kcekd_format = MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256,
3294 };
3295
3296 /* Need to be root and have entitlement */
3297 if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(COREDUMP_ENCRYPTION_KEY_ENTITLEMENT)) {
3298 return EPERM;
3299 }
3300
3301 // Sanity-check the given key length
3302 if (req->newlen > UINT16_MAX) {
3303 return EINVAL;
3304 }
3305
3306 // It is allowed for the caller to pass in a NULL buffer.
3307 // This indicates that they want us to forget about any public key we might have.
3308 if (req->newptr) {
3309 key_descriptor.kcekd_size = (uint16_t) req->newlen;
3310 key_descriptor.kcekd_key = kalloc_data(key_descriptor.kcekd_size, Z_WAITOK);
3311
3312 if (key_descriptor.kcekd_key == NULL) {
3313 return ENOMEM;
3314 }
3315
3316 error = SYSCTL_IN(req, key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3317 if (error) {
3318 goto out;
3319 }
3320 }
3321
3322 ret = IOProvideCoreFileAccess(kdp_core_handle_new_encryption_key, (void *)&key_descriptor);
3323 if (KERN_SUCCESS != ret) {
3324 printf("Failed to handle the new encryption key. Error 0x%x", ret);
3325 error = EFAULT;
3326 }
3327
3328 out:
3329 kfree_data(key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3330 return 0;
3331 }
3332
3333 SYSCTL_PROC(_kern, OID_AUTO, coredump_encryption_key, CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
3334 0, 0, &sysctl_coredump_encryption_key_update, "-", "Set a new encryption key for coredumps");
3335
3336 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING && CONFIG_KDP_COREDUMP_ENCRYPTION*/
3337