1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/ioctl.h>
79 #include <sys/file_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/socketvar.h>
82 #include <sys/uio_internal.h>
83 #include <sys/kernel.h>
84 #include <sys/guarded.h>
85 #include <sys/stat.h>
86 #include <sys/malloc.h>
87 #include <sys/sysproto.h>
88
89 #include <sys/mount_internal.h>
90 #include <sys/protosw.h>
91 #include <sys/ev.h>
92 #include <sys/user.h>
93 #include <sys/kdebug.h>
94 #include <sys/poll.h>
95 #include <sys/event.h>
96 #include <sys/eventvar.h>
97 #include <sys/proc.h>
98 #include <sys/kauth.h>
99
100 #include <machine/smp.h>
101 #include <mach/mach_types.h>
102 #include <kern/kern_types.h>
103 #include <kern/assert.h>
104 #include <kern/kalloc.h>
105 #include <kern/thread.h>
106 #include <kern/clock.h>
107 #include <kern/ledger.h>
108 #include <kern/monotonic.h>
109 #include <kern/task.h>
110 #include <kern/telemetry.h>
111 #include <kern/waitq.h>
112 #include <kern/sched_hygiene.h>
113 #include <kern/sched_prim.h>
114 #include <kern/mpsc_queue.h>
115 #include <kern/debug.h>
116
117 #include <sys/mbuf.h>
118 #include <sys/domain.h>
119 #include <sys/socket.h>
120 #include <sys/socketvar.h>
121 #include <sys/errno.h>
122 #include <sys/syscall.h>
123 #include <sys/pipe.h>
124
125 #include <security/audit/audit.h>
126
127 #include <net/if.h>
128 #include <net/route.h>
129
130 #include <netinet/in.h>
131 #include <netinet/in_systm.h>
132 #include <netinet/ip.h>
133 #include <netinet/in_pcb.h>
134 #include <netinet/ip_var.h>
135 #include <netinet/ip6.h>
136 #include <netinet/tcp.h>
137 #include <netinet/tcp_fsm.h>
138 #include <netinet/tcp_seq.h>
139 #include <netinet/tcp_timer.h>
140 #include <netinet/tcp_var.h>
141 #include <netinet/tcpip.h>
142 #include <netinet/tcp_debug.h>
143 /* for wait queue based select */
144 #include <kern/waitq.h>
145 #include <sys/vnode_internal.h>
146 /* for remote time api*/
147 #include <kern/remote_time.h>
148 #include <os/log.h>
149 #include <sys/log_data.h>
150
151 #if CONFIG_MACF
152 #include <security/mac_framework.h>
153 #endif
154
155 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
156 #include <mach_debug/mach_debug_types.h>
157 #endif
158
159 #if MONOTONIC
160 #include <machine/monotonic.h>
161 #endif /* MONOTONIC */
162
163 /* for entitlement check */
164 #include <IOKit/IOBSD.h>
165
166 /* XXX should be in a header file somewhere */
167 extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);
168
169 int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
170 __private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
171 user_addr_t bufp, user_size_t nbyte,
172 off_t offset, int flags, user_ssize_t *retval);
173 __private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
174 user_addr_t bufp, user_size_t nbyte,
175 off_t offset, int flags, user_ssize_t *retval);
176 static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);
177
178 /* needed by guarded_writev, etc. */
179 int write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
180 off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval);
181 int writev_uio(struct proc *p, int fd, user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
182 guardid_t *puguard, user_ssize_t *retval);
183
184 #define f_flag fp_glob->fg_flag
185 #define f_type fp_glob->fg_ops->fo_type
186 #define f_cred fp_glob->fg_cred
187 #define f_ops fp_glob->fg_ops
188
189 /*
190 * Validate if the file can be used for random access (pread, pwrite, etc).
191 *
192 * Conditions:
193 * proc_fdlock is held
194 *
195 * Returns: 0 Success
196 * ESPIPE
197 * ENXIO
198 */
199 static int
valid_for_random_access(struct fileproc * fp)200 valid_for_random_access(struct fileproc *fp)
201 {
202 if (__improbable(fp->f_type != DTYPE_VNODE)) {
203 return ESPIPE;
204 }
205
206 vnode_t vp = (struct vnode *)fp_get_data(fp);
207 if (__improbable(vnode_isfifo(vp))) {
208 return ESPIPE;
209 }
210
211 if (__improbable(vp->v_flag & VISTTY)) {
212 return ENXIO;
213 }
214
215 return 0;
216 }
217
218 /*
219 * Returns: 0 Success
220 * EBADF
221 * ESPIPE
222 * ENXIO
223 * fp_lookup:EBADF
224 * valid_for_random_access:ESPIPE
225 * valid_for_random_access:ENXIO
226 */
227 static int
preparefileread(struct proc * p,struct fileproc ** fp_ret,int fd,int check_for_pread)228 preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
229 {
230 int error;
231 struct fileproc *fp;
232
233 AUDIT_ARG(fd, fd);
234
235 proc_fdlock_spin(p);
236
237 error = fp_lookup(p, fd, &fp, 1);
238
239 if (error) {
240 proc_fdunlock(p);
241 return error;
242 }
243 if ((fp->f_flag & FREAD) == 0) {
244 error = EBADF;
245 goto out;
246 }
247 if (check_for_pread) {
248 if ((error = valid_for_random_access(fp))) {
249 goto out;
250 }
251 }
252
253 *fp_ret = fp;
254
255 proc_fdunlock(p);
256 return 0;
257
258 out:
259 fp_drop(p, fd, fp, 1);
260 proc_fdunlock(p);
261 return error;
262 }
263
264 static int
fp_readv(vfs_context_t ctx,struct fileproc * fp,uio_t uio,int flags,user_ssize_t * retval)265 fp_readv(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
266 user_ssize_t *retval)
267 {
268 int error;
269 user_ssize_t count;
270
271 if ((error = uio_calculateresid(uio))) {
272 *retval = 0;
273 return error;
274 }
275
276 count = uio_resid(uio);
277 error = fo_read(fp, uio, flags, ctx);
278
279 switch (error) {
280 case ERESTART:
281 case EINTR:
282 case EWOULDBLOCK:
283 if (uio_resid(uio) != count) {
284 error = 0;
285 }
286 break;
287
288 default:
289 break;
290 }
291
292 *retval = count - uio_resid(uio);
293 return error;
294 }
295
296 /*
297 * Returns: 0 Success
298 * EINVAL
299 * fo_read:???
300 */
301 __private_extern__ int
dofileread(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)302 dofileread(vfs_context_t ctx, struct fileproc *fp,
303 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
304 user_ssize_t *retval)
305 {
306 UIO_STACKBUF(uio_buf, 1);
307 uio_t uio;
308 int spacetype;
309
310 if (nbyte > INT_MAX) {
311 *retval = 0;
312 return EINVAL;
313 }
314
315 spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
316 uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ, &uio_buf[0],
317 sizeof(uio_buf));
318
319 if (uio_addiov(uio, bufp, nbyte) != 0) {
320 *retval = 0;
321 return EINVAL;
322 }
323
324 return fp_readv(ctx, fp, uio, flags, retval);
325 }
326
/*
 * Common back end for all read variants: validate the descriptor,
 * switch to the file's opened credential, and perform the transfer.
 */
static int
readv_internal(struct proc *p, int fd, uio_t uio, int flags,
    user_ssize_t *retval)
{
	struct fileproc *fp = NULL;
	struct vfs_context context;
	int error;

	/* FOF_OFFSET set means this is a positioned (pread-style) call. */
	if ((error = preparefileread(p, &fp, fd, flags & FOF_OFFSET))) {
		*retval = 0;
		return error;
	}

	/* Use the credential the file was opened with, not the caller's. */
	context = *(vfs_context_current());
	context.vc_ucred = fp->fp_glob->fg_cred;

	error = fp_readv(&context, fp, uio, flags, retval);

	/* Drop the reference taken by preparefileread(). */
	fp_drop(p, fd, fp, 0);
	return error;
}
348
349 static int
read_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)350 read_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
351 off_t offset, int flags, user_ssize_t *retval)
352 {
353 UIO_STACKBUF(uio_buf, 1);
354 uio_t uio;
355 int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
356
357 if (nbyte > INT_MAX) {
358 *retval = 0;
359 return EINVAL;
360 }
361
362 uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ,
363 &uio_buf[0], sizeof(uio_buf));
364
365 if (uio_addiov(uio, buf, nbyte) != 0) {
366 *retval = 0;
367 return EINVAL;
368 }
369
370 return readv_internal(p, fd, uio, flags, retval);
371 }
372
/* Non-cancellable read(2) entry point; offset -1 means "use file offset". */
int
read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
{
	return read_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
	           retval);
}
379
380 /*
381 * Read system call.
382 *
383 * Returns: 0 Success
384 * preparefileread:EBADF
385 * preparefileread:ESPIPE
386 * preparefileread:ENXIO
387 * preparefileread:EBADF
388 * dofileread:???
389 */
390 int
read(struct proc * p,struct read_args * uap,user_ssize_t * retval)391 read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
392 {
393 __pthread_testcancel(1);
394 return read_nocancel(p, (struct read_nocancel_args *)uap, retval);
395 }
396
/* Non-cancellable pread(2): positioned read at uap->offset (FOF_OFFSET). */
int
pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace fd, size, and the 64-bit offset split into two 32-bit halves. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	return read_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, retval);
}
406
407 /*
408 * Pread system call
409 *
410 * Returns: 0 Success
411 * preparefileread:EBADF
412 * preparefileread:ESPIPE
413 * preparefileread:ENXIO
414 * preparefileread:EBADF
415 * dofileread:???
416 */
417 int
pread(struct proc * p,struct pread_args * uap,user_ssize_t * retval)418 pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
419 {
420 __pthread_testcancel(1);
421 return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval);
422 }
423
424 /*
425 * Vector read.
426 *
427 * Returns: 0 Success
428 * EINVAL
429 * ENOMEM
430 * preparefileread:EBADF
431 * preparefileread:ESPIPE
432 * preparefileread:ENXIO
433 * preparefileread:EBADF
434 * copyin:EFAULT
435 * rd_uio:???
436 */
437 static int
readv_uio(struct proc * p,int fd,user_addr_t user_iovp,int iovcnt,off_t offset,int flags,user_ssize_t * retval)438 readv_uio(struct proc *p, int fd,
439 user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
440 user_ssize_t *retval)
441 {
442 uio_t uio = NULL;
443 int error;
444 struct user_iovec *iovp;
445
446 if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) {
447 error = EINVAL;
448 goto out;
449 }
450
451 uio = uio_create(iovcnt, offset,
452 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
453 UIO_READ);
454
455 iovp = uio_iovsaddr(uio);
456 if (iovp == NULL) {
457 error = ENOMEM;
458 goto out;
459 }
460
461 error = copyin_user_iovec_array(user_iovp,
462 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
463 iovcnt, iovp);
464
465 if (error) {
466 goto out;
467 }
468
469 error = readv_internal(p, fd, uio, flags, retval);
470
471 out:
472 if (uio != NULL) {
473 uio_free(uio);
474 }
475
476 return error;
477 }
478
/* Non-cancellable readv(2): vector read at the current file offset. */
int
readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
{
	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval);
}
484
485 /*
486 * Scatter read system call.
487 */
488 int
readv(struct proc * p,struct readv_args * uap,user_ssize_t * retval)489 readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
490 {
491 __pthread_testcancel(1);
492 return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval);
493 }
494
/* Non-cancellable preadv(2): vector read at an explicit offset. */
int
sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval)
{
	return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
	           FOF_OFFSET, retval);
}
501
502 /*
503 * Preadv system call
504 */
505 int
sys_preadv(struct proc * p,struct preadv_args * uap,user_ssize_t * retval)506 sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval)
507 {
508 __pthread_testcancel(1);
509 return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval);
510 }
511
512 /*
513 * Returns: 0 Success
514 * EBADF
515 * ESPIPE
516 * ENXIO
517 * fp_lookup:EBADF
518 * fp_guard_exception:???
519 * valid_for_random_access:ESPIPE
520 * valid_for_random_access:ENXIO
521 */
522 static int
preparefilewrite(struct proc * p,struct fileproc ** fp_ret,int fd,int check_for_pwrite,guardid_t * puguard)523 preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite,
524 guardid_t *puguard)
525 {
526 int error;
527 struct fileproc *fp;
528
529 AUDIT_ARG(fd, fd);
530
531 proc_fdlock_spin(p);
532
533 if (puguard) {
534 error = fp_lookup_guarded(p, fd, *puguard, &fp, 1);
535 if (error) {
536 proc_fdunlock(p);
537 return error;
538 }
539
540 if ((fp->f_flag & FWRITE) == 0) {
541 error = EBADF;
542 goto out;
543 }
544 } else {
545 error = fp_lookup(p, fd, &fp, 1);
546 if (error) {
547 proc_fdunlock(p);
548 return error;
549 }
550
551 /* Allow EBADF first. */
552 if ((fp->f_flag & FWRITE) == 0) {
553 error = EBADF;
554 goto out;
555 }
556
557 if (fp_isguarded(fp, GUARD_WRITE)) {
558 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
559 goto out;
560 }
561 }
562
563 if (check_for_pwrite) {
564 if ((error = valid_for_random_access(fp))) {
565 goto out;
566 }
567 }
568
569 *fp_ret = fp;
570
571 proc_fdunlock(p);
572 return 0;
573
574 out:
575 fp_drop(p, fd, fp, 1);
576 proc_fdunlock(p);
577 return error;
578 }
579
/*
 * Perform the write described by `uio` against `fp`, reporting the number
 * of bytes transferred through `retval`.  Interrupted transfers that moved
 * data are reported as success; EPIPE raises SIGPIPE (unless suppressed);
 * any successful transfer marks the fileglob as written.
 */
static int
fp_writev(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
	int error;
	user_ssize_t count;

	if ((error = uio_calculateresid(uio))) {
		*retval = 0;
		return error;
	}

	count = uio_resid(uio);
	error = fo_write(fp, uio, flags, ctx);

	switch (error) {
	case ERESTART:
	case EINTR:
	case EWOULDBLOCK:
		/* A partial transfer masks an interruption-style error. */
		if (uio_resid(uio) != count) {
			error = 0;
		}
		break;

	case EPIPE:
		/* Sockets handle SIGPIPE themselves; honor FG_NOSIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET &&
		    (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) {
			/* XXX Raise the signal on the thread? */
			psignal(vfs_context_proc(ctx), SIGPIPE);
		}
		break;

	default:
		break;
	}

	/* Any bytes written at all flag the fileglob as dirtied. */
	if ((*retval = count - uio_resid(uio))) {
		os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed);
	}

	return error;
}
622
623 /*
624 * Returns: 0 Success
625 * EINVAL
626 * <fo_write>:EPIPE
627 * <fo_write>:??? [indirect through struct fileops]
628 */
629 __private_extern__ int
dofilewrite(vfs_context_t ctx,struct fileproc * fp,user_addr_t bufp,user_size_t nbyte,off_t offset,int flags,user_ssize_t * retval)630 dofilewrite(vfs_context_t ctx, struct fileproc *fp,
631 user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
632 user_ssize_t *retval)
633 {
634 UIO_STACKBUF(uio_buf, 1);
635 uio_t uio;
636 int spacetype;
637
638 if (nbyte > INT_MAX) {
639 *retval = 0;
640 return EINVAL;
641 }
642
643 spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
644 uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE, &uio_buf[0],
645 sizeof(uio_buf));
646
647 if (uio_addiov(uio, bufp, nbyte) != 0) {
648 *retval = 0;
649 return EINVAL;
650 }
651
652 return fp_writev(ctx, fp, uio, flags, retval);
653 }
654
/*
 * Common back end for all write variants: validate the descriptor
 * (optionally against a guard id), switch to the file's opened
 * credential, and perform the transfer.
 */
static int
writev_internal(struct proc *p, int fd, uio_t uio, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	struct fileproc *fp = NULL;
	struct vfs_context context;
	int error;

	/* FOF_OFFSET set means this is a positioned (pwrite-style) call. */
	if ((error = preparefilewrite(p, &fp, fd, flags & FOF_OFFSET, puguard))) {
		*retval = 0;
		return error;
	}

	/* Use the credential the file was opened with, not the caller's. */
	context = *(vfs_context_current());
	context.vc_ucred = fp->fp_glob->fg_cred;

	error = fp_writev(&context, fp, uio, flags, retval);

	/* Drop the reference taken by preparefilewrite(). */
	fp_drop(p, fd, fp, 0);
	return error;
}
676
677 int
write_internal(struct proc * p,int fd,user_addr_t buf,user_size_t nbyte,off_t offset,int flags,guardid_t * puguard,user_ssize_t * retval)678 write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
679 off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval)
680 {
681 UIO_STACKBUF(uio_buf, 1);
682 uio_t uio;
683 int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
684
685 if (nbyte > INT_MAX) {
686 *retval = 0;
687 return EINVAL;
688 }
689
690 uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE,
691 &uio_buf[0], sizeof(uio_buf));
692
693 if (uio_addiov(uio, buf, nbyte) != 0) {
694 *retval = 0;
695 return EINVAL;
696 }
697
698 return writev_internal(p, fd, uio, flags, puguard, retval);
699 }
700
/* Non-cancellable write(2); offset -1 means "use file offset", no guard. */
int
write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
{
	return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
	           NULL, retval);
}
707
708 /*
709 * Write system call
710 *
711 * Returns: 0 Success
712 * EBADF
713 * fp_lookup:EBADF
714 * dofilewrite:???
715 */
716 int
write(struct proc * p,struct write_args * uap,user_ssize_t * retval)717 write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
718 {
719 __pthread_testcancel(1);
720 return write_nocancel(p, (struct write_nocancel_args *)uap, retval);
721 }
722
/* Non-cancellable pwrite(2): positioned write at uap->offset (FOF_OFFSET). */
int
pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
{
	/* Trace fd, size, and the 64-bit offset split into two 32-bit halves. */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	/* XXX: Should be < 0 instead? (See man page + pwritev) */
	if (uap->offset == (off_t)-1) {
		return EINVAL;
	}

	return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
737
738 /*
739 * pwrite system call
740 *
741 * Returns: 0 Success
742 * EBADF
743 * ESPIPE
744 * ENXIO
745 * EINVAL
746 * fp_lookup:EBADF
747 * dofilewrite:???
748 */
749 int
pwrite(struct proc * p,struct pwrite_args * uap,user_ssize_t * retval)750 pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
751 {
752 __pthread_testcancel(1);
753 return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval);
754 }
755
/*
 * Vector write: copy the user's iovec array into a freshly-allocated uio
 * and perform the transfer through writev_internal().  A negative offset
 * is rejected up front (pwritev semantics).
 */
int
writev_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
	uio_t uio = NULL;
	int error;
	struct user_iovec *iovp;

	if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) {
		error = EINVAL;
		goto out;
	}

	uio = uio_create(iovcnt, offset,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);

	/* The iovec backing store lives inside the uio allocation. */
	iovp = uio_iovsaddr(uio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = copyin_user_iovec_array(user_iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    iovcnt, iovp);

	if (error) {
		goto out;
	}

	error = writev_internal(p, fd, uio, flags, puguard, retval);

out:
	if (uio != NULL) {
		uio_free(uio);
	}

	return error;
}
797
/* Non-cancellable writev(2): vector write at the current file offset. */
int
writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
{
	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, NULL, retval);
}
803
804 /*
805 * Gather write system call
806 */
807 int
writev(struct proc * p,struct writev_args * uap,user_ssize_t * retval)808 writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
809 {
810 __pthread_testcancel(1);
811 return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval);
812 }
813
/* Non-cancellable pwritev(2): vector write at an explicit offset. */
int
sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval)
{
	return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
	           FOF_OFFSET, NULL, retval);
}
820
821 /*
822 * Pwritev system call
823 */
824 int
sys_pwritev(struct proc * p,struct pwritev_args * uap,user_ssize_t * retval)825 sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval)
826 {
827 __pthread_testcancel(1);
828 return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval);
829 }
830
831 /*
832 * Ioctl system call
833 *
834 * Returns: 0 Success
835 * EBADF
836 * ENOTTY
837 * ENOMEM
838 * ESRCH
839 * copyin:EFAULT
840 * copyoutEFAULT
841 * fp_lookup:EBADF Bad file descriptor
842 * fo_ioctl:???
843 */
844 int
ioctl(struct proc * p,struct ioctl_args * uap,__unused int32_t * retval)845 ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
846 {
847 struct fileproc *fp = NULL;
848 int error = 0;
849 u_int size = 0;
850 caddr_t datap = NULL, memp = NULL;
851 boolean_t is64bit = FALSE;
852 int tmp = 0;
853 #define STK_PARAMS 128
854 char stkbuf[STK_PARAMS] = {};
855 int fd = uap->fd;
856 u_long com = uap->com;
857 struct vfs_context context = *vfs_context_current();
858
859 AUDIT_ARG(fd, uap->fd);
860 AUDIT_ARG(addr, uap->data);
861
862 is64bit = proc_is64bit(p);
863 #if CONFIG_AUDIT
864 if (is64bit) {
865 AUDIT_ARG(value64, com);
866 } else {
867 AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
868 }
869 #endif /* CONFIG_AUDIT */
870
871 /*
872 * Interpret high order word to find amount of data to be
873 * copied to/from the user's address space.
874 */
875 size = IOCPARM_LEN(com);
876 if (size > IOCPARM_MAX) {
877 return ENOTTY;
878 }
879 if (size > sizeof(stkbuf)) {
880 memp = (caddr_t)kalloc_data(size, Z_WAITOK);
881 if (memp == 0) {
882 return ENOMEM;
883 }
884 datap = memp;
885 } else {
886 datap = &stkbuf[0];
887 }
888 if (com & IOC_IN) {
889 if (size) {
890 error = copyin(uap->data, datap, size);
891 if (error) {
892 goto out_nofp;
893 }
894 } else {
895 /* XXX - IOC_IN and no size? we should proably return an error here!! */
896 if (is64bit) {
897 *(user_addr_t *)datap = uap->data;
898 } else {
899 *(uint32_t *)datap = (uint32_t)uap->data;
900 }
901 }
902 } else if ((com & IOC_OUT) && size) {
903 /*
904 * Zero the buffer so the user always
905 * gets back something deterministic.
906 */
907 bzero(datap, size);
908 } else if (com & IOC_VOID) {
909 /* XXX - this is odd since IOC_VOID means no parameters */
910 if (is64bit) {
911 *(user_addr_t *)datap = uap->data;
912 } else {
913 *(uint32_t *)datap = (uint32_t)uap->data;
914 }
915 }
916
917 proc_fdlock(p);
918 error = fp_lookup(p, fd, &fp, 1);
919 if (error) {
920 proc_fdunlock(p);
921 goto out_nofp;
922 }
923
924 AUDIT_ARG(file, p, fp);
925
926 if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
927 error = EBADF;
928 goto out;
929 }
930
931 context.vc_ucred = fp->fp_glob->fg_cred;
932
933 #if CONFIG_MACF
934 error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com);
935 if (error) {
936 goto out;
937 }
938 #endif
939
940 switch (com) {
941 case FIONCLEX:
942 fp->fp_flags &= ~FP_CLOEXEC;
943 break;
944
945 case FIOCLEX:
946 fp->fp_flags |= FP_CLOEXEC;
947 break;
948
949 case FIONBIO:
950 // FIXME (rdar://54898652)
951 //
952 // this code is broken if fnctl(F_SETFL), ioctl() are
953 // called concurrently for the same fileglob.
954 if ((tmp = *(int *)datap)) {
955 os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed);
956 } else {
957 os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
958 }
959 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
960 break;
961
962 case FIOASYNC:
963 // FIXME (rdar://54898652)
964 //
965 // this code is broken if fnctl(F_SETFL), ioctl() are
966 // called concurrently for the same fileglob.
967 if ((tmp = *(int *)datap)) {
968 os_atomic_or(&fp->f_flag, FASYNC, relaxed);
969 } else {
970 os_atomic_andnot(&fp->f_flag, FASYNC, relaxed);
971 }
972 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
973 break;
974
975 case FIOSETOWN:
976 tmp = *(int *)datap;
977 if (fp->f_type == DTYPE_SOCKET) {
978 ((struct socket *)fp_get_data(fp))->so_pgid = tmp;
979 break;
980 }
981 if (fp->f_type == DTYPE_PIPE) {
982 error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
983 break;
984 }
985 if (tmp <= 0) {
986 tmp = -tmp;
987 } else {
988 struct proc *p1 = proc_find(tmp);
989 if (p1 == 0) {
990 error = ESRCH;
991 break;
992 }
993 tmp = p1->p_pgrpid;
994 proc_rele(p1);
995 }
996 error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
997 break;
998
999 case FIOGETOWN:
1000 if (fp->f_type == DTYPE_SOCKET) {
1001 *(int *)datap = ((struct socket *)fp_get_data(fp))->so_pgid;
1002 break;
1003 }
1004 error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
1005 *(int *)datap = -*(int *)datap;
1006 break;
1007
1008 default:
1009 error = fo_ioctl(fp, com, datap, &context);
1010 /*
1011 * Copy any data to user, size was
1012 * already set and checked above.
1013 */
1014 if (error == 0 && (com & IOC_OUT) && size) {
1015 error = copyout(datap, uap->data, (u_int)size);
1016 }
1017 break;
1018 }
1019 out:
1020 fp_drop(p, fd, fp, 1);
1021 proc_fdunlock(p);
1022
1023 out_nofp:
1024 if (memp) {
1025 kfree_data(memp, size);
1026 }
1027 return error;
1028 }
1029
/* Wait channel used by the select machinery. */
int selwait;
/* selscan/selprocess run up to two passes over the descriptor bits. */
#define SEL_FIRSTPASS 1
#define SEL_SECONDPASS 2
static int selprocess(struct proc *p, int error, int sel_pass);
static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct select_set *selset);
static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup);
static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim);
static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);
1040
1041 /*
1042 * This is used for the special device nodes that do not implement
1043 * a proper kevent filter (see filt_specattach).
1044 *
1045 * In order to enable kevents on those, the spec_filtops will pretend
1046 * to call select, and try to sniff the selrecord(), if it observes one,
1047 * the knote is attached, which pairs with selwakeup() or selthreadclear().
1048 *
1049 * The last issue remaining, is that we need to serialize filt_specdetach()
1050 * with this, but it really can't know the "selinfo" or any locking domain.
1051 * To make up for this, We protect knote list operations with a global lock,
1052 * which give us a safe shared locking domain.
1053 *
1054 * Note: It is a little distasteful, but we really have very few of those.
1055 * The big problem here is that sharing a lock domain without
1056 * any kind of shared knowledge is a little complicated.
1057 *
1058 * 1. filters can really implement their own kqueue integration
1059 * to side step this,
1060 *
1061 * 2. There's an opportunity to pick a private lock in selspec_attach()
1062 * because both the selinfo and the knote are locked at that time.
1063 * The cleanup story is however a little complicated.
1064 */
/* Global spin lock serializing selspec knote-list operations (see above). */
static LCK_GRP_DECLARE(selspec_grp, "spec_filtops");
static LCK_SPIN_DECLARE(selspec_lock, &selspec_grp);

/*
 * Hook a knote onto a selinfo's note list (spec-device kevent emulation).
 *
 * The "primitive" lock is held.
 * The knote lock is held.
 */
void
selspec_attach(struct knote *kn, struct selinfo *si)
{
	struct selinfo *cur = knote_kn_hook_get_raw(kn);

	if (cur == NULL) {
		/* First attach: mark the selinfo and link the knote under the
		 * global selspec lock. */
		si->si_flags |= SI_SELSPEC;
		lck_spin_lock(&selspec_lock);
		knote_kn_hook_set_raw(kn, (void *) si);
		KNOTE_ATTACH(&si->si_note, kn);
		lck_spin_unlock(&selspec_lock);
	} else {
		/*
		 * selspec_attach() can be called from e.g. filt_spectouch()
		 * which might be called before any event was dequeued.
		 *
		 * It is hence not impossible for the knote already be hooked.
		 *
		 * Note that selwakeup_internal() could possibly
		 * already have cleared this pointer. This is a race
		 * that filt_specprocess will debounce.
		 */
		assert(si->si_flags & SI_SELSPEC);
		assert(cur == si);
	}
}
1098
1099 /*
1100 * The "primitive" lock is _not_ held.
1101 *
1102 * knote "lock" is held
1103 */
1104 void
selspec_detach(struct knote * kn)1105 selspec_detach(struct knote *kn)
1106 {
1107 lck_spin_lock(&selspec_lock);
1108
1109 if (!KNOTE_IS_AUTODETACHED(kn)) {
1110 struct selinfo *sip = knote_kn_hook_get_raw(kn);
1111 if (sip) {
1112 KNOTE_DETACH(&sip->si_note, kn);
1113 }
1114 }
1115
1116 knote_kn_hook_set_raw(kn, NULL);
1117
1118 lck_spin_unlock(&selspec_lock);
1119 }
1120
1121 /*
1122 * Select system call.
1123 *
1124 * Returns: 0 Success
1125 * EINVAL Invalid argument
1126 * EAGAIN Nonconformant error if allocation fails
1127 */
1128 int
select(struct proc * p,struct select_args * uap,int32_t * retval)1129 select(struct proc *p, struct select_args *uap, int32_t *retval)
1130 {
1131 __pthread_testcancel(1);
1132 return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
1133 }
1134
1135 int
select_nocancel(struct proc * p,struct select_nocancel_args * uap,int32_t * retval)1136 select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
1137 {
1138 uint64_t timeout = 0;
1139
1140 if (uap->tv) {
1141 int err;
1142 struct timeval atv;
1143 if (IS_64BIT_PROCESS(p)) {
1144 struct user64_timeval atv64;
1145 err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
1146 /* Loses resolution - assume timeout < 68 years */
1147 atv.tv_sec = (__darwin_time_t)atv64.tv_sec;
1148 atv.tv_usec = atv64.tv_usec;
1149 } else {
1150 struct user32_timeval atv32;
1151 err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
1152 atv.tv_sec = atv32.tv_sec;
1153 atv.tv_usec = atv32.tv_usec;
1154 }
1155 if (err) {
1156 return err;
1157 }
1158
1159 if (itimerfix(&atv)) {
1160 err = EINVAL;
1161 return err;
1162 }
1163
1164 clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
1165 }
1166
1167 return select_internal(p, uap, timeout, retval);
1168 }
1169
1170 int
pselect(struct proc * p,struct pselect_args * uap,int32_t * retval)1171 pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
1172 {
1173 __pthread_testcancel(1);
1174 return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
1175 }
1176
/*
 * pselect_nocancel
 *
 * Core of pselect(): optionally installs a temporary signal mask and an
 * absolute deadline, then runs the shared select path via select_internal().
 */
int
pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
{
	int err;
	struct uthread *ut;
	uint64_t timeout = 0;

	if (uap->ts) {
		struct timespec ts;

		/* Copy in the timeout using the caller's ABI (64- vs 32-bit). */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
			ts.tv_sec = (__darwin_time_t)ts64.tv_sec;
			ts.tv_nsec = (long)ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}
		if (err) {
			return err;
		}

		if (!timespec_is_valid(&ts)) {
			return EINVAL;
		}
		/* Convert the relative timeout into an absolute deadline. */
		clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
	}

	ut = current_uthread();

	if (uap->mask != USER_ADDR_NULL) {
		/* save current mask, then copyin and set new mask */
		sigset_t newset;
		err = copyin(uap->mask, &newset, sizeof(sigset_t));
		if (err) {
			return err;
		}
		/*
		 * Remember the previous mask so it can be restored when the
		 * select completes (here or in selprocess()'s continuation
		 * path); sigcantmask can never be blocked.
		 */
		ut->uu_oldmask = ut->uu_sigmask;
		ut->uu_flag |= UT_SAS_OLDMASK;
		ut->uu_sigmask = (newset & ~sigcantmask);
	}

	err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);

	if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
		/*
		 * Restore old mask (direct return case). NOTE: EINTR can also be returned
		 * if the thread is cancelled. In that case, we don't reset the signal
		 * mask to its original value (which usually happens in the signal
		 * delivery path). This behavior is permitted by POSIX.
		 */
		ut->uu_sigmask = ut->uu_oldmask;
		ut->uu_oldmask = 0;
		ut->uu_flag &= ~UT_SAS_OLDMASK;
	}

	return err;
}
1238
1239 void
select_cleanup_uthread(struct _select * sel)1240 select_cleanup_uthread(struct _select *sel)
1241 {
1242 kfree_data(sel->ibits, 2 * sel->nbytes);
1243 sel->ibits = sel->obits = NULL;
1244 sel->nbytes = 0;
1245 }
1246
1247 static int
select_grow_uthread_cache(struct _select * sel,uint32_t nbytes)1248 select_grow_uthread_cache(struct _select *sel, uint32_t nbytes)
1249 {
1250 uint32_t *buf;
1251
1252 buf = kalloc_data(2 * nbytes, Z_WAITOK | Z_ZERO);
1253 if (buf) {
1254 select_cleanup_uthread(sel);
1255 sel->ibits = buf;
1256 sel->obits = buf + nbytes / sizeof(uint32_t);
1257 sel->nbytes = nbytes;
1258 return true;
1259 }
1260 return false;
1261 }
1262
1263 static void
select_bzero_uthread_cache(struct _select * sel)1264 select_bzero_uthread_cache(struct _select *sel)
1265 {
1266 bzero(sel->ibits, sel->nbytes * 2);
1267 }
1268
1269 /*
1270 * Generic implementation of {,p}select. Care: we type-pun uap across the two
1271 * syscalls, which differ slightly. The first 4 arguments (nfds and the fd sets)
1272 * are identical. The 5th (timeout) argument points to different types, so we
1273 * unpack in the syscall-specific code, but the generic code still does a null
1274 * check on this argument to determine if a timeout was specified.
1275 */
static int
select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
{
	struct uthread *uth = current_uthread();
	struct _select *sel = &uth->uu_select;
	struct _select_data *seldata = &uth->uu_save.uus_select_data;
	int error = 0;
	u_int ni, nw;

	*retval = 0;

	/*
	 * Stash the call state in the uthread save area: selprocess() may be
	 * re-entered via the selcontinue() continuation after blocking, at
	 * which point this stack frame no longer exists.
	 */
	seldata->abstime = timeout;
	seldata->args = uap;
	seldata->retval = retval;
	seldata->count = 0;

	if (uap->nd < 0) {
		return EINVAL;
	}

	if (uap->nd > p->p_fd.fd_nfiles) {
		uap->nd = p->p_fd.fd_nfiles; /* forgiving; slightly wrong */
	}
	nw = howmany(uap->nd, NFDBITS);
	ni = nw * sizeof(fd_mask);

	/*
	 * if the previously allocated space for the bits is smaller than
	 * what is requested or no space has yet been allocated for this
	 * thread, allocate enough space now.
	 *
	 * Note: If this process fails, select() will return EAGAIN; this
	 * is the same thing poll() returns in a no-memory situation, but
	 * it is not a POSIX compliant error code for select().
	 */
	if (sel->nbytes >= (3 * ni)) {
		select_bzero_uthread_cache(sel);
	} else if (!select_grow_uthread_cache(sel, 3 * ni)) {
		return EAGAIN;
	}

	/*
	 * get the bits from the user address space
	 */
#define getbits(name, x) \
	(uap->name ? copyin(uap->name, &sel->ibits[(x) * nw], ni) : 0)

	if ((error = getbits(in, 0))) {
		return error;
	}
	if ((error = getbits(ou, 1))) {
		return error;
	}
	if ((error = getbits(ex, 2))) {
		return error;
	}
#undef getbits

	/* Count the set bits and take an fp_iocount reference per fd. */
	if ((error = selcount(p, sel->ibits, uap->nd, &seldata->count))) {
		return error;
	}

	/* Lazily allocate this thread's private select waitq set. */
	if (uth->uu_selset == NULL) {
		uth->uu_selset = select_set_alloc();
	}
	return selprocess(p, 0, SEL_FIRSTPASS);
}
1343
1344 static int
selcontinue(int error)1345 selcontinue(int error)
1346 {
1347 return selprocess(current_proc(), error, SEL_SECONDPASS);
1348 }
1349
1350
1351 /*
1352 * selprocess
1353 *
1354 * Parameters: error The error code from our caller
1355 * sel_pass The pass we are on
1356 */
int
selprocess(struct proc *p, int error, int sel_pass)
{
	struct uthread *uth = current_uthread();
	struct _select *sel = &uth->uu_select;
	struct _select_data *seldata = &uth->uu_save.uus_select_data;
	struct select_nocancel_args *uap = seldata->args;
	int *retval = seldata->retval;

	int unwind = 1;
	int prepost = 0;
	int somewakeup = 0;
	int doretry = 0;
	wait_result_t wait_result;

	/* Nothing to unwind if setup failed before taking iocounts, or no fds. */
	if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) {
		unwind = 0;
	}
	if (seldata->count == 0) {
		unwind = 0;
	}
retry:
	if (error != 0) {
		goto done;
	}

	OSBitOrAtomic(P_SELECT, &p->p_flag);

	/* skip scans if the select is just for timeouts */
	if (seldata->count) {
		error = selscan(p, sel, seldata, uap->nd, retval, sel_pass,
		    uth->uu_selset);
		if (error || *retval) {
			goto done;
		}
		if (prepost || somewakeup) {
			/*
			 * A wakeup raced us: someone else may already have
			 * consumed the data that fired the event, so go
			 * around for another scan if time permits.
			 */
			prepost = 0;
			somewakeup = 0;
			doretry = 1;
		}
	}

	/* Timed select: give up once the absolute deadline has passed. */
	if (uap->tv) {
		uint64_t now;

		clock_get_uptime(&now);
		if (now >= seldata->abstime) {
			goto done;
		}
	}

	if (doretry) {
		/* cleanup obits and try again */
		doretry = 0;
		sel_pass = SEL_FIRSTPASS;
		goto retry;
	}

	/*
	 * To effect a poll, the timeout argument should be
	 * non-nil, pointing to a zero-valued timeval structure.
	 */
	if (uap->tv && seldata->abstime == 0) {
		goto done;
	}

	/* No spurious wakeups due to collisions; no need to check for them. */
	if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
		sel_pass = SEL_FIRSTPASS;
		goto retry;
	}

	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);

	/* if the select is just for timeout skip check */
	if (seldata->count && (sel_pass == SEL_SECONDPASS)) {
		panic("selprocess: 2nd pass assertwaiting");
	}

	/* Arm a wait on the thread's select set before going to sleep. */
	wait_result = waitq_assert_wait64_leeway(uth->uu_selset,
	    NO_EVENT64, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL,
	    seldata->abstime,
	    TIMEOUT_NO_LEEWAY);
	if (wait_result != THREAD_AWAKENED) {
		/* there are no preposted events */
		error = tsleep1(NULL, PSOCK | PCATCH,
		    "select", 0, selcontinue);
	} else {
		prepost = 1;
		error = 0;
	}

	if (error == 0) {
		sel_pass = SEL_SECONDPASS;
		if (!prepost) {
			somewakeup = 1;
		}
		goto retry;
	}
done:
	if (unwind) {
		/* Drop the fp_iocounts and waitq linkages taken by selcount/selscan. */
		seldrop(p, sel->ibits, uap->nd, seldata->count);
		select_set_reset(uth->uu_selset);
	}
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* select is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == EWOULDBLOCK) {
		error = 0;
	}

	/* Copy the output bit vectors back out to user space on success. */
	if (error == 0) {
		uint32_t nw = howmany(uap->nd, NFDBITS);
		uint32_t ni = nw * sizeof(fd_mask);

#define putbits(name, x) \
	(uap->name ? copyout(&sel->obits[(x) * nw], uap->name, ni) : 0)
		int e0 = putbits(in, 0);
		int e1 = putbits(ou, 1);
		int e2 = putbits(ex, 2);

		error = e0 ?: e1 ?: e2;
#undef putbits
	}

	/* pselect(): restore the caller's signal mask on the continuation path. */
	if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
		/* restore signal mask - continuation case */
		uth->uu_sigmask = uth->uu_oldmask;
		uth->uu_oldmask = 0;
		uth->uu_flag &= ~UT_SAS_OLDMASK;
	}

	return error;
}
1499
1500
1501 /**
1502 * remove the fileproc's underlying waitq from the supplied waitq set;
1503 * clear FP_INSELECT when appropriate
1504 *
1505 * Parameters:
1506 * fp File proc that is potentially currently in select
1507 * selset Waitq set to which the fileproc may belong
1508 * (usually this is the thread's private waitq set)
1509 * Conditions:
1510 * proc_fdlock is held
1511 */
1512 static void
selunlinkfp(struct fileproc * fp,struct select_set * selset)1513 selunlinkfp(struct fileproc *fp, struct select_set *selset)
1514 {
1515 if (fp->fp_flags & FP_INSELECT) {
1516 if (fp->fp_guard_attrs) {
1517 if (fp->fp_guard->fpg_wset == selset) {
1518 fp->fp_guard->fpg_wset = NULL;
1519 fp->fp_flags &= ~FP_INSELECT;
1520 }
1521 } else {
1522 if (fp->fp_wset == selset) {
1523 fp->fp_wset = NULL;
1524 fp->fp_flags &= ~FP_INSELECT;
1525 }
1526 }
1527 }
1528 }
1529
1530 /**
1531 * connect a fileproc to the given selset, potentially bridging to a waitq
1532 * pointed to indirectly by wq_data
1533 *
1534 * Parameters:
1535 * fp File proc potentially currently in select
1536 * selset Waitq set to which the fileproc should now belong
1537 * (usually this is the thread's private waitq set)
1538 *
1539 * Conditions:
1540 * proc_fdlock is held
1541 */
1542 static void
sellinkfp(struct fileproc * fp,struct select_set * selset,waitq_link_t * linkp)1543 sellinkfp(struct fileproc *fp, struct select_set *selset, waitq_link_t *linkp)
1544 {
1545 if ((fp->fp_flags & FP_INSELECT) == 0) {
1546 if (fp->fp_guard_attrs) {
1547 fp->fp_guard->fpg_wset = selset;
1548 } else {
1549 fp->fp_wset = selset;
1550 }
1551 fp->fp_flags |= FP_INSELECT;
1552 } else {
1553 fp->fp_flags |= FP_SELCONFLICT;
1554 if (linkp->wqlh == NULL) {
1555 *linkp = waitq_link_alloc(WQT_SELECT_SET);
1556 }
1557 select_set_link(&select_conflict_queue, selset, linkp);
1558 }
1559 }
1560
1561
1562 /*
1563 * selscan
1564 *
1565 * Parameters: p Process performing the select
1566 * sel The per-thread select context structure
1567 * nfd The number of file descriptors to scan
1568 * retval The per thread system call return area
1569 * sel_pass Which pass this is; allowed values are
1570 * SEL_FIRSTPASS and SEL_SECONDPASS
1571 * selset The per thread wait queue set
1572 *
1573 * Returns: 0 Success
1574 * EIO Invalid p->p_fd field XXX Obsolete?
1575 * EBADF One of the files in the bit vector is
1576 * invalid.
1577 */
static int
selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct select_set *selset)
{
	int msk, i, j, fd;
	u_int32_t bits;
	struct fileproc *fp;
	int n = 0; /* count of bits */
	int nc = 0; /* bit vector offset (nc'th bit) */
	static int flag[3] = { FREAD, FWRITE, 0 };
	u_int32_t *iptr, *optr;
	u_int nw;
	u_int32_t *ibits, *obits;
	int count;
	struct vfs_context context = {
		.vc_thread = current_thread(),
	};
	waitq_link_t link = WQL_NULL;
	void *s_data;

	ibits = sel->ibits;
	obits = sel->obits;

	nw = howmany(nfd, NFDBITS);

	count = seldata->count;

	nc = 0;
	if (!count) {
		*retval = 0;
		return 0;
	}

	if (sel_pass == SEL_FIRSTPASS) {
		/*
		 * Make sure the waitq-set is all clean:
		 *
		 * select loops until it finds at least one event, however it
		 * doesn't mean that the event that woke up select is still
		 * fired by the time the second pass runs, and then
		 * select_internal will loop back to a first pass.
		 */
		select_set_reset(selset);
		/* non-NULL s_data tells selrecord() to link through this waitq link */
		s_data = &link;
	} else {
		/* second pass: selrecord() becomes a no-op */
		s_data = NULL;
	}

	proc_fdlock(p);
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		optr = (u_int32_t *)&obits[msk * nw];

		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];

			/* visit each set bit; ffs() is 1-based, hence the --j */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);

				fp = fp_get_noref_locked(p, fd);
				if (fp == NULL) {
					/*
					 * If we abort because of a bad
					 * fd, let the caller unwind...
					 */
					proc_fdunlock(p);
					return EBADF;
				}
				if (sel_pass == SEL_SECONDPASS) {
					/* done waiting: unhook the set from the fileproc */
					selunlinkfp(fp, selset);
				} else if (link.wqlh == NULL) {
					link = waitq_link_alloc(WQT_SELECT_SET);
				}

				context.vc_ucred = fp->f_cred;

				/* The select; set the bit, if true */
				if (fo_select(fp, flag[msk], s_data, &context)) {
					optr[fd / NFDBITS] |= (1U << (fd % NFDBITS));
					n++;
				}
				if (sel_pass == SEL_FIRSTPASS) {
					/*
					 * Hook up the thread's waitq set either to
					 * the fileproc structure, or to the global
					 * conflict queue: but only on the first
					 * select pass.
					 */
					sellinkfp(fp, selset, &link);
				}
				nc++;
			}
		}
	}
	proc_fdunlock(p);

	/* free the spare link if nobody consumed it */
	if (link.wqlh) {
		waitq_link_free(WQT_SELECT_SET, link);
	}

	*retval = n;
	return 0;
}
1681
1682 static int poll_callback(struct kevent_qos_s *, kevent_ctx_t);
1683
1684 int
poll(struct proc * p,struct poll_args * uap,int32_t * retval)1685 poll(struct proc *p, struct poll_args *uap, int32_t *retval)
1686 {
1687 __pthread_testcancel(1);
1688 return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval);
1689 }
1690
1691
/*
 * poll_nocancel
 *
 * Implements poll() on top of a private, transient kqueue: each pollfd
 * entry is translated into one or more one-shot kevents, the kqueue is
 * scanned (possibly blocking until the timeout), and poll_callback()
 * translates fired kevents back into revents bits.
 */
int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
	struct pollfd *fds = NULL;
	struct kqueue *kq = NULL;
	int error = 0;
	u_int nfds = uap->nfds;
	u_int rfds = 0;                 /* count of fds with revents set so far */
	rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE);
	size_t ni = nfds * sizeof(struct pollfd);

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	if (nfds > OPEN_MAX ||
	    (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) {
		return EINVAL;
	}

	kq = kqueue_alloc(p);
	if (kq == NULL) {
		return EAGAIN;
	}

	if (nfds) {
		fds = (struct pollfd *)kalloc_data(ni, Z_WAITOK);
		if (NULL == fds) {
			error = EAGAIN;
			goto out;
		}

		error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
		if (error) {
			goto out;
		}
	}

	/* JMM - all this P_SELECT stuff is bogus */
	OSBitOrAtomic(P_SELECT, &p->p_flag);
	for (u_int i = 0; i < nfds; i++) {
		short events = fds[i].events;
		__assert_only int rc;

		/* per spec, ignore fd values below zero */
		if (fds[i].fd < 0) {
			fds[i].revents = 0;
			continue;
		}

		/* convert the poll event into a kqueue kevent */
		struct kevent_qos_s kev = {
			.ident = fds[i].fd,
			.flags = EV_ADD | EV_ONESHOT | EV_POLL,
			.udata = i, /* Index into pollfd array */
		};

		/* Handle input events */
		if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) {
			kev.filter = EVFILT_READ;
			if (events & (POLLPRI | POLLRDBAND)) {
				kev.flags |= EV_OOBAND;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle output events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) {
			kev.filter = EVFILT_WRITE;
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* Handle BSD extension vnode events */
		if ((kev.flags & EV_ERROR) == 0 &&
		    (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) {
			kev.filter = EVFILT_VNODE;
			kev.fflags = 0;
			if (events & POLLEXTEND) {
				kev.fflags |= NOTE_EXTEND;
			}
			if (events & POLLATTRIB) {
				kev.fflags |= NOTE_ATTRIB;
			}
			if (events & POLLNLINK) {
				kev.fflags |= NOTE_LINK;
			}
			if (events & POLLWRITE) {
				kev.fflags |= NOTE_WRITE;
			}
			rc = kevent_register(kq, &kev, NULL);
			assert((rc & FILTER_REGISTER_WAIT) == 0);
		}

		/* A registration failure is reported as POLLNVAL on that fd. */
		if (kev.flags & EV_ERROR) {
			fds[i].revents = POLLNVAL;
			rfds++;
		} else {
			fds[i].revents = 0;
		}
	}

	/*
	 * Did we have any trouble registering?
	 * If user space passed 0 FDs, then respect any timeout value passed.
	 * This is an extremely inefficient sleep. If user space passed one or
	 * more FDs, and we had trouble registering _all_ of them, then bail
	 * out. If a subset of the provided FDs failed to register, then we
	 * will still call the kqueue_scan function.
	 */
	if (nfds && (rfds == nfds)) {
		goto done;
	}

	/* scan for, and possibly wait for, the kevents to trigger */
	kevent_ctx_t kectx = kevent_get_context(current_thread());
	*kectx = (struct kevent_ctx_s){
		.kec_process_noutputs = rfds,
		.kec_process_flags = KEVENT_FLAG_POLL,
		.kec_deadline = 0, /* wait forever */
		.kec_poll_fds = fds,
	};

	/*
	 * If any events have trouble registering, an event has fired and we
	 * shouldn't wait for events in kqueue_scan.
	 */
	if (rfds) {
		kectx->kec_process_flags |= KEVENT_FLAG_IMMEDIATE;
	} else if (uap->timeout != -1) {
		/* convert the relative millisecond timeout to an absolute deadline */
		clock_interval_to_deadline(uap->timeout, NSEC_PER_MSEC,
		    &kectx->kec_deadline);
	}

	error = kqueue_scan(kq, kectx->kec_process_flags, kectx, poll_callback);
	rfds = kectx->kec_process_noutputs;

done:
	OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
	/* poll is not restarted after signals... */
	if (error == ERESTART) {
		error = EINTR;
	}
	if (error == 0) {
		error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
		*retval = rfds;
	}

out:
	kfree_data(fds, ni);

	kqueue_dealloc(kq);
	return error;
}
1851
1852 static int
poll_callback(struct kevent_qos_s * kevp,kevent_ctx_t kectx)1853 poll_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
1854 {
1855 assert(kectx->kec_process_flags & KEVENT_FLAG_POLL);
1856 struct pollfd *fds = &kectx->kec_poll_fds[kevp->udata];
1857
1858 short prev_revents = fds->revents;
1859 short mask = 0;
1860
1861 /* convert the results back into revents */
1862 if (kevp->flags & EV_EOF) {
1863 fds->revents |= POLLHUP;
1864 }
1865 if (kevp->flags & EV_ERROR) {
1866 fds->revents |= POLLERR;
1867 }
1868
1869 switch (kevp->filter) {
1870 case EVFILT_READ:
1871 if (fds->revents & POLLHUP) {
1872 mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND);
1873 } else {
1874 mask = (POLLIN | POLLRDNORM);
1875 if (kevp->flags & EV_OOBAND) {
1876 mask |= (POLLPRI | POLLRDBAND);
1877 }
1878 }
1879 fds->revents |= (fds->events & mask);
1880 break;
1881
1882 case EVFILT_WRITE:
1883 if (!(fds->revents & POLLHUP)) {
1884 fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND));
1885 }
1886 break;
1887
1888 case EVFILT_VNODE:
1889 if (kevp->fflags & NOTE_EXTEND) {
1890 fds->revents |= (fds->events & POLLEXTEND);
1891 }
1892 if (kevp->fflags & NOTE_ATTRIB) {
1893 fds->revents |= (fds->events & POLLATTRIB);
1894 }
1895 if (kevp->fflags & NOTE_LINK) {
1896 fds->revents |= (fds->events & POLLNLINK);
1897 }
1898 if (kevp->fflags & NOTE_WRITE) {
1899 fds->revents |= (fds->events & POLLWRITE);
1900 }
1901 break;
1902 }
1903
1904 if (fds->revents != 0 && prev_revents == 0) {
1905 kectx->kec_process_noutputs++;
1906 }
1907
1908 return 0;
1909 }
1910
/*
 * seltrue
 *
 * Generic select routine for device drivers whose devices are always
 * ready for I/O: unconditionally reports the descriptor as ready.
 */
int
seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
{
	return 1;
}
1916
1917 /*
1918 * selcount
1919 *
1920 * Count the number of bits set in the input bit vector, and establish an
1921 * outstanding fp->fp_iocount for each of the descriptors which will be in
1922 * use in the select operation.
1923 *
1924 * Parameters: p The process doing the select
1925 * ibits The input bit vector
1926 * nfd The number of fd's in the vector
1927 * countp Pointer to where to store the bit count
1928 *
1929 * Returns: 0 Success
1930 * EIO Bad per process open file table
1931 * EBADF One of the bits in the input bit vector
1932 * references an invalid fd
1933 *
1934 * Implicit: *countp (modified) Count of fd's
1935 *
1936 * Notes: This function is the first pass under the proc_fdlock() that
1937 * permits us to recognize invalid descriptors in the bit vector;
1938 * the may, however, not remain valid through the drop and
1939 * later reacquisition of the proc_fdlock().
1940 */
static int
selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
{
	int msk, i, j, fd;
	u_int32_t bits;
	struct fileproc *fp;
	int n = 0;
	u_int32_t *iptr;
	u_int nw;
	int error = 0;
	int need_wakeup = 0;

	nw = howmany(nfd, NFDBITS);

	proc_fdlock(p);
	/* walk the read/write/except vectors in turn */
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];
			/* visit each set bit; ffs() is 1-based, hence the --j */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);

				fp = fp_get_noref_locked(p, fd);
				if (fp == NULL) {
					*countp = 0;
					error = EBADF;
					goto bad;
				}
				/* pin the fileproc for the duration of the select */
				os_ref_retain_locked(&fp->fp_iocount);
				n++;
			}
		}
	}
	proc_fdunlock(p);

	*countp = n;
	return 0;

bad:
	/* Drop the references taken so far before reporting EBADF. */
	if (n == 0) {
		goto out;
	}
	/* Ignore error return; it's already EBADF */
	(void)seldrop_locked(p, ibits, nfd, n, &need_wakeup);

out:
	proc_fdunlock(p);
	if (need_wakeup) {
		wakeup(&p->p_fd.fd_fpdrainwait);
	}
	return error;
}
1993
1994
1995 /*
1996 * seldrop_locked
1997 *
1998 * Drop outstanding wait queue references set up during selscan(); drop the
1999 * outstanding per fileproc fp_iocount picked up during the selcount().
2000 *
2001 * Parameters: p Process performing the select
2002 * ibits Input bit bector of fd's
2003 * nfd Number of fd's
2004 * lim Limit to number of vector entries to
2005 * consider, or -1 for "all"
2006 * inselect True if
2007 * need_wakeup Pointer to flag to set to do a wakeup
2008 * if f_iocont on any descriptor goes to 0
2009 *
2010 * Returns: 0 Success
2011 * EBADF One or more fds in the bit vector
2012 * were invalid, but the rest
2013 * were successfully dropped
2014 *
 * Notes:	An fd may become bad while the proc_fdlock() is not held,
2016 * if a multithreaded application closes the fd out from under
2017 * the in progress select. In this case, we still have to
2018 * clean up after the set up on the remaining fds.
2019 */
2020 static int
seldrop_locked(struct proc * p,u_int32_t * ibits,int nfd,int lim,int * need_wakeup)2021 seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup)
2022 {
2023 int msk, i, j, nc, fd;
2024 u_int32_t bits;
2025 struct fileproc *fp;
2026 u_int32_t *iptr;
2027 u_int nw;
2028 int error = 0;
2029 uthread_t uth = current_uthread();
2030 struct _select_data *seldata;
2031
2032 *need_wakeup = 0;
2033
2034 nw = howmany(nfd, NFDBITS);
2035 seldata = &uth->uu_save.uus_select_data;
2036
2037 nc = 0;
2038 for (msk = 0; msk < 3; msk++) {
2039 iptr = (u_int32_t *)&ibits[msk * nw];
2040 for (i = 0; i < nfd; i += NFDBITS) {
2041 bits = iptr[i / NFDBITS];
2042 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
2043 bits &= ~(1U << j);
2044 /*
2045 * If we've already dropped as many as were
2046 * counted/scanned, then we are done.
2047 */
2048 if (nc >= lim) {
2049 goto done;
2050 }
2051
2052 /*
2053 * We took an I/O reference in selcount,
2054 * so the fp can't possibly be NULL.
2055 */
2056 fp = fp_get_noref_locked_with_iocount(p, fd);
2057 selunlinkfp(fp, uth->uu_selset);
2058
2059 nc++;
2060
2061 const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount);
2062 if (0 == refc) {
2063 panic("fp_iocount overdecrement!");
2064 }
2065
2066 if (1 == refc) {
2067 /*
2068 * The last iocount is responsible for clearing
2069 * selconfict flag - even if we didn't set it -
2070 * and is also responsible for waking up anyone
2071 * waiting on iocounts to drain.
2072 */
2073 if (fp->fp_flags & FP_SELCONFLICT) {
2074 fp->fp_flags &= ~FP_SELCONFLICT;
2075 }
2076 if (p->p_fd.fd_fpdrainwait) {
2077 p->p_fd.fd_fpdrainwait = 0;
2078 *need_wakeup = 1;
2079 }
2080 }
2081 }
2082 }
2083 }
2084 done:
2085 return error;
2086 }
2087
2088
2089 static int
seldrop(struct proc * p,u_int32_t * ibits,int nfd,int lim)2090 seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim)
2091 {
2092 int error;
2093 int need_wakeup = 0;
2094
2095 proc_fdlock(p);
2096 error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup);
2097 proc_fdunlock(p);
2098 if (need_wakeup) {
2099 wakeup(&p->p_fd.fd_fpdrainwait);
2100 }
2101 return error;
2102 }
2103
2104 /*
2105 * Record a select request.
2106 */
void
selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
{
	struct select_set *selset = current_uthread()->uu_selset;

	/* do not record if this is second pass of select */
	if (!s_data) {
		return;
	}

	if (selset == SELSPEC_RECORD_MARKER) {
		/*
		 * The kevent subsystem is trying to sniff
		 * the selinfo::si_note to attach to.
		 */
		((selspec_record_hook_t)s_data)(sip);
	} else {
		/* on a real first pass, s_data is the caller's waitq link */
		waitq_link_t *linkp = s_data;

		/* lazily initialize the selinfo's waitq on first use */
		if (!waitq_is_valid(&sip->si_waitq)) {
			waitq_init(&sip->si_waitq, WQT_SELECT, SYNC_POLICY_FIFO);
		}

		/* note: this checks for pre-existing linkage */
		select_set_link(&sip->si_waitq, selset, linkp);
	}
}
2134
static void
selwakeup_internal(struct selinfo *sip, long hint, wait_result_t wr)
{
	if (sip->si_flags & SI_SELSPEC) {
		/*
		 * The "primitive" lock is held.
		 * The knote lock is not held.
		 *
		 * All knotes will transition their kn_hook to NULL and we will
		 * reinitialize the primitive's klist
		 */
		lck_spin_lock(&selspec_lock);
		knote(&sip->si_note, hint, /*autodetach=*/ true);
		lck_spin_unlock(&selspec_lock);
		sip->si_flags &= ~SI_SELSPEC;
	}

	/*
	 * After selrecord() has been called, selinfo owners must call
	 * at least one of selwakeup() or selthreadclear().
	 *
	 * Use this opportunity to deinit the waitq
	 * so that all linkages are garbage collected
	 * in a combined wakeup-all + unlink + deinit call.
	 */
	select_waitq_wakeup_and_deinit(&sip->si_waitq, NO_EVENT64, wr);
}
2162
2163
void
selwakeup(struct selinfo *sip)
{
	/* Normal wakeup: no knote hint, waiters resume with THREAD_AWAKENED. */
	selwakeup_internal(sip, 0, THREAD_AWAKENED);
}
2169
void
selthreadclear(struct selinfo *sip)
{
	/*
	 * Teardown variant: posts NOTE_REVOKE to any attached knotes and
	 * kicks waiters out with THREAD_RESTART since the selinfo is dying.
	 */
	selwakeup_internal(sip, NOTE_REVOKE, THREAD_RESTART);
}
2175
2176
2177 /*
2178 * gethostuuid
2179 *
2180 * Description: Get the host UUID from IOKit and return it to user space.
2181 *
2182 * Parameters: uuid_buf Pointer to buffer to receive UUID
2183 * timeout Timespec for timout
2184 *
2185 * Returns: 0 Success
2186 * EWOULDBLOCK Timeout is too short
2187 * copyout:EFAULT Bad user buffer
2188 * mac_system_check_info:EPERM Client not allowed to perform this operation
2189 *
2190 * Notes: A timeout seems redundant, since if it's tolerable to not
2191 * have a system UUID in hand, then why ask for one?
2192 */
int
gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
{
	kern_return_t kret;
	int error;
	mach_timespec_t mach_ts; /* for IOKit call */
	__darwin_uuid_t uuid_kern = {}; /* for IOKit call */

	/* Check entitlement */
	if (!IOCurrentTaskHasEntitlement("com.apple.private.getprivatesysid")) {
#if !defined(XNU_TARGET_OS_OSX)
#if CONFIG_MACF
		if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
			/* EPERM invokes userspace upcall if present */
			return error;
		}
#endif
#endif
	}

	/* Convert the 32/64 bit timespec into a mach_timespec_t */
	if (proc_is64bit(p)) {
		struct user64_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		/* narrowing is deliberate: mach_timespec_t fields are 32-bit */
		mach_ts.tv_sec = (unsigned int)ts.tv_sec;
		mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec;
	} else {
		struct user32_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		mach_ts.tv_sec = ts.tv_sec;
		mach_ts.tv_nsec = ts.tv_nsec;
	}

	/* Call IOKit with the stack buffer to get the UUID */
	kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);

	/*
	 * If we get it, copy out the data to the user buffer; note that a
	 * uuid_t is an array of characters, so this is size invariant for
	 * 32 vs. 64 bit.
	 */
	if (kret == KERN_SUCCESS) {
		error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
	} else {
		error = EWOULDBLOCK;
	}

	return error;
}
2248
2249 /*
2250 * ledger
2251 *
2252 * Description: Omnibus system call for ledger operations
2253 */
2254 int
ledger(struct proc * p,struct ledger_args * args,__unused int32_t * retval)2255 ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
2256 {
2257 #if !CONFIG_MACF
2258 #pragma unused(p)
2259 #endif
2260 int rval, pid, len, error;
2261 #ifdef LEDGER_DEBUG
2262 struct ledger_limit_args lla;
2263 #endif
2264 task_t task;
2265 proc_t proc;
2266
2267 /* Finish copying in the necessary args before taking the proc lock */
2268 error = 0;
2269 len = 0;
2270 if (args->cmd == LEDGER_ENTRY_INFO) {
2271 error = copyin(args->arg3, (char *)&len, sizeof(len));
2272 } else if (args->cmd == LEDGER_TEMPLATE_INFO) {
2273 error = copyin(args->arg2, (char *)&len, sizeof(len));
2274 } else if (args->cmd == LEDGER_LIMIT)
2275 #ifdef LEDGER_DEBUG
2276 { error = copyin(args->arg2, (char *)&lla, sizeof(lla));}
2277 #else
2278 { return EINVAL; }
2279 #endif
2280 else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) {
2281 return EINVAL;
2282 }
2283
2284 if (error) {
2285 return error;
2286 }
2287 if (len < 0) {
2288 return EINVAL;
2289 }
2290
2291 rval = 0;
2292 if (args->cmd != LEDGER_TEMPLATE_INFO) {
2293 pid = (int)args->arg1;
2294 proc = proc_find(pid);
2295 if (proc == NULL) {
2296 return ESRCH;
2297 }
2298
2299 #if CONFIG_MACF
2300 error = mac_proc_check_ledger(p, proc, args->cmd);
2301 if (error) {
2302 proc_rele(proc);
2303 return error;
2304 }
2305 #endif
2306
2307 task = proc_task(proc);
2308 }
2309
2310 switch (args->cmd) {
2311 #ifdef LEDGER_DEBUG
2312 case LEDGER_LIMIT: {
2313 if (!kauth_cred_issuser(kauth_cred_get())) {
2314 rval = EPERM;
2315 }
2316 rval = ledger_limit(task, &lla);
2317 proc_rele(proc);
2318 break;
2319 }
2320 #endif
2321 case LEDGER_INFO: {
2322 struct ledger_info info = {};
2323
2324 rval = ledger_info(task, &info);
2325 proc_rele(proc);
2326 if (rval == 0) {
2327 rval = copyout(&info, args->arg2,
2328 sizeof(info));
2329 }
2330 break;
2331 }
2332
2333 case LEDGER_ENTRY_INFO: {
2334 void *buf;
2335 int sz;
2336
2337 #if CONFIG_MEMORYSTATUS
2338 task_ledger_settle_dirty_time(task);
2339 #endif /* CONFIG_MEMORYSTATUS */
2340
2341 rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
2342 proc_rele(proc);
2343 if ((rval == 0) && (len >= 0)) {
2344 sz = len * sizeof(struct ledger_entry_info);
2345 rval = copyout(buf, args->arg2, sz);
2346 kfree_data(buf, sz);
2347 }
2348 if (rval == 0) {
2349 rval = copyout(&len, args->arg3, sizeof(len));
2350 }
2351 break;
2352 }
2353
2354 case LEDGER_TEMPLATE_INFO: {
2355 void *buf;
2356 int sz;
2357
2358 rval = ledger_template_info(&buf, &len);
2359 if ((rval == 0) && (len >= 0)) {
2360 sz = len * sizeof(struct ledger_template_info);
2361 rval = copyout(buf, args->arg1, sz);
2362 kfree_data(buf, sz);
2363 }
2364 if (rval == 0) {
2365 rval = copyout(&len, args->arg2, sizeof(len));
2366 }
2367 break;
2368 }
2369
2370 default:
2371 panic("ledger syscall logic error -- command type %d", args->cmd);
2372 proc_rele(proc);
2373 rval = EINVAL;
2374 }
2375
2376 return rval;
2377 }
2378
/*
 * telemetry: omnibus syscall multiplexing telemetry operations on args->cmd.
 * Note that args->deadline is overloaded: it is a deadline for TIMER_EVENT,
 * a telemetry_pmi enum value for PMI_SETUP, and a mach port name for
 * VOUCHER_NAME.
 */
int
telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
{
	int error = 0;

	switch (args->cmd) {
#if CONFIG_TELEMETRY
	case TELEMETRY_CMD_TIMER_EVENT:
		error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
		break;
	case TELEMETRY_CMD_PMI_SETUP:
		/* args->deadline carries the PMI type; args->interval the period. */
		error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval);
		break;
#endif /* CONFIG_TELEMETRY */
	case TELEMETRY_CMD_VOUCHER_NAME:
		/* Non-zero return from thread_set_voucher_name maps to EINVAL. */
		if (thread_set_voucher_name((mach_port_name_t)args->deadline)) {
			error = EINVAL;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
2406
2407 /*
2408 * Logging
2409 *
2410 * Description: syscall to access kernel logging from userspace
2411 *
2412 * Args:
2413 * tag - used for syncing with userspace on the version.
2414 * flags - flags used by the syscall.
2415 * buffer - userspace address of string to copy.
2416 * size - size of buffer.
2417 */
2418 int
log_data(__unused struct proc * p,struct log_data_args * args,int * retval)2419 log_data(__unused struct proc *p, struct log_data_args *args, int *retval)
2420 {
2421 unsigned int tag = args->tag;
2422 unsigned int flags = args->flags;
2423 user_addr_t buffer = args->buffer;
2424 unsigned int size = args->size;
2425 int ret = 0;
2426 *retval = 0;
2427
2428 /* Only DEXTs are suppose to use this syscall. */
2429 if (!task_is_driver(current_task())) {
2430 return EPERM;
2431 }
2432
2433 /*
2434 * Tag synchronize the syscall version with userspace.
2435 * Tag == 0 => flags == OS_LOG_TYPE
2436 */
2437 if (tag != 0) {
2438 return EINVAL;
2439 }
2440
2441 /*
2442 * OS_LOG_TYPE are defined in libkern/os/log.h
2443 * In userspace they are defined in libtrace/os/log.h
2444 */
2445 if (flags != OS_LOG_TYPE_DEFAULT &&
2446 flags != OS_LOG_TYPE_INFO &&
2447 flags != OS_LOG_TYPE_DEBUG &&
2448 flags != OS_LOG_TYPE_ERROR &&
2449 flags != OS_LOG_TYPE_FAULT) {
2450 return EINVAL;
2451 }
2452
2453 if (size == 0) {
2454 return EINVAL;
2455 }
2456
2457 /* truncate to OS_LOG_DATA_MAX_SIZE */
2458 if (size > OS_LOG_DATA_MAX_SIZE) {
2459 printf("%s: WARNING msg is going to be truncated from %u to %u\n",
2460 __func__, size, OS_LOG_DATA_MAX_SIZE);
2461 size = OS_LOG_DATA_MAX_SIZE;
2462 }
2463
2464 char *log_msg = (char *)kalloc_data(size, Z_WAITOK);
2465 if (!log_msg) {
2466 return ENOMEM;
2467 }
2468
2469 if (copyin(buffer, log_msg, size) != 0) {
2470 ret = EFAULT;
2471 goto out;
2472 }
2473 log_msg[size - 1] = '\0';
2474
2475 /*
2476 * This will log to dmesg and logd.
2477 * The call will fail if the current
2478 * process is not a driverKit process.
2479 */
2480 os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg);
2481
2482 out:
2483 if (log_msg != NULL) {
2484 kfree_data(log_msg, size);
2485 }
2486
2487 return ret;
2488 }
2489
2490 #if DEVELOPMENT || DEBUG
2491
2492 static int
2493 sysctl_mpsc_test_pingpong SYSCTL_HANDLER_ARGS
2494 {
2495 #pragma unused(oidp, arg1, arg2)
2496 uint64_t value = 0;
2497 int error;
2498
2499 error = SYSCTL_IN(req, &value, sizeof(value));
2500 if (error) {
2501 return error;
2502 }
2503
2504 if (error == 0 && req->newptr) {
2505 error = mpsc_test_pingpong(value, &value);
2506 if (error == 0) {
2507 error = SYSCTL_OUT(req, &value, sizeof(value));
2508 }
2509 }
2510
2511 return error;
2512 }
2513 SYSCTL_PROC(_kern, OID_AUTO, mpsc_test_pingpong, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2514 0, 0, sysctl_mpsc_test_pingpong, "Q", "MPSC tests: pingpong");
2515
2516 #endif /* DEVELOPMENT || DEBUG */
2517
/* Telemetry, microstackshots */

SYSCTL_NODE(_kern, OID_AUTO, microstackshot, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    "microstackshot info");

/* Read-only export of the interrupt-driven microstackshot sample rate. */
extern uint32_t telemetry_sample_rate;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, interrupt_sample_rate,
    CTLFLAG_RD | CTLFLAG_LOCKED, &telemetry_sample_rate, 0,
    "interrupt-based sampling rate in Hz");
2527
#if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES)

/* PMI-driven microstackshots: sampling period and the counter used. */
extern uint64_t mt_microstackshot_period;
SYSCTL_QUAD(_kern_microstackshot, OID_AUTO, pmi_sample_period,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_period,
    "PMI sampling rate");
extern unsigned int mt_microstackshot_ctr;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, pmi_sample_counter,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_ctr, 0,
    "PMI counter");

#endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */
2540
/* Remote Time api */
SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api");

#if DEVELOPMENT || DEBUG
#if CONFIG_MACH_BRIDGE_SEND_TIME
/* Non-zero once the mach bridge timestamp machinery has initialized. */
extern _Atomic uint32_t bt_init_flag;
extern uint32_t mach_bridge_timer_enable(uint32_t, int);

SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag,
    CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, "");
2551
2552 static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS
2553 {
2554 #pragma unused(oidp, arg1, arg2)
2555 uint32_t value = 0;
2556 int error = 0;
2557 /* User is querying buffer size */
2558 if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) {
2559 req->oldidx = sizeof(value);
2560 return 0;
2561 }
2562 if (os_atomic_load(&bt_init_flag, acquire)) {
2563 if (req->newptr) {
2564 int new_value = 0;
2565 error = SYSCTL_IN(req, &new_value, sizeof(new_value));
2566 if (error) {
2567 return error;
2568 }
2569 if (new_value == 0 || new_value == 1) {
2570 value = mach_bridge_timer_enable(new_value, 1);
2571 } else {
2572 return EPERM;
2573 }
2574 } else {
2575 value = mach_bridge_timer_enable(0, 0);
2576 }
2577 }
2578 error = SYSCTL_OUT(req, &value, sizeof(value));
2579 return error;
2580 }
2581
2582 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable,
2583 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2584 0, 0, sysctl_mach_bridge_timer_enable, "I", "");
2585
2586 #endif /* CONFIG_MACH_BRIDGE_SEND_TIME */
2587
2588 static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS
2589 {
2590 #pragma unused(oidp, arg1, arg2)
2591 uint64_t ltime = 0, rtime = 0;
2592 if (req->oldptr == USER_ADDR_NULL) {
2593 req->oldidx = sizeof(rtime);
2594 return 0;
2595 }
2596 if (req->newptr) {
2597 int error = SYSCTL_IN(req, <ime, sizeof(ltime));
2598 if (error) {
2599 return error;
2600 }
2601 }
2602 rtime = mach_bridge_remote_time(ltime);
2603 return SYSCTL_OUT(req, &rtime, sizeof(rtime));
2604 }
2605 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time,
2606 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2607 0, 0, sysctl_mach_bridge_remote_time, "Q", "");
2608
2609 #endif /* DEVELOPMENT || DEBUG */
2610
2611 #if CONFIG_MACH_BRIDGE_RECV_TIME
2612 extern struct bt_params bt_params_get_latest(void);
2613
2614 static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS
2615 {
2616 #pragma unused(oidp, arg1, arg2)
2617 struct bt_params params = {};
2618 if (req->oldptr == USER_ADDR_NULL) {
2619 req->oldidx = sizeof(struct bt_params);
2620 return 0;
2621 }
2622 if (req->newptr) {
2623 return EPERM;
2624 }
2625 params = bt_params_get_latest();
2626 return SYSCTL_OUT(req, ¶ms, MIN(sizeof(params), req->oldlen));
2627 }
2628
2629 SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params,
2630 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0,
2631 0, sysctl_mach_bridge_conversion_params, "S,bt_params", "");
2632
2633 #endif /* CONFIG_MACH_BRIDGE_RECV_TIME */
2634
2635 #if DEVELOPMENT || DEBUG
2636
2637 #include <pexpert/pexpert.h>
2638 extern int32_t sysctl_get_bound_cpuid(void);
2639 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
2640 static int
2641 sysctl_kern_sched_thread_bind_cpu SYSCTL_HANDLER_ARGS
2642 {
2643 #pragma unused(oidp, arg1, arg2)
2644
2645 /*
2646 * DO NOT remove this bootarg guard or make this non-development.
2647 * This kind of binding should only be used for tests and
2648 * experiments in a custom configuration, never shipping code.
2649 */
2650
2651 if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2652 return ENOENT;
2653 }
2654
2655 int32_t cpuid = sysctl_get_bound_cpuid();
2656
2657 int32_t new_value;
2658 int changed;
2659 int error = sysctl_io_number(req, cpuid, sizeof cpuid, &new_value, &changed);
2660 if (error) {
2661 return error;
2662 }
2663
2664 if (changed) {
2665 kern_return_t kr = sysctl_thread_bind_cpuid(new_value);
2666
2667 if (kr == KERN_NOT_SUPPORTED) {
2668 return ENOTSUP;
2669 }
2670
2671 if (kr == KERN_INVALID_VALUE) {
2672 return ERANGE;
2673 }
2674 }
2675
2676 return error;
2677 }
2678
2679 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cpu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2680 0, 0, sysctl_kern_sched_thread_bind_cpu, "I", "");
2681
2682 #if __AMP__
2683 extern char sysctl_get_bound_cluster_type(void);
2684 extern void sysctl_thread_bind_cluster_type(char cluster_type);
2685 static int
2686 sysctl_kern_sched_thread_bind_cluster_type SYSCTL_HANDLER_ARGS
2687 {
2688 #pragma unused(oidp, arg1, arg2)
2689 char buff[4];
2690
2691 if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2692 return ENOENT;
2693 }
2694
2695 int error = SYSCTL_IN(req, buff, 1);
2696 if (error) {
2697 return error;
2698 }
2699 char cluster_type = buff[0];
2700
2701 if (!req->newptr) {
2702 goto out;
2703 }
2704
2705 sysctl_thread_bind_cluster_type(cluster_type);
2706 out:
2707 cluster_type = sysctl_get_bound_cluster_type();
2708 buff[0] = cluster_type;
2709
2710 return SYSCTL_OUT(req, buff, 1);
2711 }
2712
2713 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2714 0, 0, sysctl_kern_sched_thread_bind_cluster_type, "A", "");
2715
2716 extern char sysctl_get_task_cluster_type(void);
2717 extern void sysctl_task_set_cluster_type(char cluster_type);
2718 static int
2719 sysctl_kern_sched_task_set_cluster_type SYSCTL_HANDLER_ARGS
2720 {
2721 #pragma unused(oidp, arg1, arg2)
2722 char buff[4];
2723
2724 if (!PE_parse_boot_argn("enable_skstsct", NULL, 0)) {
2725 return ENOENT;
2726 }
2727
2728 int error = SYSCTL_IN(req, buff, 1);
2729 if (error) {
2730 return error;
2731 }
2732 char cluster_type = buff[0];
2733
2734 if (!req->newptr) {
2735 goto out;
2736 }
2737
2738 sysctl_task_set_cluster_type(cluster_type);
2739 out:
2740 cluster_type = sysctl_get_task_cluster_type();
2741 buff[0] = cluster_type;
2742
2743 return SYSCTL_OUT(req, buff, 1);
2744 }
2745
2746 SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2747 0, 0, sysctl_kern_sched_task_set_cluster_type, "A", "");
2748
2749 extern kern_return_t thread_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options);
2750 extern uint32_t thread_bound_cluster_id(thread_t);
2751 static int
2752 sysctl_kern_sched_thread_bind_cluster_id SYSCTL_HANDLER_ARGS
2753 {
2754 #pragma unused(oidp, arg1, arg2)
2755 if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
2756 return ENOENT;
2757 }
2758
2759 thread_t self = current_thread();
2760 uint32_t old_value = thread_bound_cluster_id(self);
2761 uint32_t new_value;
2762
2763 int error = SYSCTL_IN(req, &new_value, sizeof(new_value));
2764 if (error) {
2765 return error;
2766 }
2767 if (new_value != old_value) {
2768 /*
2769 * This sysctl binds the thread to the cluster without any flags,
2770 * which means it will be hard bound and not check eligibility.
2771 */
2772 thread_bind_cluster_id(self, new_value, 0);
2773 }
2774 return SYSCTL_OUT(req, &old_value, sizeof(old_value));
2775 }
2776
2777 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_id, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2778 0, 0, sysctl_kern_sched_thread_bind_cluster_id, "I", "");
2779
2780 #if CONFIG_SCHED_EDGE
2781
2782 extern int sched_edge_restrict_ut;
2783 SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_ut, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict UT Threads");
2784 extern int sched_edge_restrict_bg;
2785 SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_bg, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict BG Threads");
2786 extern int sched_edge_migrate_ipi_immediate;
2787 SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency");
2788
2789 #endif /* CONFIG_SCHED_EDGE */
2790
2791 #endif /* __AMP__ */
2792
2793 #if SCHED_HYGIENE_DEBUG
2794
/* Scheduler hygiene: interrupt-masked / preemption-disabled watchdogs. */
SYSCTL_QUAD(_kern, OID_AUTO, interrupt_masked_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_timeout,
    "Interrupt masked duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, interrupt_masked_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_debug_mode, 0,
    "Enable interrupt masked tracing or panic (0: off, 1: trace, 2: panic)");

SYSCTL_QUAD(_kern, OID_AUTO, sched_preemption_disable_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_threshold_mt,
    "Preemption disablement duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, sched_preemption_disable_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_debug_mode, 0,
    "Enable preemption disablement tracing or panic (0: off, 1: trace, 2: panic)");
2810
/*
 * Report the per-CPU maximum preemption-disablement durations as an
 * opaque array of up to MAX_CPUS uint64_t values.  Any write resets the
 * recorded maxima; the snapshot is taken before the reset so the writer
 * still sees the values being cleared.
 */
static int
sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern unsigned int preemption_disable_get_max_durations(uint64_t *durations, size_t count);
	extern void preemption_disable_reset_max_durations(void);

	uint64_t stats[MAX_CPUS]; // maximum per CPU

	unsigned int ncpus = preemption_disable_get_max_durations(stats, MAX_CPUS);
	if (req->newlen > 0) {
		/* Reset when attempting to write to the sysctl. */
		preemption_disable_reset_max_durations();
	}

	return sysctl_io_opaque(req, stats, ncpus * sizeof(uint64_t), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_preemption_disable_stats,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_sched_preemption_disable_stats, "I", "Preemption disablement statistics");
2831
2832 #endif /* SCHED_HYGIENE_DEBUG */
2833
/* used for testing by exception_tests */
extern uint32_t ipc_control_port_options;
SYSCTL_INT(_kern, OID_AUTO, ipc_control_port_options,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipc_control_port_options, 0, "");
2838
2839 #endif /* DEVELOPMENT || DEBUG */
2840
/* Read-only export of the default task exception-guard behavior flags. */
extern uint32_t task_exc_guard_default;

SYSCTL_INT(_kern, OID_AUTO, task_exc_guard_default,
    CTLFLAG_RD | CTLFLAG_LOCKED, &task_exc_guard_default, 0, "");
2845
2846
2847 static int
2848 sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS
2849 {
2850 #pragma unused(oidp, arg1, arg2)
2851 uint32_t value = machine_csv(CPUVN_CI) ? 1 : 0;
2852
2853 if (req->newptr) {
2854 return EINVAL;
2855 }
2856
2857 return SYSCTL_OUT(req, &value, sizeof(value));
2858 }
2859 SYSCTL_PROC(_kern, OID_AUTO, tcsm_available,
2860 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
2861 0, 0, sysctl_kern_tcsm_available, "I", "");
2862
2863
2864 static int
2865 sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS
2866 {
2867 #pragma unused(oidp, arg1, arg2)
2868 uint32_t soflags = 0;
2869 uint32_t old_value = thread_get_no_smt() ? 1 : 0;
2870
2871 int error = SYSCTL_IN(req, &soflags, sizeof(soflags));
2872 if (error) {
2873 return error;
2874 }
2875
2876 if (soflags && machine_csv(CPUVN_CI)) {
2877 thread_set_no_smt(true);
2878 machine_tecs(current_thread());
2879 }
2880
2881 return SYSCTL_OUT(req, &old_value, sizeof(old_value));
2882 }
2883 SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable,
2884 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
2885 0, 0, sysctl_kern_tcsm_enable, "I", "");
2886
/*
 * kern.preoslog: export the pre-OS log buffer to userspace and free it
 * once it has been read.  DumpPanic writes a non-zero byte to request
 * oneshot behaviour: only the first such request succeeds, later ones
 * get EPERM.
 */
static int
sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	static bool oneshot_executed = false;
	size_t preoslog_size = 0;
	const char *preoslog = NULL;
	int ret = 0;

	// DumpPanic passes a non-zero write value when it needs oneshot behaviour
	if (req->newptr != USER_ADDR_NULL) {
		uint8_t oneshot = 0;
		int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot));
		if (error) {
			return error;
		}

		if (oneshot) {
			/* First oneshot caller flips the flag; everyone after gets EPERM. */
			if (!os_atomic_cmpxchg(&oneshot_executed, false, true, acq_rel)) {
				return EPERM;
			}
		}
	}

	preoslog = sysctl_debug_get_preoslog(&preoslog_size);
	/* A non-NULL, zero-length buffer carries nothing; release it now. */
	if (preoslog != NULL && preoslog_size == 0) {
		sysctl_debug_free_preoslog();
		return 0;
	}

	if (preoslog == NULL || preoslog_size == 0) {
		return 0;
	}

	/* Size probe: report how many bytes a read will return. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = preoslog_size;
		return 0;
	}

	/* Copy out first, then free — the buffer is single-use. */
	ret = SYSCTL_OUT(req, preoslog, preoslog_size);
	sysctl_debug_free_preoslog();
	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_debug_get_preoslog, "-", "");
2933
2934 #if DEVELOPMENT || DEBUG
2935 extern void sysctl_task_set_no_smt(char no_smt);
2936 extern char sysctl_task_get_no_smt(void);
2937
2938 static int
2939 sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS
2940 {
2941 #pragma unused(oidp, arg1, arg2)
2942 char buff[4];
2943
2944 int error = SYSCTL_IN(req, buff, 1);
2945 if (error) {
2946 return error;
2947 }
2948 char no_smt = buff[0];
2949
2950 if (!req->newptr) {
2951 goto out;
2952 }
2953
2954 sysctl_task_set_no_smt(no_smt);
2955 out:
2956 no_smt = sysctl_task_get_no_smt();
2957 buff[0] = no_smt;
2958
2959 return SYSCTL_OUT(req, buff, 1);
2960 }
2961
2962 SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
2963 0, 0, sysctl_kern_sched_task_set_no_smt, "A", "");
2964
2965 static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)2966 sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2967 {
2968 int new_value, changed;
2969 int old_value = thread_get_no_smt() ? 1 : 0;
2970 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2971
2972 if (changed) {
2973 thread_set_no_smt(!!new_value);
2974 }
2975
2976 return error;
2977 }
2978
2979 SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt,
2980 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
2981 0, 0, sysctl_kern_sched_thread_set_no_smt, "I", "");
2982
2983 #if CONFIG_SCHED_RT_ALLOW
2984
2985 #if DEVELOPMENT || DEBUG
2986 #define RT_ALLOW_CTLFLAGS CTLFLAG_RW
2987 #else
2988 #define RT_ALLOW_CTLFLAGS CTLFLAG_RD
2989 #endif /* DEVELOPMENT || DEBUG */
2990
2991 static int
sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)2992 sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid *oidp,
2993 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2994 {
2995 extern uint8_t rt_allow_limit_percent;
2996
2997 int new_value = 0;
2998 int old_value = rt_allow_limit_percent;
2999 int changed = 0;
3000
3001 int error = sysctl_io_number(req, old_value, sizeof(old_value),
3002 &new_value, &changed);
3003 if (error != 0) {
3004 return error;
3005 }
3006
3007 /* Only accept a percentage between 1 and 99 inclusive. */
3008 if (changed) {
3009 if (new_value >= 100 || new_value <= 0) {
3010 return EINVAL;
3011 }
3012
3013 rt_allow_limit_percent = (uint8_t)new_value;
3014 }
3015
3016 return 0;
3017 }
3018
3019 SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_percent,
3020 RT_ALLOW_CTLFLAGS | CTLTYPE_INT | CTLFLAG_LOCKED,
3021 0, 0, sysctl_kern_rt_allow_limit_percent, "I", "");
3022
/*
 * kern.rt_allow_limit_interval_ms: enforcement interval for the RT-allow
 * CPU limit, in milliseconds.  Writable only on DEVELOPMENT/DEBUG.
 */
static int
sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern uint16_t rt_allow_limit_interval_ms;

	uint64_t new_value = 0;
	uint64_t old_value = rt_allow_limit_interval_ms;
	int changed = 0;

	int error = sysctl_io_number(req, old_value, sizeof(old_value),
	    &new_value, &changed);
	if (error != 0) {
		return error;
	}

	/*
	 * Value is in milliseconds; must be between 1 and UINT16_MAX ms.
	 * (A previous comment claimed "in ns", which contradicted both the
	 * variable name and the range check.)
	 */
	if (changed) {
		if (new_value < 1 || new_value > UINT16_MAX) {
			return EINVAL;
		}

		rt_allow_limit_interval_ms = (uint16_t)new_value;
	}

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_interval_ms,
    RT_ALLOW_CTLFLAGS | CTLTYPE_QUAD | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_rt_allow_limit_interval_ms, "Q", "");
3054
3055 #endif /* CONFIG_SCHED_RT_ALLOW */
3056
3057
3058 static int
3059 sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS
3060 {
3061 #pragma unused(oidp, arg1, arg2)
3062 int new_value, changed;
3063 int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0;
3064 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3065
3066 if (changed) {
3067 task_set_filter_msg_flag(current_task(), !!new_value);
3068 }
3069
3070 return error;
3071 }
3072
3073 SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3074 0, 0, sysctl_kern_task_set_filter_msg_flag, "I", "");
3075
3076 #if CONFIG_PROC_RESOURCE_LIMITS
3077
3078 extern mach_port_name_t current_task_get_fatal_port_name(void);
3079
3080 static int
3081 sysctl_kern_task_get_fatal_port SYSCTL_HANDLER_ARGS
3082 {
3083 #pragma unused(oidp, arg1, arg2)
3084 int port = 0;
3085 int flag = 0;
3086
3087 if (req->oldptr == USER_ADDR_NULL) {
3088 req->oldidx = sizeof(mach_port_t);
3089 return 0;
3090 }
3091
3092 int error = SYSCTL_IN(req, &flag, sizeof(flag));
3093 if (error) {
3094 return error;
3095 }
3096
3097 if (flag == 1) {
3098 port = (int)current_task_get_fatal_port_name();
3099 }
3100 return SYSCTL_OUT(req, &port, sizeof(port));
3101 }
3102
3103 SYSCTL_PROC(_machdep, OID_AUTO, task_get_fatal_port, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3104 0, 0, sysctl_kern_task_get_fatal_port, "I", "");
3105
3106 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
3107
3108 extern unsigned int ipc_entry_table_count_max(void);
3109
3110 static int
3111 sysctl_mach_max_port_table_size SYSCTL_HANDLER_ARGS
3112 {
3113 #pragma unused(oidp, arg1, arg2)
3114 int old_value = ipc_entry_table_count_max();
3115 int error = sysctl_io_number(req, old_value, sizeof(int), NULL, NULL);
3116
3117 return error;
3118 }
3119
3120 SYSCTL_PROC(_machdep, OID_AUTO, max_port_table_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3121 0, 0, sysctl_mach_max_port_table_size, "I", "");
3122
3123 #endif /* DEVELOPMENT || DEBUG */
3124
3125 #if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(CONFIG_KDP_COREDUMP_ENCRYPTION)
3126
3127 #define COREDUMP_ENCRYPTION_KEY_ENTITLEMENT "com.apple.private.coredump-encryption-key"
3128
3129 static int
3130 sysctl_coredump_encryption_key_update SYSCTL_HANDLER_ARGS
3131 {
3132 kern_return_t ret = KERN_SUCCESS;
3133 int error = 0;
3134 struct kdp_core_encryption_key_descriptor key_descriptor = {
3135 .kcekd_format = MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256,
3136 };
3137
3138 /* Need to be root and have entitlement */
3139 if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(COREDUMP_ENCRYPTION_KEY_ENTITLEMENT)) {
3140 return EPERM;
3141 }
3142
3143 // Sanity-check the given key length
3144 if (req->newlen > UINT16_MAX) {
3145 return EINVAL;
3146 }
3147
3148 // It is allowed for the caller to pass in a NULL buffer.
3149 // This indicates that they want us to forget about any public key we might have.
3150 if (req->newptr) {
3151 key_descriptor.kcekd_size = (uint16_t) req->newlen;
3152 key_descriptor.kcekd_key = kalloc_data(key_descriptor.kcekd_size, Z_WAITOK);
3153
3154 if (key_descriptor.kcekd_key == NULL) {
3155 return ENOMEM;
3156 }
3157
3158 error = SYSCTL_IN(req, key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3159 if (error) {
3160 goto out;
3161 }
3162 }
3163
3164 ret = IOProvideCoreFileAccess(kdp_core_handle_new_encryption_key, (void *)&key_descriptor);
3165 if (KERN_SUCCESS != ret) {
3166 printf("Failed to handle the new encryption key. Error 0x%x", ret);
3167 error = EFAULT;
3168 }
3169
3170 out:
3171 kfree_data(key_descriptor.kcekd_key, key_descriptor.kcekd_size);
3172 return 0;
3173 }
3174
3175 SYSCTL_PROC(_kern, OID_AUTO, coredump_encryption_key, CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
3176 0, 0, &sysctl_coredump_encryption_key_update, "-", "Set a new encryption key for coredumps");
3177
3178 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING && CONFIG_KDP_COREDUMP_ENCRYPTION*/
3179