1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/file_internal.h>
83 #include <sys/guarded.h>
84 #include <sys/priv.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/stat.h>
88 #include <sys/ioctl.h>
89 #include <sys/fcntl.h>
90 #include <sys/fsctl.h>
91 #include <sys/malloc.h>
92 #include <sys/mman.h>
93 #include <sys/mount.h>
94 #include <sys/syslog.h>
95 #include <sys/unistd.h>
96 #include <sys/resourcevar.h>
97 #include <sys/aio_kern.h>
98 #include <sys/ev.h>
99 #include <kern/locks.h>
100 #include <sys/uio_internal.h>
101 #include <sys/codesign.h>
102 #include <sys/codedir_internal.h>
103 #include <sys/mount_internal.h>
104 #include <sys/kdebug.h>
105 #include <sys/sysproto.h>
106 #include <sys/pipe.h>
107 #include <sys/spawn.h>
108 #include <sys/cprotect.h>
109 #include <sys/ubc_internal.h>
110
111 #include <kern/kern_types.h>
112 #include <kern/kalloc.h>
113 #include <kern/waitq.h>
114 #include <kern/ipc_misc.h>
115 #include <kern/ast.h>
116
117 #include <vm/vm_protos.h>
118 #include <mach/mach_port.h>
119
120 #include <security/audit/audit.h>
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124
125 #include <stdbool.h>
126 #include <os/atomic_private.h>
127 #include <os/overflow.h>
128 #include <IOKit/IOBSD.h>
129
#define IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND 0x1
/* Mach IPC entry points used by fileport code; declared here rather than
 * pulling in the full osfmk headers. */
kern_return_t ipc_object_copyin(ipc_space_t, mach_port_name_t,
    mach_msg_type_name_t, ipc_port_t *, mach_port_context_t, mach_msg_guard_flags_t *, uint32_t);
void ipc_port_release_send(ipc_port_t);

void fileport_releasefg(struct fileglob *fg);

/* flags for fp_close_and_unlock */
#define FD_DUP2RESV 1

/* We don't want these exported */

__private_extern__
int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);

/* Conflict wait queue for when selects collide (opaque type) */
extern struct waitq select_conflict_queue;

/*
 * Convenience accessors: reach through a fileproc's fp_glob to the
 * shared fileglob state (fp must be the variable in scope at use site).
 */
#define f_flag fp_glob->fg_flag
#define f_type fp_glob->fg_ops->fo_type
#define f_cred fp_glob->fg_cred
#define f_ops fp_glob->fg_ops
#define f_offset fp_glob->fg_offset

/* Zones backing the two per-open-file structures; zeroed on free. */
ZONE_DEFINE_TYPE(fg_zone, "fileglob", struct fileglob, ZC_ZFREE_CLEARMEM);
ZONE_DEFINE_ID(ZONE_ID_FILEPROC, "fileproc", struct fileproc, ZC_ZFREE_CLEARMEM);

/*
 * Descriptor management.
 */
int nfiles;                     /* actual number of open files */
/*
 * "uninitialized" ops -- ensure FILEGLOB_DTYPE(fg) always exists
 */
static const struct fileops uninitops;

/* Refcount group for fileglob/fileproc reference counting diagnostics. */
os_refgrp_decl(, f_refgrp, "files refcounts", NULL);
static LCK_GRP_DECLARE(file_lck_grp, "file");
168
169
170 #pragma mark fileglobs
171
/*!
 * @function fg_free
 *
 * @brief
 * Free a file structure.
 *
 * @discussion
 * Called once the last reference on the fileglob is gone: releases the
 * cached per-vnode data, drops the credential reference, destroys the
 * embedded mutex, tears down any vnode-guard label, and returns the
 * structure to its zone.  Also decrements the global open-file count.
 */
static void
fg_free(struct fileglob *fg)
{
	/* global open-file accounting: one fewer file in the system */
	os_atomic_dec(&nfiles, relaxed);

	if (fg->fg_vn_data) {
		fg_vn_data_free(fg->fg_vn_data);
		fg->fg_vn_data = NULL;
	}

	/* drop the credential stashed at open time, if any */
	kauth_cred_t cred = fg->fg_cred;
	if (IS_VALID_CRED(cred)) {
		kauth_cred_unref(&cred);
		fg->fg_cred = NOCRED;
	}
	lck_mtx_destroy(&fg->fg_lock, &file_lck_grp);

#if CONFIG_MACF && CONFIG_VNGUARD
	vng_file_label_destroy(fg);
#endif
	zfree(fg_zone, fg);
}
200
OS_ALWAYS_INLINE
void
fg_ref(proc_t p, struct fileglob *fg)
{
	/*
	 * Take an additional reference on an already-referenced fileglob.
	 * On DEBUG/DEVELOPMENT kernels, verify the caller holds the proc
	 * fd lock; on release kernels `p` is unused.
	 */
#if DEBUG || DEVELOPMENT
	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#else
	(void)p;
#endif
	os_ref_retain_raw(&fg->fg_count, &f_refgrp);
}
212
/*
 * Drop a fileglob reference that is known not to be the last one
 * (os_ref_release_live_* panics if the count would hit zero).
 */
void
fg_drop_live(struct fileglob *fg)
{
	os_ref_release_live_raw(&fg->fg_count, &f_refgrp);
}
218
/*
 * fg_drop
 *
 * Drop one reference on `fg`.  If this was the last reference, invoke the
 * file-type close operation and free the structure.  For vnodes, first
 * release any POSIX-style advisory locks owned by `p` (NULL-safe: a NULL
 * fg is a no-op returning 0).
 *
 * Returns: 0, or an error from vnode_getwithref()/fo_close().
 */
int
fg_drop(proc_t p, struct fileglob *fg)
{
	struct vnode *vp;
	struct vfs_context context;
	int error = 0;

	if (fg == NULL) {
		return 0;
	}

	/* Set up context with cred stashed in fg */
	if (p == current_proc()) {
		context.vc_thread = current_thread();
	} else {
		/* NOTE(review): drops on behalf of another proc carry no thread */
		context.vc_thread = NULL;
	}
	context.vc_ucred = fg->fg_cred;

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
	    (p->p_ladvflag & P_LADVLOCK)) {
		struct flock lf = {
			.l_whence = SEEK_SET,
			.l_type = F_UNLCK,
		};

		vp = (struct vnode *)fg_get_data(fg);
		if ((error = vnode_getwithref(vp)) == 0) {
			/* best effort: unlock errors are intentionally ignored */
			(void)VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL);
			(void)vnode_put(vp);
		}
	}

	if (os_ref_release_raw(&fg->fg_count, &f_refgrp) == 0) {
		/*
		 * Since we ensure that fg->fg_ops is always initialized,
		 * it is safe to invoke fo_close on the fg
		 */
		error = fo_close(fg, &context);

		fg_free(fg);
	}

	return error;
}
272
/*
 * Store the per-type payload pointer (vnode, socket, pipe, ...) into
 * fg->fg_data.  On arm64e, the pointer is signed with a discriminator
 * derived from both the storage address and the file's type, so a
 * pointer cannot be forged or replayed into another fileglob.
 */
inline
void
fg_set_data(
	struct fileglob *fg,
	void *fg_data)
{
	uintptr_t *store = &fg->fg_data;

#if __has_feature(ptrauth_calls)
	int type = FILEGLOB_DTYPE(fg);

	if (fg_data) {
		/* blend the slot address and type into the signature */
		type ^= OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data");
		fg_data = ptrauth_sign_unauthenticated(fg_data,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)fg_data;
}
294
/*
 * Load the per-type payload pointer from fg->fg_data, authenticating
 * the ptrauth signature applied by fg_set_data() on arm64e.  A forged
 * or mismatched pointer traps at authentication time.
 */
inline
void *
fg_get_data_volatile(struct fileglob *fg)
{
	uintptr_t *store = &fg->fg_data;
	void *fg_data = (void *)*store;

#if __has_feature(ptrauth_calls)
	int type = FILEGLOB_DTYPE(fg);

	if (fg_data) {
		/* must mirror the discriminator blend in fg_set_data() */
		type ^= OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data");
		fg_data = ptrauth_auth_data(fg_data,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#endif // __has_feature(ptrauth_calls)

	return fg_data;
}
315
316 static void
fg_transfer_filelocks(proc_t p,struct fileglob * fg,thread_t thread)317 fg_transfer_filelocks(proc_t p, struct fileglob *fg, thread_t thread)
318 {
319 struct vnode *vp;
320 struct vfs_context context;
321 struct proc *old_proc = current_proc();
322
323 assert(fg != NULL);
324
325 assert(p != old_proc);
326 context.vc_thread = thread;
327 context.vc_ucred = fg->fg_cred;
328
329 /* Transfer all POSIX Style locks to new proc */
330 if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
331 (p->p_ladvflag & P_LADVLOCK)) {
332 struct flock lf = {
333 .l_whence = SEEK_SET,
334 .l_start = 0,
335 .l_len = 0,
336 .l_type = F_TRANSFER,
337 };
338
339 vp = (struct vnode *)fg_get_data(fg);
340 if (vnode_getwithref(vp) == 0) {
341 (void)VNOP_ADVLOCK(vp, (caddr_t)old_proc, F_TRANSFER, &lf, F_POSIX, &context, NULL);
342 (void)vnode_put(vp);
343 }
344 }
345
346 /* Transfer all OFD Style locks to new proc */
347 if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
348 (fg->fg_lflags & FG_HAS_OFDLOCK)) {
349 struct flock lf = {
350 .l_whence = SEEK_SET,
351 .l_start = 0,
352 .l_len = 0,
353 .l_type = F_TRANSFER,
354 };
355
356 vp = (struct vnode *)fg_get_data(fg);
357 if (vnode_getwithref(vp) == 0) {
358 (void)VNOP_ADVLOCK(vp, ofd_to_id(fg), F_TRANSFER, &lf, F_OFD_LOCK, &context, NULL);
359 (void)vnode_put(vp);
360 }
361 }
362 return;
363 }
364
365 bool
fg_sendable(struct fileglob * fg)366 fg_sendable(struct fileglob *fg)
367 {
368 switch (FILEGLOB_DTYPE(fg)) {
369 case DTYPE_VNODE:
370 case DTYPE_SOCKET:
371 case DTYPE_PIPE:
372 case DTYPE_PSXSHM:
373 case DTYPE_NETPOLICY:
374 return (fg->fg_lflags & FG_CONFINED) == 0;
375
376 default:
377 return false;
378 }
379 }
380
381 #pragma mark file descriptor table (static helpers)
382
383 static void
procfdtbl_reservefd(struct proc * p,int fd)384 procfdtbl_reservefd(struct proc * p, int fd)
385 {
386 p->p_fd.fd_ofiles[fd] = NULL;
387 p->p_fd.fd_ofileflags[fd] |= UF_RESERVED;
388 }
389
390 void
procfdtbl_releasefd(struct proc * p,int fd,struct fileproc * fp)391 procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp)
392 {
393 if (fp != NULL) {
394 p->p_fd.fd_ofiles[fd] = fp;
395 }
396 p->p_fd.fd_ofileflags[fd] &= ~UF_RESERVED;
397 if ((p->p_fd.fd_ofileflags[fd] & UF_RESVWAIT) == UF_RESVWAIT) {
398 p->p_fd.fd_ofileflags[fd] &= ~UF_RESVWAIT;
399 wakeup(&p->p_fd);
400 }
401 }
402
403 static void
procfdtbl_waitfd(struct proc * p,int fd)404 procfdtbl_waitfd(struct proc * p, int fd)
405 {
406 p->p_fd.fd_ofileflags[fd] |= UF_RESVWAIT;
407 msleep(&p->p_fd, &p->p_fd.fd_lock, PRIBIO, "ftbl_waitfd", NULL);
408 }
409
410 static void
procfdtbl_clearfd(struct proc * p,int fd)411 procfdtbl_clearfd(struct proc * p, int fd)
412 {
413 int waiting;
414
415 waiting = (p->p_fd.fd_ofileflags[fd] & UF_RESVWAIT);
416 p->p_fd.fd_ofiles[fd] = NULL;
417 p->p_fd.fd_ofileflags[fd] = 0;
418 if (waiting == UF_RESVWAIT) {
419 wakeup(&p->p_fd);
420 }
421 }
422
/*
 * fdrelse
 *
 * Description:	Inline utility function to free an fd in a filedesc
 *
 * Parameters:	p		Process whose descriptor table the fd
 *				lives in
 *		fd		fd to free
 *
 * Returns:	void
 *
 * Locks:	Assumes proc_fdlock for process p is held by the caller
 */
void
fdrelse(struct proc * p, int fd)
{
	struct filedesc *fdp = &p->p_fd;
	int nfd = 0;

	/* track the lowest free slot for the next allocation */
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
#if DIAGNOSTIC
	if (fd >= fdp->fd_afterlast) {
		panic("fdrelse: fd_afterlast inconsistent");
	}
#endif
	procfdtbl_clearfd(p, fd);

	/*
	 * Shrink fd_afterlast past any trailing run of empty,
	 * unreserved slots so scans stay as short as possible.
	 */
	nfd = fdp->fd_afterlast;
	while (nfd > 0 && fdp->fd_ofiles[nfd - 1] == NULL &&
	    !(fdp->fd_ofileflags[nfd - 1] & UF_RESERVED)) {
		nfd--;
	}
	fdp->fd_afterlast = nfd;

#if CONFIG_PROC_RESOURCE_LIMITS
	fdp->fd_nfiles_open--;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
}
464
465
/*
 * finishdup
 *
 * Description: Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * Parameters:	p				Process performing the dup
 *		p_cred				Credential used for the MACF
 *						dup check
 *		old				The fd to dup
 *		new				The fd to dup it to (already
 *						reserved by the caller)
 *		fp_flags			Flags to augment the new fp
 *		retval				Pointer to the call return area
 *
 * Returns:	0				Success
 *		EBADF				old fd is closed or in flux
 *		(MACF error)			mac_file_check_dup() veto
 *
 * Implicit returns:
 *		*retval (modified)		The new descriptor
 *
 * Locks:	Assumes proc_fdlock for process pointing to fdp is held by
 *		the caller
 *
 * Notes:	This function may drop and reacquire this lock; it is unsafe
 *		for a caller to assume that other state protected by the lock
 *		has not been subsequently changed out from under it.
 */
static int
finishdup(
	proc_t p,
	kauth_cred_t p_cred,
	int old,
	int new,
	fileproc_flags_t fp_flags,
	int32_t *retval)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *nfp;
	struct fileproc *ofp;
#if CONFIG_MACF
	int error;
#endif

#if DIAGNOSTIC
	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif
	/* the source must be open and settled, else release the reservation */
	if ((ofp = fdp->fd_ofiles[old]) == NULL ||
	    (fdp->fd_ofileflags[old] & UF_RESERVED)) {
		fdrelse(p, new);
		return EBADF;
	}

#if CONFIG_MACF
	error = mac_file_check_dup(p_cred, ofp->fp_glob, new);

	if (error) {
		fdrelse(p, new);
		return error;
	}
#else
	(void)p_cred;
#endif

	/* take the fg ref while still under the lock, before we drop it */
	fg_ref(p, ofp->fp_glob);

	proc_fdunlock(p);

	/* allocation may block, hence outside the fd lock */
	nfp = fileproc_alloc_init();

	if (fp_flags) {
		nfp->fp_flags |= fp_flags;
	}
	nfp->fp_glob = ofp->fp_glob;

	proc_fdlock(p);

#if DIAGNOSTIC
	if (fdp->fd_ofiles[new] != 0) {
		panic("finishdup: overwriting fd_ofiles with new %d", new);
	}
	if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) {
		panic("finishdup: unreserved fileflags with new %d", new);
	}
#endif

	if (new >= fdp->fd_afterlast) {
		fdp->fd_afterlast = new + 1;
	}
	procfdtbl_releasefd(p, new, nfp);
	*retval = new;
	return 0;
}
556
557
558 #pragma mark file descriptor table (exported functions)
559
/* Take the per-process cwd/root rw-lock shared (readers of fd_cdir/fd_rdir). */
void
proc_dirs_lock_shared(proc_t p)
{
	lck_rw_lock_shared(&p->p_fd.fd_dirs_lock);
}
565
/* Release a shared hold taken by proc_dirs_lock_shared(). */
void
proc_dirs_unlock_shared(proc_t p)
{
	lck_rw_unlock_shared(&p->p_fd.fd_dirs_lock);
}
571
/* Take the per-process cwd/root rw-lock exclusive (writers of fd_cdir/fd_rdir). */
void
proc_dirs_lock_exclusive(proc_t p)
{
	lck_rw_lock_exclusive(&p->p_fd.fd_dirs_lock);
}
577
/* Release an exclusive hold taken by proc_dirs_lock_exclusive(). */
void
proc_dirs_unlock_exclusive(proc_t p)
{
	lck_rw_unlock_exclusive(&p->p_fd.fd_dirs_lock);
}
583
/*
 * proc_fdlock, proc_fdlock_spin
 *
 * Description:	Lock to control access to the per process struct fileproc
 *		and struct filedesc
 *
 * Parameters:	p				Process to take the lock on
 *
 * Returns:	void
 *
 * Notes:	The lock is initialized in forkproc() and destroyed in
 *		reap_child_process().
 */
void
proc_fdlock(proc_t p)
{
	lck_mtx_lock(&p->p_fd.fd_lock);
}
602
/* Spin-mode acquisition of the fd lock; see proc_fdlock() above. */
void
proc_fdlock_spin(proc_t p)
{
	lck_mtx_lock_spin(&p->p_fd.fd_lock);
}
608
/* Assert ownership state of p's fd lock (LCK_MTX_ASSERT_OWNED/NOTOWNED). */
void
proc_fdlock_assert(proc_t p, int assertflags)
{
	lck_mtx_assert(&p->p_fd.fd_lock, assertflags);
}
614
615
/*
 * proc_fdunlock
 *
 * Description:	Unlock the lock previously locked by a call to proc_fdlock()
 *
 * Parameters:	p				Process to drop the lock on
 *
 * Returns:	void
 */
void
proc_fdunlock(proc_t p)
{
	lck_mtx_unlock(&p->p_fd.fd_lock);
}
630
/*
 * fdt_available_locked
 *
 * Report whether at least `n` more descriptors can be opened by `p`
 * without exceeding its RLIMIT_NOFILE.  First credits headroom between
 * the limit and the current table size, then counts free, unreserved
 * slots inside the existing table until `n` is satisfied.
 *
 * Locks: caller must hold the proc fd lock (table is walked unlocked
 * otherwise).  NOTE(review): assumes lim >= fd_freefile bookkeeping is
 * consistent; the `lim - fd_nfiles` credit may be negative and is then
 * simply skipped.
 */
bool
fdt_available_locked(proc_t p, int n)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc **fpp;
	char *flags;
	int i;
	int lim = proc_limitgetcur_nofile(p);

	/* room to grow the table itself may already satisfy the request */
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		return true;
	}
	/* otherwise scan existing slots above fd_freefile for free ones */
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	flags = &fdp->fd_ofileflags[fdp->fd_freefile];
	for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++) {
		if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0) {
			return true;
		}
	}
	return false;
}
652
653
654 struct fdt_iterator
fdt_next(proc_t p,int fd,bool only_settled)655 fdt_next(proc_t p, int fd, bool only_settled)
656 {
657 struct fdt_iterator it;
658 struct filedesc *fdp = &p->p_fd;
659 struct fileproc *fp;
660 int nfds = fdp->fd_afterlast;
661
662 while (++fd < nfds) {
663 fp = fdp->fd_ofiles[fd];
664 if (fp == NULL || fp->fp_glob == NULL) {
665 continue;
666 }
667 if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
668 continue;
669 }
670 it.fdti_fd = fd;
671 it.fdti_fp = fp;
672 return it;
673 }
674
675 it.fdti_fd = nfds;
676 it.fdti_fp = NULL;
677 return it;
678 }
679
680 struct fdt_iterator
fdt_prev(proc_t p,int fd,bool only_settled)681 fdt_prev(proc_t p, int fd, bool only_settled)
682 {
683 struct fdt_iterator it;
684 struct filedesc *fdp = &p->p_fd;
685 struct fileproc *fp;
686
687 while (--fd >= 0) {
688 fp = fdp->fd_ofiles[fd];
689 if (fp == NULL || fp->fp_glob == NULL) {
690 continue;
691 }
692 if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
693 continue;
694 }
695 it.fdti_fd = fd;
696 it.fdti_fp = fp;
697 return it;
698 }
699
700 it.fdti_fd = -1;
701 it.fdti_fp = NULL;
702 return it;
703 }
704
/*
 * Initialize the locks embedded in p's descriptor table (called at
 * process creation; counterpart of fdt_destroy()).
 */
void
fdt_init(proc_t p)
{
	struct filedesc *fdp = &p->p_fd;

	lck_mtx_init(&fdp->fd_kqhashlock, &proc_kqhashlock_grp, &proc_lck_attr);
	lck_mtx_init(&fdp->fd_knhashlock, &proc_knhashlock_grp, &proc_lck_attr);
	lck_mtx_init(&fdp->fd_lock, &proc_fdmlock_grp, &proc_lck_attr);
	lck_rw_init(&fdp->fd_dirs_lock, &proc_dirslock_grp, &proc_lck_attr);
}
715
/*
 * Tear down the locks initialized by fdt_init(); called when the
 * process is reaped.
 */
void
fdt_destroy(proc_t p)
{
	struct filedesc *fdp = &p->p_fd;

	lck_mtx_destroy(&fdp->fd_kqhashlock, &proc_kqhashlock_grp);
	lck_mtx_destroy(&fdp->fd_knhashlock, &proc_knhashlock_grp);
	lck_mtx_destroy(&fdp->fd_lock, &proc_fdmlock_grp);
	lck_rw_destroy(&fdp->fd_dirs_lock, &proc_dirslock_grp);
}
726
/*
 * fdt_exec
 *
 * Walk p's descriptor table at exec/posix_spawn time and decide, per
 * descriptor, whether it survives into the new image.  Close-on-exec
 * files, files rejected by MACF, and (under POSIX_SPAWN_CLOEXEC_DEFAULT)
 * files not explicitly marked UF_INHERIT are closed; surviving files
 * get their advisory locks transferred when this is a true exec.
 * Also unbinds any workq thread request on the calling thread, drops
 * all knotes, and releases the per-process workq kqueue.
 *
 * Note: fp_close_and_unlock() drops the fd lock, so the loop re-takes
 * it each iteration; table state may shift between iterations.
 */
void
fdt_exec(proc_t p, kauth_cred_t p_cred, short posix_spawn_flags, thread_t thread, bool in_exec)
{
	struct filedesc *fdp = &p->p_fd;
	thread_t self = current_thread();
	struct uthread *ut = get_bsdthread_info(self);
	struct kqworkq *dealloc_kqwq = NULL;

	/*
	 * If the current thread is bound as a workq/workloop
	 * servicing thread, we need to unbind it first.
	 */
	if (ut->uu_kqr_bound && get_bsdthreadtask_info(self) == p) {
		kqueue_threadreq_unbind(p, ut->uu_kqr_bound);
	}

	/*
	 * Deallocate the knotes for this process
	 * and mark the tables non-existent so
	 * subsequent kqueue closes go faster.
	 */
	knotes_dealloc(p);
	assert(fdp->fd_knlistsize == 0);
	assert(fdp->fd_knhashmask == 0);

	proc_fdlock(p);

	/* Set the P_LADVLOCK flag if the flag set on old proc */
	if (in_exec && (current_proc()->p_ladvflag & P_LADVLOCK)) {
		os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
	}

	for (int i = fdp->fd_afterlast; i-- > 0;) {
		struct fileproc *fp = fdp->fd_ofiles[i];
		char *flagp = &fdp->fd_ofileflags[i];
		bool inherit_file = true;

		if (fp == FILEPROC_NULL) {
			continue;
		}

		/*
		 * no file descriptor should be in flux when in exec,
		 * because we stopped all other threads
		 */
		if (*flagp & ~UF_INHERIT) {
			panic("file %d/%p in flux during exec of %p", i, fp, p);
		}

		if (fp->fp_flags & FP_CLOEXEC) {
			inherit_file = false;
		} else if ((posix_spawn_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) &&
		    !(*flagp & UF_INHERIT)) {
			/*
			 * Reverse the usual semantics of file descriptor
			 * inheritance - all of them should be closed
			 * except files marked explicitly as "inherit" and
			 * not marked close-on-exec.
			 */
			inherit_file = false;
#if CONFIG_MACF
		} else if (mac_file_check_inherit(p_cred, fp->fp_glob)) {
			inherit_file = false;
#endif
		}

		*flagp = 0; /* clear UF_INHERIT */

		if (!inherit_file) {
			/* drops and requires re-taking the fd lock */
			fp_close_and_unlock(p, p_cred, i, fp, 0);
			proc_fdlock(p);
		} else if (in_exec) {
			/* Transfer F_POSIX style lock to new proc */
			proc_fdunlock(p);
			fg_transfer_filelocks(p, fp->fp_glob, thread);
			proc_fdlock(p);
		}
	}

	/* release the per-process workq kq */
	if (fdp->fd_wqkqueue) {
		dealloc_kqwq = fdp->fd_wqkqueue;
		fdp->fd_wqkqueue = NULL;
	}

	proc_fdunlock(p);

	/* Anything to free?  (done outside the lock: dealloc may block) */
	if (dealloc_kqwq) {
		kqworkq_dealloc(dealloc_kqwq);
	}
}
819
820
/*
 * fdt_fork
 *
 * Populate `newfdp` (the child's descriptor table) from `p`'s table at
 * fork/exec time.  Inherits flags, cmask, resource limits, the root and
 * current directories (taking fresh vnode refs), and per-descriptor
 * fileproc copies, honoring FP_CLOFORK/FP_CLOEXEC and FG_CONFINED.
 *
 * Parameters:	newfdp		zero-initialized table to fill in
 *		p		parent process
 *		uth_cdir	per-thread cwd override, if any
 *		in_exec		true for exec-style copy (keeps CLOFORK
 *				files, copies guards), false for fork
 *
 * Returns:	0		Success
 *		EPERM		could not re-reference the chroot dir
 *		ENOMEM		table allocation failed
 *
 * Locks: takes and drops the parent's fd lock around the blocking
 * allocations; the table may change while unlocked, which is why
 * afterlast/freefile are recomputed during the copy loop.
 */
int
fdt_fork(struct filedesc *newfdp, proc_t p, vnode_t uth_cdir, bool in_exec)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc **ofiles;
	char *ofileflags;
	int n_files, afterlast, freefile;
	vnode_t v_dir;
#if CONFIG_PROC_RESOURCE_LIMITS
	int fd_nfiles_open = 0;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	proc_fdlock(p);

	newfdp->fd_flags = (fdp->fd_flags & FILEDESC_FORK_INHERITED_MASK);
	newfdp->fd_cmask = fdp->fd_cmask;
#if CONFIG_PROC_RESOURCE_LIMITS
	newfdp->fd_nfiles_soft_limit = fdp->fd_nfiles_soft_limit;
	newfdp->fd_nfiles_hard_limit = fdp->fd_nfiles_hard_limit;

	newfdp->kqwl_dyn_soft_limit = fdp->kqwl_dyn_soft_limit;
	newfdp->kqwl_dyn_hard_limit = fdp->kqwl_dyn_hard_limit;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	/*
	 * For both fd_cdir and fd_rdir make sure we get
	 * a valid reference... if we can't, than set
	 * set the pointer(s) to NULL in the child... this
	 * will keep us from using a non-referenced vp
	 * and allows us to do the vnode_rele only on
	 * a properly referenced vp
	 */
	if ((v_dir = fdp->fd_rdir)) {
		if (vnode_getwithref(v_dir) == 0) {
			if (vnode_ref(v_dir) == 0) {
				newfdp->fd_rdir = v_dir;
			}
			vnode_put(v_dir);
		}
		if (newfdp->fd_rdir == NULL) {
			/*
			 * We couldn't get a new reference on
			 * the chroot directory being
			 * inherited... this is fatal, since
			 * otherwise it would constitute an
			 * escape from a chroot environment by
			 * the new process.
			 */
			proc_fdunlock(p);
			return EPERM;
		}
	}

	/*
	 * If we are running with per-thread current working directories,
	 * inherit the new current working directory from the current thread.
	 */
	if ((v_dir = uth_cdir ? uth_cdir : fdp->fd_cdir)) {
		if (vnode_getwithref(v_dir) == 0) {
			if (vnode_ref(v_dir) == 0) {
				newfdp->fd_cdir = v_dir;
			}
			vnode_put(v_dir);
		}
		if (newfdp->fd_cdir == NULL && v_dir == fdp->fd_cdir) {
			/*
			 * we couldn't get a new reference on
			 * the current working directory being
			 * inherited... we might as well drop
			 * our reference from the parent also
			 * since the vnode has gone DEAD making
			 * it useless... by dropping it we'll
			 * be that much closer to recycling it
			 */
			vnode_rele(fdp->fd_cdir);
			fdp->fd_cdir = NULL;
		}
	}

	/*
	 * If the number of open files fits in the internal arrays
	 * of the open file structure, use them, otherwise allocate
	 * additional memory for the number of descriptors currently
	 * in use.
	 */
	afterlast = fdp->fd_afterlast;
	freefile = fdp->fd_freefile;
	if (afterlast <= NDFILE) {
		n_files = NDFILE;
	} else {
		n_files = roundup(afterlast, NDEXTENT);
	}

	/* drop the lock: the allocations below may block */
	proc_fdunlock(p);

	ofiles = kalloc_type(struct fileproc *, n_files, Z_WAITOK | Z_ZERO);
	ofileflags = kalloc_data(n_files, Z_WAITOK | Z_ZERO);
	if (ofiles == NULL || ofileflags == NULL) {
		/* undo partial allocation and the directory refs taken above */
		kfree_type(struct fileproc *, n_files, ofiles);
		kfree_data(ofileflags, n_files);
		if (newfdp->fd_cdir) {
			vnode_rele(newfdp->fd_cdir);
			newfdp->fd_cdir = NULL;
		}
		if (newfdp->fd_rdir) {
			vnode_rele(newfdp->fd_rdir);
			newfdp->fd_rdir = NULL;
		}
		return ENOMEM;
	}

	proc_fdlock(p);

	for (int i = afterlast; i-- > 0;) {
		struct fileproc *ofp, *nfp;
		char flags;

		ofp = fdp->fd_ofiles[i];
		flags = fdp->fd_ofileflags[i];

		/* skip slots that are empty, confined, close-on-{fork,exec}, or in flux */
		if (ofp == NULL ||
		    (ofp->fp_glob->fg_lflags & FG_CONFINED) ||
		    ((ofp->fp_flags & FP_CLOFORK) && !in_exec) ||
		    ((ofp->fp_flags & FP_CLOEXEC) && in_exec) ||
		    (flags & UF_RESERVED)) {
			/* keep the child's afterlast/freefile bounds tight */
			if (i + 1 == afterlast) {
				afterlast = i;
			}
			if (i < freefile) {
				freefile = i;
			}

			continue;
		}

		nfp = fileproc_alloc_init();
		nfp->fp_glob = ofp->fp_glob;
		if (in_exec) {
			nfp->fp_flags = (ofp->fp_flags & (FP_CLOEXEC | FP_CLOFORK));
			if (ofp->fp_guard_attrs) {
				guarded_fileproc_copy_guard(ofp, nfp);
			}
		} else {
			assert(ofp->fp_guard_attrs == 0);
			nfp->fp_flags = (ofp->fp_flags & FP_CLOEXEC);
		}
		fg_ref(p, nfp->fp_glob);

		ofiles[i] = nfp;
#if CONFIG_PROC_RESOURCE_LIMITS
		fd_nfiles_open++;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	}

	proc_fdunlock(p);

	newfdp->fd_ofiles = ofiles;
	newfdp->fd_ofileflags = ofileflags;
	newfdp->fd_nfiles = n_files;
	newfdp->fd_afterlast = afterlast;
	newfdp->fd_freefile = freefile;

#if CONFIG_PROC_RESOURCE_LIMITS
	newfdp->fd_nfiles_open = fd_nfiles_open;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	return 0;
}
988
/*
 * fdt_invalidate
 *
 * Tear down p's descriptor table at process exit: drop all knotes and
 * workloops, close every remaining descriptor, then detach and free the
 * table arrays, the workq kqueue, the cwd/root vnode refs, and the
 * kqueue hash — the latter four freed only after all locks are dropped,
 * since freeing may block.
 *
 * Locks: takes the fd lock (dropped/re-taken by fp_close_and_unlock per
 * descriptor) and fd_kqhashlock for the hash detach.
 */
void
fdt_invalidate(proc_t p)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *fp, **ofiles;
	kauth_cred_t p_cred;
	char *ofileflags;
	struct kqworkq *kqwq = NULL;
	vnode_t vn1 = NULL, vn2 = NULL;
	struct kqwllist *kqhash = NULL;
	u_long kqhashmask = 0;
	int n_files = 0;

	/*
	 * deallocate all the knotes up front and claim empty
	 * tables to make any subsequent kqueue closes faster.
	 */
	knotes_dealloc(p);
	assert(fdp->fd_knlistsize == 0);
	assert(fdp->fd_knhashmask == 0);

	/*
	 * dealloc all workloops that have outstanding retains
	 * when created with scheduling parameters.
	 */
	kqworkloops_dealloc(p);

	proc_fdlock(p);

	/* proc_ucred_unsafe() is ok: process is terminating */
	p_cred = proc_ucred_unsafe(p);

	/* close file descriptors */
	if (fdp->fd_nfiles > 0 && fdp->fd_ofiles) {
		for (int i = fdp->fd_afterlast; i-- > 0;) {
			if ((fp = fdp->fd_ofiles[i]) != NULL) {
				if (fdp->fd_ofileflags[i] & UF_RESERVED) {
					panic("fdfree: found fp with UF_RESERVED");
				}
				/* proc_ucred_unsafe() is ok: process is terminating */
				fp_close_and_unlock(p, p_cred, i, fp, 0);
				proc_fdlock(p);
			}
		}
	}

	/* detach everything under the lock; free after dropping it */
	n_files = fdp->fd_nfiles;
	ofileflags = fdp->fd_ofileflags;
	ofiles = fdp->fd_ofiles;
	kqwq = fdp->fd_wqkqueue;
	vn1 = fdp->fd_cdir;
	vn2 = fdp->fd_rdir;

	fdp->fd_ofileflags = NULL;
	fdp->fd_ofiles = NULL;
	fdp->fd_nfiles = 0;
	fdp->fd_wqkqueue = NULL;
	fdp->fd_cdir = NULL;
	fdp->fd_rdir = NULL;

	proc_fdunlock(p);

	lck_mtx_lock(&fdp->fd_kqhashlock);

	kqhash = fdp->fd_kqhash;
	kqhashmask = fdp->fd_kqhashmask;

	fdp->fd_kqhash = 0;
	fdp->fd_kqhashmask = 0;

	lck_mtx_unlock(&fdp->fd_kqhashlock);

	kfree_type(struct fileproc *, n_files, ofiles);
	kfree_data(ofileflags, n_files);

	if (kqwq) {
		kqworkq_dealloc(kqwq);
	}
	if (vn1) {
		vnode_rele(vn1);
	}
	if (vn2) {
		vnode_rele(vn2);
	}
	if (kqhash) {
		/* every bucket must be empty after knotes_dealloc() */
		for (uint32_t i = 0; i <= kqhashmask; i++) {
			assert(LIST_EMPTY(&kqhash[i]));
		}
		hashdestroy(kqhash, M_KQUEUE, kqhashmask);
	}
}
1080
1081
/*
 * Allocate a zeroed fileproc with its I/O reference count initialized
 * to one.  Z_NOFAIL: never returns NULL.
 */
struct fileproc *
fileproc_alloc_init(void)
{
	struct fileproc *fp;

	fp = zalloc_id(ZONE_ID_FILEPROC, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_ref_init(&fp->fp_iocount, &f_refgrp);
	return fp;
}
1091
1092
/*
 * Release the final I/O reference on `fp` and return it to its zone.
 * On DEVELOPMENT/DEBUG kernels, panics if any other reference remains;
 * any guard attached to the fileproc is removed first.
 */
void
fileproc_free(struct fileproc *fp)
{
	os_ref_count_t __unused refc = os_ref_release(&fp->fp_iocount);
#if DEVELOPMENT || DEBUG
	if (0 != refc) {
		panic("%s: pid %d refc: %u != 0",
		    __func__, proc_pid(current_proc()), refc);
	}
#endif
	if (fp->fp_guard_attrs) {
		guarded_fileproc_unguard(fp);
	}
	/* must not be freed while still on a select wait set */
	assert(fp->fp_wset == NULL);
	zfree_id(ZONE_ID_FILEPROC, fp);
}
1109
1110
/*
 * Statistics counter for the number of times a process calling fdalloc()
 * has resulted in an expansion of the per process open file table.
 *
 * XXX This would likely be of more use if it were per process
 */
int fdexpand;
1118
#if CONFIG_PROC_RESOURCE_LIMITS
/*
 * fd_check_limit_exceeded
 *
 * Compare the table's open-file count against its configured soft and
 * hard limits and, the first time each is crossed, send the resource
 * notification and arm an AST on the current thread.  A limit of 0
 * means "no limit"; each notification fires at most once (the
 * *_notified() predicates latch).
 *
 * Should be called only with the proc_fdlock held.
 */
void
fd_check_limit_exceeded(struct filedesc *fdp)
{
#if DIAGNOSTIC
	/*
	 * Fix: the previous assertion called proc_fdlock_assert(p, ...)
	 * but no `p` is in scope here — it failed to compile whenever
	 * DIAGNOSTIC was defined.  Assert on the embedded fd lock instead.
	 */
	lck_mtx_assert(&fdp->fd_lock, LCK_MTX_ASSERT_OWNED);
#endif

	if (!fd_above_soft_limit_notified(fdp) && fdp->fd_nfiles_soft_limit &&
	    (fdp->fd_nfiles_open > fdp->fd_nfiles_soft_limit)) {
		fd_above_soft_limit_send_notification(fdp);
		act_set_astproc_resource(current_thread());
	} else if (!fd_above_hard_limit_notified(fdp) && fdp->fd_nfiles_hard_limit &&
	    (fdp->fd_nfiles_open > fdp->fd_nfiles_hard_limit)) {
		fd_above_hard_limit_send_notification(fdp);
		act_set_astproc_resource(current_thread());
	}
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
1141
1142 /*
1143 * fdalloc
1144 *
1145 * Description: Allocate a file descriptor for the process.
1146 *
1147 * Parameters: p Process to allocate the fd in
1148 * want The fd we would prefer to get
1149 * result Pointer to fd we got
1150 *
1151 * Returns: 0 Success
1152 * EMFILE
1153 * ENOMEM
1154 *
1155 * Implicit returns:
1156 * *result (modified) The fd which was allocated
1157 */
int
fdalloc(proc_t p, int want, int *result)
{
	struct filedesc *fdp = &p->p_fd;
	int i;
	int last, numfiles, oldnfiles;
	struct fileproc **newofiles;
	char *newofileflags;
	int lim = proc_limitgetcur_nofile(p);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile. If that fails, consider
	 * expanding the ofile array.
	 */
#if DIAGNOSTIC
	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif

	for (;;) {
		/* Never scan past the smaller of the table size and the rlimit. */
		last = (int)MIN((unsigned int)fdp->fd_nfiles, (unsigned int)lim);
		if ((i = want) < fdp->fd_freefile) {
			i = fdp->fd_freefile;
		}
		for (; i < last; i++) {
			if (fdp->fd_ofiles[i] == NULL && !(fdp->fd_ofileflags[i] & UF_RESERVED)) {
				/* Free slot found: mark it UF_RESERVED so concurrent
				 * allocators skip it until the caller installs a fileproc. */
				procfdtbl_reservefd(p, i);
				if (i >= fdp->fd_afterlast) {
					fdp->fd_afterlast = i + 1;
				}
				if (want <= fdp->fd_freefile) {
					fdp->fd_freefile = i;
				}
				*result = i;
#if CONFIG_PROC_RESOURCE_LIMITS
				fdp->fd_nfiles_open++;
				fd_check_limit_exceeded(fdp);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
				return 0;
			}
		}

		/*
		 * No space in current array. Expand?
		 */
		if ((rlim_t)fdp->fd_nfiles >= lim) {
			return EMFILE;
		}
		if (fdp->fd_nfiles < NDEXTENT) {
			numfiles = NDEXTENT;
		} else {
			/* Grow geometrically to amortize repeated expansions. */
			numfiles = 2 * fdp->fd_nfiles;
		}
		/* Enforce lim */
		if ((rlim_t)numfiles > lim) {
			numfiles = (int)lim;
		}
		/*
		 * Drop the fdlock across the blocking allocations; the table
		 * can change while we sleep, which is re-checked below.
		 */
		proc_fdunlock(p);
		newofiles = kalloc_type(struct fileproc *, numfiles, Z_WAITOK | Z_ZERO);
		newofileflags = kalloc_data(numfiles, Z_WAITOK | Z_ZERO);
		proc_fdlock(p);
		if (newofileflags == NULL || newofiles == NULL) {
			kfree_type(struct fileproc *, numfiles, newofiles);
			kfree_data(newofileflags, numfiles);
			return ENOMEM;
		}
		if (fdp->fd_nfiles >= numfiles) {
			/* Another thread expanded the table while we slept: retry. */
			kfree_type(struct fileproc *, numfiles, newofiles);
			kfree_data(newofileflags, numfiles);
			continue;
		}

		/*
		 * Copy the existing ofile and ofileflags arrays
		 * and zero the new portion of each array.
		 */
		oldnfiles = fdp->fd_nfiles;
		memcpy(newofiles, fdp->fd_ofiles,
		    oldnfiles * sizeof(*fdp->fd_ofiles));
		memcpy(newofileflags, fdp->fd_ofileflags, oldnfiles);

		kfree_type(struct fileproc *, oldnfiles, fdp->fd_ofiles);
		kfree_data(fdp->fd_ofileflags, oldnfiles);
		fdp->fd_ofiles = newofiles;
		fdp->fd_ofileflags = newofileflags;
		fdp->fd_nfiles = numfiles;
		fdexpand++;
	}
}
1247
1248
1249 #pragma mark fileprocs
1250
1251 void
fileproc_modify_vflags(struct fileproc * fp,fileproc_vflags_t vflags,boolean_t clearflags)1252 fileproc_modify_vflags(struct fileproc *fp, fileproc_vflags_t vflags, boolean_t clearflags)
1253 {
1254 if (clearflags) {
1255 os_atomic_andnot(&fp->fp_vflags, vflags, relaxed);
1256 } else {
1257 os_atomic_or(&fp->fp_vflags, vflags, relaxed);
1258 }
1259 }
1260
1261 fileproc_vflags_t
fileproc_get_vflags(struct fileproc * fp)1262 fileproc_get_vflags(struct fileproc *fp)
1263 {
1264 return os_atomic_load(&fp->fp_vflags, relaxed);
1265 }
1266
1267 /*
1268 * falloc_withinit
1269 *
1270 * Create a new open file structure and allocate
1271 * a file descriptor for the process that refers to it.
1272 *
1273 * Returns: 0 Success
1274 *
1275 * Description: Allocate an entry in the per process open file table and
1276 * return the corresponding fileproc and fd.
1277 *
1278 * Parameters: p The process in whose open file
1279 * table the fd is to be allocated
1280 * resultfp Pointer to fileproc pointer
1281 * return area
1282 * resultfd Pointer to fd return area
1283 * ctx VFS context
1284 * fp_zalloc fileproc allocator to use
1285 * crarg allocator args
1286 *
1287 * Returns: 0 Success
1288 * ENFILE Too many open files in system
1289 * fdalloc:EMFILE Too many open files in process
1290 * fdalloc:ENOMEM M_OFILETABL zone exhausted
1291 * ENOMEM fp_zone or fg_zone zone
1292 * exhausted
1293 *
1294 * Implicit returns:
 *		*resultfp (modified)		Returned fileproc pointer
1296 * *resultfd (modified) Returned fd
1297 *
1298 * Notes: This function takes separate process and context arguments
1299 * solely to support kern_exec.c; otherwise, it would take
1300 * neither, and use the vfs_context_current() routine internally.
1301 */
1302 int
falloc_withinit(proc_t p,struct ucred * p_cred,struct vfs_context * ctx,struct fileproc ** resultfp,int * resultfd,fp_initfn_t fp_init,void * initarg)1303 falloc_withinit(
1304 proc_t p,
1305 struct ucred *p_cred,
1306 struct vfs_context *ctx,
1307 struct fileproc **resultfp,
1308 int *resultfd,
1309 fp_initfn_t fp_init,
1310 void *initarg)
1311 {
1312 struct fileproc *fp;
1313 struct fileglob *fg;
1314 int error, nfd;
1315
1316 /* Make sure we don't go beyond the system-wide limit */
1317 if (nfiles >= maxfiles) {
1318 tablefull("file");
1319 return ENFILE;
1320 }
1321
1322 proc_fdlock(p);
1323
1324 /* fdalloc will make sure the process stays below per-process limit */
1325 if ((error = fdalloc(p, 0, &nfd))) {
1326 proc_fdunlock(p);
1327 return error;
1328 }
1329
1330 #if CONFIG_MACF
1331 error = mac_file_check_create(p_cred);
1332 if (error) {
1333 proc_fdunlock(p);
1334 return error;
1335 }
1336 #else
1337 (void)p_cred;
1338 #endif
1339
1340 /*
1341 * Allocate a new file descriptor.
1342 * If the process has file descriptor zero open, add to the list
1343 * of open files at that point, otherwise put it at the front of
1344 * the list of open files.
1345 */
1346 proc_fdunlock(p);
1347
1348 fp = fileproc_alloc_init();
1349 if (fp_init) {
1350 fp_init(fp, initarg);
1351 }
1352
1353 fg = zalloc_flags(fg_zone, Z_WAITOK | Z_ZERO);
1354 lck_mtx_init(&fg->fg_lock, &file_lck_grp, LCK_ATTR_NULL);
1355
1356 os_ref_retain_locked(&fp->fp_iocount);
1357 os_ref_init_raw(&fg->fg_count, &f_refgrp);
1358 fg->fg_ops = &uninitops;
1359 fp->fp_glob = fg;
1360
1361 kauth_cred_ref(ctx->vc_ucred);
1362
1363 fp->f_cred = ctx->vc_ucred;
1364
1365 os_atomic_inc(&nfiles, relaxed);
1366
1367 proc_fdlock(p);
1368
1369 p->p_fd.fd_ofiles[nfd] = fp;
1370
1371 proc_fdunlock(p);
1372
1373 if (resultfp) {
1374 *resultfp = fp;
1375 }
1376 if (resultfd) {
1377 *resultfd = nfd;
1378 }
1379
1380 return 0;
1381 }
1382
1383 /*
1384 * fp_free
1385 *
1386 * Description: Release the fd and free the fileproc associated with the fd
1387 * in the per process open file table of the specified process;
1388 * these values must correspond.
1389 *
1390 * Parameters: p Process containing fd
1391 * fd fd to be released
1392 * fp fileproc to be freed
1393 */
void
fp_free(proc_t p, int fd, struct fileproc * fp)
{
	proc_fdlock_spin(p);
	/* Return the reserved descriptor slot to the table. */
	fdrelse(p, fd);
	proc_fdunlock(p);

	/* Undo the fileglob and both references created by falloc. */
	fg_free(fp->fp_glob);
	os_ref_release_live(&fp->fp_iocount);
	fileproc_free(fp);
}
1405
1406
1407 struct fileproc *
fp_get_noref_locked(proc_t p,int fd)1408 fp_get_noref_locked(proc_t p, int fd)
1409 {
1410 struct filedesc *fdp = &p->p_fd;
1411 struct fileproc *fp;
1412
1413 if (fd < 0 || fd >= fdp->fd_nfiles ||
1414 (fp = fdp->fd_ofiles[fd]) == NULL ||
1415 (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1416 return NULL;
1417 }
1418
1419 zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1420 return fp;
1421 }
1422
1423 struct fileproc *
fp_get_noref_locked_with_iocount(proc_t p,int fd)1424 fp_get_noref_locked_with_iocount(proc_t p, int fd)
1425 {
1426 struct filedesc *fdp = &p->p_fd;
1427 struct fileproc *fp = NULL;
1428
1429 if (fd < 0 || fd >= fdp->fd_nfiles ||
1430 (fp = fdp->fd_ofiles[fd]) == NULL ||
1431 os_ref_get_count(&fp->fp_iocount) <= 1 ||
1432 ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
1433 !(fdp->fd_ofileflags[fd] & UF_CLOSING))) {
1434 panic("%s: caller without an ioccount on fileproc (%d/:%p)",
1435 __func__, fd, fp);
1436 }
1437
1438 zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1439 return fp;
1440 }
1441
1442
1443 /*
1444 * fp_lookup
1445 *
1446 * Description: Get fileproc pointer for a given fd from the per process
1447 * open file table of the specified process and if successful,
1448 * increment the fp_iocount
1449 *
1450 * Parameters: p Process in which fd lives
1451 * fd fd to get information for
1452 * resultfp Pointer to result fileproc
1453 * pointer area, or 0 if none
1454 * locked !0 if the caller holds the
1455 * proc_fdlock, 0 otherwise
1456 *
1457 * Returns: 0 Success
1458 * EBADF Bad file descriptor
1459 *
1460 * Implicit returns:
1461 * *resultfp (modified) Fileproc pointer
1462 *
 * Locks:	If the argument 'locked' is non-zero, then the caller is
 *		expected to have taken and held the proc_fdlock; if it is
 *		zero, then this routine internally takes and drops this lock.
1466 */
1467 int
fp_lookup(proc_t p,int fd,struct fileproc ** resultfp,int locked)1468 fp_lookup(proc_t p, int fd, struct fileproc **resultfp, int locked)
1469 {
1470 struct filedesc *fdp = &p->p_fd;
1471 struct fileproc *fp;
1472
1473 if (!locked) {
1474 proc_fdlock_spin(p);
1475 }
1476 if (fd < 0 || fdp == NULL || fd >= fdp->fd_nfiles ||
1477 (fp = fdp->fd_ofiles[fd]) == NULL ||
1478 (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1479 if (!locked) {
1480 proc_fdunlock(p);
1481 }
1482 return EBADF;
1483 }
1484
1485 zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1486 os_ref_retain_locked(&fp->fp_iocount);
1487
1488 if (resultfp) {
1489 *resultfp = fp;
1490 }
1491 if (!locked) {
1492 proc_fdunlock(p);
1493 }
1494
1495 return 0;
1496 }
1497
1498
1499 int
fp_get_ftype(proc_t p,int fd,file_type_t ftype,int err,struct fileproc ** fpp)1500 fp_get_ftype(proc_t p, int fd, file_type_t ftype, int err, struct fileproc **fpp)
1501 {
1502 struct filedesc *fdp = &p->p_fd;
1503 struct fileproc *fp;
1504
1505 proc_fdlock_spin(p);
1506 if (fd < 0 || fd >= fdp->fd_nfiles ||
1507 (fp = fdp->fd_ofiles[fd]) == NULL ||
1508 (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1509 proc_fdunlock(p);
1510 return EBADF;
1511 }
1512
1513 if (fp->f_type != ftype) {
1514 proc_fdunlock(p);
1515 return err;
1516 }
1517
1518 zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1519 os_ref_retain_locked(&fp->fp_iocount);
1520 proc_fdunlock(p);
1521
1522 *fpp = fp;
1523 return 0;
1524 }
1525
1526
1527 /*
1528 * fp_drop
1529 *
1530 * Description: Drop the I/O reference previously taken by calling fp_lookup
1531 * et. al.
1532 *
1533 * Parameters: p Process in which the fd lives
1534 * fd fd associated with the fileproc
1535 * fp fileproc on which to set the
1536 * flag and drop the reference
1537 * locked flag to internally take and
1538 * drop proc_fdlock if it is not
1539 * already held by the caller
1540 *
1541 * Returns: 0 Success
1542 * EBADF Bad file descriptor
1543 *
 * Locks:	This function internally takes and drops the proc_fdlock for
 *		the supplied process if 'locked' is zero, and assumes that
 *		the caller already holds this lock if 'locked' is non-zero.
1547 *
1548 * Notes: The fileproc must correspond to the fd in the supplied proc
1549 */
int
fp_drop(proc_t p, int fd, struct fileproc *fp, int locked)
{
	struct filedesc *fdp = &p->p_fd;
	int needwakeup = 0;

	if (!locked) {
		proc_fdlock_spin(p);
	}
	/*
	 * If the caller passed FILEPROC_NULL, look the fileproc up from the
	 * fd. A UF_RESERVED slot is still acceptable when it is also
	 * UF_CLOSING (close in progress, draining references).
	 */
	if ((fp == FILEPROC_NULL) && (fd < 0 || fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL ||
	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
	    !(fdp->fd_ofileflags[fd] & UF_CLOSING)))) {
		if (!locked) {
			proc_fdunlock(p);
		}
		return EBADF;
	}

	/* Release returning 1 means ours was the last extra I/O reference. */
	if (1 == os_ref_release_locked(&fp->fp_iocount)) {
		if (fp->fp_flags & FP_SELCONFLICT) {
			fp->fp_flags &= ~FP_SELCONFLICT;
		}

		/* A closer may be sleeping in fileproc_drain(); wake it below. */
		if (fdp->fd_fpdrainwait) {
			fdp->fd_fpdrainwait = 0;
			needwakeup = 1;
		}
	}
	if (!locked) {
		proc_fdunlock(p);
	}
	/* Issue the wakeup only after the fdlock has been dropped. */
	if (needwakeup) {
		wakeup(&fdp->fd_fpdrainwait);
	}

	return 0;
}
1588
1589
1590 /*
1591 * fileproc_drain
1592 *
1593 * Description: Drain out pending I/O operations
1594 *
1595 * Parameters: p Process closing this file
1596 * fp fileproc struct for the open
1597 * instance on the file
1598 *
1599 * Returns: void
1600 *
1601 * Locks: Assumes the caller holds the proc_fdlock
1602 *
1603 * Notes: For character devices, this occurs on the last close of the
1604 * device; for all other file descriptors, this occurs on each
1605 * close to prevent fd's from being closed out from under
1606 * operations currently in progress and blocked
1607 *
1608 * See Also: file_vnode(), file_socket(), file_drop(), and the cautions
1609 * regarding their use and interaction with this function.
1610 */
static void
fileproc_drain(proc_t p, struct fileproc * fp)
{
	struct filedesc *fdp = &p->p_fd;
	struct vfs_context context;
	thread_t thread;
	bool is_current_proc;

	is_current_proc = (p == current_proc());

	if (!is_current_proc) {
		/* Draining another proc's fd: borrow a thread from that proc
		 * for the vfs_context used by fo_drain(). */
		proc_lock(p);
		thread = proc_thread(p); /* XXX */
		thread_reference(thread);
		proc_unlock(p);
	} else {
		thread = current_thread();
	}

	context.vc_thread = thread;
	context.vc_ucred = fp->fp_glob->fg_cred;

	/* Set the vflag for drain */
	fileproc_modify_vflags(fp, FPV_DRAIN, FALSE);

	/*
	 * Loop until ours is the only remaining reference (iocount == 1).
	 * Each pass kicks pending operations loose, then sleeps until another
	 * thread drops its reference (see the wakeups in fp_drop/file_drop).
	 */
	while (os_ref_get_count(&fp->fp_iocount) > 1) {
		lck_mtx_convert_spin(&fdp->fd_lock);

		fo_drain(fp, &context);
		if ((fp->fp_flags & FP_INSELECT) == FP_INSELECT) {
			struct select_set *selset;

			/* Guarded fileprocs keep their wait set in the guard. */
			if (fp->fp_guard_attrs) {
				selset = fp->fp_guard->fpg_wset;
			} else {
				selset = fp->fp_wset;
			}
			/* Knock threads blocked in select() off the wait queue. */
			if (waitq_wakeup64_all(selset, NO_EVENT64,
			    THREAD_INTERRUPTED, WAITQ_WAKEUP_DEFAULT) == KERN_INVALID_ARGUMENT) {
				panic("bad wait queue for waitq_wakeup64_all %p (%sfp:%p)",
				    selset, fp->fp_guard_attrs ? "guarded " : "", fp);
			}
		}
		if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
			if (waitq_wakeup64_all(&select_conflict_queue, NO_EVENT64,
			    THREAD_INTERRUPTED, WAITQ_WAKEUP_DEFAULT) == KERN_INVALID_ARGUMENT) {
				panic("bad select_conflict_queue");
			}
		}
		/* Sleep until a reference holder wakes us (fdlock released by msleep). */
		fdp->fd_fpdrainwait = 1;
		msleep(&fdp->fd_fpdrainwait, &fdp->fd_lock, PRIBIO, "fpdrain", NULL);
	}
#if DIAGNOSTIC
	if ((fp->fp_flags & FP_INSELECT) != 0) {
		panic("FP_INSELECT set on drained fp");
	}
#endif
	if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
		fp->fp_flags &= ~FP_SELCONFLICT;
	}

	if (!is_current_proc) {
		thread_deallocate(thread);
	}
}
1676
1677
int
fp_close_and_unlock(proc_t p, kauth_cred_t cred, int fd, struct fileproc *fp, int flags)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileglob *fg = fp->fp_glob;

#if DIAGNOSTIC
	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif

	/*
	 * Keep most people from finding the filedesc while we are closing it.
	 *
	 * Callers are:
	 *
	 * - dup2() which always waits for UF_RESERVED to clear
	 *
	 * - close/guarded_close/... who will fail the fileproc lookup if
	 *   UF_RESERVED is set,
	 *
	 * - fdexec()/fdfree() who only run once all threads in the proc
	 *   are properly canceled, hence no fileproc in this proc should
	 *   be in flux.
	 *
	 * Which means that neither UF_RESERVED nor UF_CLOSING should be set.
	 *
	 * Callers of fp_get_noref_locked_with_iocount() can still find
	 * this entry so that they can drop their I/O reference despite
	 * not having remembered the fileproc pointer (namely select() and
	 * file_drop()).
	 */
	if (p->p_fd.fd_ofileflags[fd] & (UF_RESERVED | UF_CLOSING)) {
		panic("%s: called with fileproc in flux (%d/:%p)",
		    __func__, fd, fp);
	}
	p->p_fd.fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING);

	/*
	 * Pre-close work that must run without the fdlock held: close
	 * notification for vnode-backed files and AIO cancellation.
	 */
	if ((fp->fp_flags & FP_AIOISSUED) ||
#if CONFIG_MACF
	    (FILEGLOB_DTYPE(fg) == DTYPE_VNODE)
#else
	    kauth_authorize_fileop_has_listeners()
#endif
	    ) {
		proc_fdunlock(p);

		if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
			/*
			 * call out to allow 3rd party notification of close.
			 * Ignore result of kauth_authorize_fileop call.
			 */
#if CONFIG_MACF
			mac_file_notify_close(cred, fp->fp_glob);
#else
			(void)cred;
#endif

			/* Need a vnode iocount across the kauth callout. */
			if (kauth_authorize_fileop_has_listeners() &&
			    vnode_getwithref((vnode_t)fg_get_data(fg)) == 0) {
				u_int fileop_flags = 0;
				if (fg->fg_flag & FWASWRITTEN) {
					fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED;
				}
				kauth_authorize_fileop(fg->fg_cred, KAUTH_FILEOP_CLOSE,
				    (uintptr_t)fg_get_data(fg), (uintptr_t)fileop_flags);

				vnode_put((vnode_t)fg_get_data(fg));
			}
		}

		if (fp->fp_flags & FP_AIOISSUED) {
			/*
			 * cancel all async IO requests that can be cancelled.
			 */
			_aio_close( p, fd );
		}

		proc_fdlock(p);
	}

	/* Detach any knotes attached to this fd. */
	if (fd < fdp->fd_knlistsize) {
		knote_fdclose(p, fd);
	}

	/* Wait here until all outstanding I/O references are gone. */
	fileproc_drain(p, fp);

	if (flags & FD_DUP2RESV) {
		/* dup2() wants to reuse this slot: keep it UF_RESERVED. */
		fdp->fd_ofiles[fd] = NULL;
		fdp->fd_ofileflags[fd] &= ~UF_CLOSING;
	} else {
		fdrelse(p, fd);
	}

	proc_fdunlock(p);

	if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fg) == DTYPE_SOCKET) {
		KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_END,
		    fd, 0, (int64_t)VM_KERNEL_ADDRPERM(fg_get_data(fg)));
	}

	/* Drop the table slot's fileproc reference, then the fileglob. */
	fileproc_free(fp);

	return fg_drop(p, fg);
}
1782
1783 /*
1784 * dupfdopen
1785 *
1786 * Description: Duplicate the specified descriptor to a free descriptor;
1787 * this is the second half of fdopen(), above.
1788 *
1789 * Parameters: p current process pointer
1790 * indx fd to dup to
1791 * dfd fd to dup from
1792 * mode mode to set on new fd
1793 * error command code
1794 *
1795 * Returns: 0 Success
1796 * EBADF Source fd is bad
1797 * EACCES Requested mode not allowed
1798 * !0 'error', if not ENODEV or
1799 * ENXIO
1800 *
1801 * Notes: XXX This is not thread safe; see fdopen() above
1802 */
int
dupfdopen(proc_t p, int indx, int dfd, int flags, int error)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *wfp;
	struct fileproc *fp;
#if CONFIG_MACF
	int myerror;
#endif

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, reject. Note, check for new == old is necessary as
	 * falloc could allocate an already closed to-be-dup'd descriptor
	 * as the new descriptor.
	 */
	proc_fdlock(p);

	fp = fdp->fd_ofiles[indx];
	if (dfd < 0 || dfd >= fdp->fd_nfiles ||
	    (wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp ||
	    (fdp->fd_ofileflags[dfd] & UF_RESERVED)) {
		proc_fdunlock(p);
		return EBADF;
	}
#if CONFIG_MACF
	myerror = mac_file_check_dup(kauth_cred_get(), wfp->fp_glob, dfd);
	if (myerror) {
		proc_fdunlock(p);
		return myerror;
	}
#endif
	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup (dfd) to file descriptor
	 * (indx) and return.
	 *
	 * For ENXIO steal away the file structure from (dfd) and
	 * store it in (indx). (dfd) is effectively closed by
	 * this operation.
	 *
	 * Any other error code is just returned.
	 *
	 * NOTE(review): the ENXIO "steal" case described above has no case
	 * label in the switch below, so ENXIO currently falls through to
	 * default and is returned to the caller — confirm this is intended.
	 */
	switch (error) {
	case ENODEV:
		/* Guarded descriptors refuse duplication. */
		if (fp_isguarded(wfp, GUARD_DUP)) {
			proc_fdunlock(p);
			return EPERM;
		}

		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((flags & (FREAD | FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			proc_fdunlock(p);
			return EACCES;
		}
		if (indx >= fdp->fd_afterlast) {
			fdp->fd_afterlast = indx + 1;
		}

		/* Drop whatever glob the target slot already held, then share dfd's. */
		if (fp->fp_glob) {
			fg_free(fp->fp_glob);
		}
		fg_ref(p, wfp->fp_glob);
		fp->fp_glob = wfp->fp_glob;
		/*
		 * Historically, open(/dev/fd/<n>) preserves close on fork/exec,
		 * unlike dup(), dup2() or fcntl(F_DUPFD).
		 *
		 * open1() already handled O_CLO{EXEC,FORK}
		 */
		fp->fp_flags |= (wfp->fp_flags & (FP_CLOFORK | FP_CLOEXEC));

		procfdtbl_releasefd(p, indx, NULL);
		fp_drop(p, indx, fp, 1);
		proc_fdunlock(p);
		return 0;

	default:
		proc_fdunlock(p);
		return error;
	}
	/* NOTREACHED */
}
1891
1892
1893 #pragma mark KPIS (sys/file.h)
1894
1895 /*
1896 * fg_get_vnode
1897 *
1898 * Description: Return vnode associated with the file structure, if
1899 * any. The lifetime of the returned vnode is bound to
1900 * the lifetime of the file structure.
1901 *
1902 * Parameters: fg Pointer to fileglob to
1903 * inspect
1904 *
1905 * Returns: vnode_t
1906 */
1907 vnode_t
fg_get_vnode(struct fileglob * fg)1908 fg_get_vnode(struct fileglob *fg)
1909 {
1910 if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
1911 return (vnode_t)fg_get_data(fg);
1912 } else {
1913 return NULL;
1914 }
1915 }
1916
1917
1918 /*
1919 * fp_getfvp
1920 *
1921 * Description: Get fileproc and vnode pointer for a given fd from the per
1922 * process open file table of the specified process, and if
1923 * successful, increment the fp_iocount
1924 *
1925 * Parameters: p Process in which fd lives
1926 * fd fd to get information for
1927 * resultfp Pointer to result fileproc
1928 * pointer area, or 0 if none
1929 * resultvp Pointer to result vnode pointer
1930 * area, or 0 if none
1931 *
1932 * Returns: 0 Success
1933 * EBADF Bad file descriptor
1934 * ENOTSUP fd does not refer to a vnode
1935 *
1936 * Implicit returns:
1937 * *resultfp (modified) Fileproc pointer
1938 * *resultvp (modified) vnode pointer
1939 *
1940 * Notes: The resultfp and resultvp fields are optional, and may be
1941 * independently specified as NULL to skip returning information
1942 *
1943 * Locks: Internally takes and releases proc_fdlock
1944 */
1945 int
fp_getfvp(proc_t p,int fd,struct fileproc ** resultfp,struct vnode ** resultvp)1946 fp_getfvp(proc_t p, int fd, struct fileproc **resultfp, struct vnode **resultvp)
1947 {
1948 struct fileproc *fp;
1949 int error;
1950
1951 error = fp_get_ftype(p, fd, DTYPE_VNODE, ENOTSUP, &fp);
1952 if (error == 0) {
1953 if (resultfp) {
1954 *resultfp = fp;
1955 }
1956 if (resultvp) {
1957 *resultvp = (struct vnode *)fp_get_data(fp);
1958 }
1959 }
1960
1961 return error;
1962 }
1963
1964
1965 /*
1966 * fp_get_pipe_id
1967 *
1968 * Description: Get pipe id for a given fd from the per process open file table
1969 * of the specified process.
1970 *
1971 * Parameters: p Process in which fd lives
1972 * fd fd to get information for
1973 * result_pipe_id Pointer to result pipe id
1974 *
1975 * Returns: 0 Success
 *		EINVAL			NULL pointer arguments passed
1977 * fp_lookup:EBADF Bad file descriptor
1978 * ENOTSUP fd does not refer to a pipe
1979 *
1980 * Implicit returns:
1981 * *result_pipe_id (modified) pipe id
1982 *
1983 * Locks: Internally takes and releases proc_fdlock
1984 */
1985 int
fp_get_pipe_id(proc_t p,int fd,uint64_t * result_pipe_id)1986 fp_get_pipe_id(proc_t p, int fd, uint64_t *result_pipe_id)
1987 {
1988 struct fileproc *fp = FILEPROC_NULL;
1989 struct fileglob *fg = NULL;
1990 int error = 0;
1991
1992 if (p == NULL || result_pipe_id == NULL) {
1993 return EINVAL;
1994 }
1995
1996 proc_fdlock(p);
1997 if ((error = fp_lookup(p, fd, &fp, 1))) {
1998 proc_fdunlock(p);
1999 return error;
2000 }
2001 fg = fp->fp_glob;
2002
2003 if (FILEGLOB_DTYPE(fg) == DTYPE_PIPE) {
2004 *result_pipe_id = pipe_id((struct pipe*)fg_get_data(fg));
2005 } else {
2006 error = ENOTSUP;
2007 }
2008
2009 fp_drop(p, fd, fp, 1);
2010 proc_fdunlock(p);
2011 return error;
2012 }
2013
2014
2015 /*
2016 * file_vnode
2017 *
2018 * Description: Given an fd, look it up in the current process's per process
2019 * open file table, and return its internal vnode pointer.
2020 *
2021 * Parameters: fd fd to obtain vnode from
2022 * vpp pointer to vnode return area
2023 *
2024 * Returns: 0 Success
2025 * EINVAL The fd does not refer to a
2026 * vnode fileproc entry
2027 * fp_lookup:EBADF Bad file descriptor
2028 *
2029 * Implicit returns:
2030 * *vpp (modified) Returned vnode pointer
2031 *
2032 * Locks: This function internally takes and drops the proc_fdlock for
2033 * the current process
2034 *
2035 * Notes: If successful, this function increments the fp_iocount on the
2036 * fd's corresponding fileproc.
2037 *
2038 * The fileproc referenced is not returned; because of this, care
2039 * must be taken to not drop the last reference (e.g. by closing
2040 * the file). This is inherently unsafe, since the reference may
2041 * not be recoverable from the vnode, if there is a subsequent
2042 * close that destroys the associate fileproc. The caller should
2043 * therefore retain their own reference on the fileproc so that
2044 * the fp_iocount can be dropped subsequently. Failure to do this
2045 * can result in the returned pointer immediately becoming invalid
2046 * following the call.
2047 *
2048 * Use of this function is discouraged.
2049 */
int
file_vnode(int fd, struct vnode **vpp)
{
	/* Convenience wrapper: file_vnode_withvid() without the vid out-param. */
	return file_vnode_withvid(fd, vpp, NULL);
}
2055
2056
2057 /*
2058 * file_vnode_withvid
2059 *
2060 * Description: Given an fd, look it up in the current process's per process
2061 * open file table, and return its internal vnode pointer.
2062 *
2063 * Parameters: fd fd to obtain vnode from
2064 * vpp pointer to vnode return area
2065 * vidp pointer to vid of the returned vnode
2066 *
2067 * Returns: 0 Success
2068 * EINVAL The fd does not refer to a
2069 * vnode fileproc entry
2070 * fp_lookup:EBADF Bad file descriptor
2071 *
2072 * Implicit returns:
2073 * *vpp (modified) Returned vnode pointer
2074 *
2075 * Locks: This function internally takes and drops the proc_fdlock for
2076 * the current process
2077 *
2078 * Notes: If successful, this function increments the fp_iocount on the
2079 * fd's corresponding fileproc.
2080 *
2081 * The fileproc referenced is not returned; because of this, care
2082 * must be taken to not drop the last reference (e.g. by closing
2083 * the file). This is inherently unsafe, since the reference may
2084 * not be recoverable from the vnode, if there is a subsequent
2085 * close that destroys the associate fileproc. The caller should
2086 * therefore retain their own reference on the fileproc so that
2087 * the fp_iocount can be dropped subsequently. Failure to do this
2088 * can result in the returned pointer immediately becoming invalid
2089 * following the call.
2090 *
2091 * Use of this function is discouraged.
2092 */
2093 int
file_vnode_withvid(int fd,struct vnode ** vpp,uint32_t * vidp)2094 file_vnode_withvid(int fd, struct vnode **vpp, uint32_t *vidp)
2095 {
2096 struct fileproc *fp;
2097 int error;
2098
2099 error = fp_get_ftype(current_proc(), fd, DTYPE_VNODE, EINVAL, &fp);
2100 if (error == 0) {
2101 if (vpp) {
2102 *vpp = (struct vnode *)fp_get_data(fp);
2103 }
2104 if (vidp) {
2105 *vidp = vnode_vid((struct vnode *)fp_get_data(fp));
2106 }
2107 }
2108 return error;
2109 }
2110
2111 /*
2112 * file_socket
2113 *
2114 * Description: Given an fd, look it up in the current process's per process
2115 * open file table, and return its internal socket pointer.
2116 *
2117 * Parameters: fd fd to obtain vnode from
2118 * sp pointer to socket return area
2119 *
2120 * Returns: 0 Success
2121 * ENOTSOCK Not a socket
2122 * fp_lookup:EBADF Bad file descriptor
2123 *
2124 * Implicit returns:
2125 * *sp (modified) Returned socket pointer
2126 *
2127 * Locks: This function internally takes and drops the proc_fdlock for
2128 * the current process
2129 *
2130 * Notes: If successful, this function increments the fp_iocount on the
2131 * fd's corresponding fileproc.
2132 *
2133 * The fileproc referenced is not returned; because of this, care
2134 * must be taken to not drop the last reference (e.g. by closing
2135 * the file). This is inherently unsafe, since the reference may
2136 * not be recoverable from the socket, if there is a subsequent
2137 * close that destroys the associate fileproc. The caller should
2138 * therefore retain their own reference on the fileproc so that
2139 * the fp_iocount can be dropped subsequently. Failure to do this
2140 * can result in the returned pointer immediately becoming invalid
2141 * following the call.
2142 *
2143 * Use of this function is discouraged.
2144 */
2145 int
file_socket(int fd,struct socket ** sp)2146 file_socket(int fd, struct socket **sp)
2147 {
2148 struct fileproc *fp;
2149 int error;
2150
2151 error = fp_get_ftype(current_proc(), fd, DTYPE_SOCKET, ENOTSOCK, &fp);
2152 if (error == 0) {
2153 if (sp) {
2154 *sp = (struct socket *)fp_get_data(fp);
2155 }
2156 }
2157 return error;
2158 }
2159
2160
2161 /*
2162 * file_flags
2163 *
2164 * Description: Given an fd, look it up in the current process's per process
2165 * open file table, and return its fileproc's flags field.
2166 *
2167 * Parameters: fd fd whose flags are to be
2168 * retrieved
2169 * flags pointer to flags data area
2170 *
2171 * Returns: 0 Success
 *		EBADF			Bad file descriptor
2173 * fp_lookup:EBADF Bad file descriptor
2174 *
2175 * Implicit returns:
2176 * *flags (modified) Returned flags field
2177 *
2178 * Locks: This function internally takes and drops the proc_fdlock for
2179 * the current process
2180 */
2181 int
file_flags(int fd,int * flags)2182 file_flags(int fd, int *flags)
2183 {
2184 proc_t p = current_proc();
2185 struct fileproc *fp;
2186 int error = EBADF;
2187
2188 proc_fdlock_spin(p);
2189 fp = fp_get_noref_locked(p, fd);
2190 if (fp) {
2191 *flags = (int)fp->f_flag;
2192 error = 0;
2193 }
2194 proc_fdunlock(p);
2195
2196 return error;
2197 }
2198
2199
2200 /*
2201 * file_drop
2202 *
2203 * Description: Drop an iocount reference on an fd, and wake up any waiters
2204 * for draining (i.e. blocked in fileproc_drain() called during
2205 * the last attempt to close a file).
2206 *
2207 * Parameters: fd fd on which an ioreference is
2208 * to be dropped
2209 *
2210 * Returns: 0 Success
2211 *
2212 * Description: Given an fd, look it up in the current process's per process
 *		open file table, and drop its fileproc's fp_iocount by one
2214 *
2215 * Notes: This is intended as a corresponding operation to the functions
2216 * file_vnode() and file_socket() operations.
2217 *
2218 * If the caller can't possibly hold an I/O reference,
2219 * this function will panic the kernel rather than allowing
2220 * for memory corruption. Callers should always call this
2221 * because they acquired an I/O reference on this file before.
2222 *
2223 * Use of this function is discouraged.
2224 */
int
file_drop(int fd)
{
	struct fileproc *fp;
	proc_t p = current_proc();
	struct filedesc *fdp = &p->p_fd;
	int needwakeup = 0;

	proc_fdlock_spin(p);
	/* Panics (rather than corrupting) if the fd holds no I/O reference. */
	fp = fp_get_noref_locked_with_iocount(p, fd);

	/* Release returning 1 means ours was the last extra I/O reference. */
	if (1 == os_ref_release_locked(&fp->fp_iocount)) {
		if (fp->fp_flags & FP_SELCONFLICT) {
			fp->fp_flags &= ~FP_SELCONFLICT;
		}

		/* A closer may be sleeping in fileproc_drain(); wake it below. */
		if (fdp->fd_fpdrainwait) {
			fdp->fd_fpdrainwait = 0;
			needwakeup = 1;
		}
	}
	proc_fdunlock(p);

	/* Issue the wakeup only after the fdlock has been dropped. */
	if (needwakeup) {
		wakeup(&fdp->fd_fpdrainwait);
	}
	return 0;
}
2253
2254
#pragma mark syscalls

/*
 * Filesystem-specific fcntl/ioctl selectors, defined locally in case the
 * owning filesystem's headers are not visible to this file.
 */
#ifndef HFS_GET_BOOT_INFO
#define HFS_GET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00004)
#endif

#ifndef HFS_SET_BOOT_INFO
#define HFS_SET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00005)
#endif

#ifndef APFSIOC_REVERT_TO_SNAPSHOT
#define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t)
#endif

/*
 * Evaluates to 1 when the signed 64-bit sum (x + y) would overflow (both
 * operands positive) or underflow (both operands negative); 0 otherwise.
 * Mixed-sign additions can never overflow.
 */
#define CHECK_ADD_OVERFLOW_INT64L(x, y) \
	(((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \
	(((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \
	? 1 : 0)
2273
/*
 * sys_getdtablesize
 *
 * Description:	Returns the per process maximum size of the descriptor table
 *
 * Parameters:	p				Process being queried
 *		retval				Pointer to the call return area
 *
 * Returns:	0				Success
 *
 * Implicit returns:
 *		*retval (modified)		Size of dtable
 */
int
sys_getdtablesize(proc_t p, __unused struct getdtablesize_args *uap, int32_t *retval)
{
	/* current RLIMIT_NOFILE soft limit for this process */
	*retval = proc_limitgetcur_nofile(p);
	return 0;
}
2293
2294
/* True when the signed 64-bit sum (x + y) would overflow or underflow. */
static inline int
addition_overflows_int64(long long x, long long y)
{
	return (x > 0 && y > 0 && x > LLONG_MAX - y) ||
	       (x < 0 && y < 0 && x < LLONG_MIN - y);
}

/*
 * check_file_seek_range
 *
 * Description:	Checks that the byte range described by an flock structure,
 *		resolved against the current file offset (SEEK_CUR) or taken
 *		absolutely (SEEK_SET), lies within [0, LLONG_MAX].
 *
 * Parameters:	fl			Flock structure.
 *		cur_file_offset		Current offset in the file.
 *
 * Returns:	0 on Success.
 *		EOVERFLOW on overflow.
 *		EINVAL on offset less than zero.
 */
static int
check_file_seek_range(struct flock *fl, off_t cur_file_offset)
{
	off_t start;

	switch (fl->l_whence) {
	case SEEK_CUR:
		/* Would the resolved start marker wrap past LLONG_MAX/LLONG_MIN? */
		if (addition_overflows_int64(fl->l_start, cur_file_offset)) {
			/* negative start => both operands negative => underflow */
			return (fl->l_start < 0) ? EINVAL : EOVERFLOW;
		}
		start = fl->l_start + cur_file_offset;
		/* Start marker must not be negative. */
		if (start < 0) {
			return EINVAL;
		}
		/* End marker must not run past LLONG_MAX ... */
		if (fl->l_len > 0 &&
		    addition_overflows_int64(start, fl->l_len - 1)) {
			return EOVERFLOW;
		}
		/* ... nor be negative (l_len <= 0 locks backwards from start). */
		if (fl->l_len <= 0 && start + fl->l_len < 0) {
			return EINVAL;
		}
		break;

	case SEEK_SET:
		/* Start marker must not be negative. */
		if (fl->l_start < 0) {
			return EINVAL;
		}
		/* End marker must not run past LLONG_MAX ... */
		if (fl->l_len > 0 &&
		    addition_overflows_int64(fl->l_start, fl->l_len - 1)) {
			return EOVERFLOW;
		}
		/* ... nor be negative. */
		if (fl->l_len < 0 && fl->l_start + fl->l_len < 0) {
			return EINVAL;
		}
		break;

	default:
		/* other whence values are validated elsewhere */
		break;
	}
	return 0;
}
2351
2352
2353 /*
2354 * sys_dup
2355 *
2356 * Description: Duplicate a file descriptor.
2357 *
2358 * Parameters: p Process performing the dup
2359 * uap->fd The fd to dup
2360 * retval Pointer to the call return area
2361 *
2362 * Returns: 0 Success
2363 * !0 Errno
2364 *
2365 * Implicit returns:
2366 * *retval (modified) The new descriptor
2367 */
2368 int
sys_dup(proc_t p,struct dup_args * uap,int32_t * retval)2369 sys_dup(proc_t p, struct dup_args *uap, int32_t *retval)
2370 {
2371 int old = uap->fd;
2372 int new, error;
2373 struct fileproc *fp;
2374 kauth_cred_t p_cred;
2375
2376 proc_fdlock(p);
2377 if ((error = fp_lookup(p, old, &fp, 1))) {
2378 proc_fdunlock(p);
2379 return error;
2380 }
2381 if (fp_isguarded(fp, GUARD_DUP)) {
2382 error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
2383 (void) fp_drop(p, old, fp, 1);
2384 proc_fdunlock(p);
2385 return error;
2386 }
2387 if ((error = fdalloc(p, 0, &new))) {
2388 fp_drop(p, old, fp, 1);
2389 proc_fdunlock(p);
2390 return error;
2391 }
2392 p_cred = current_cached_proc_cred(p);
2393 error = finishdup(p, p_cred, old, new, 0, retval);
2394
2395 if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
2396 KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_START,
2397 new, 0, (int64_t)VM_KERNEL_ADDRPERM(fp_get_data(fp)));
2398 }
2399
2400 fp_drop(p, old, fp, 1);
2401 proc_fdunlock(p);
2402
2403 return error;
2404 }
2405
/*
 * sys_dup2
 *
 * Description:	Duplicate a file descriptor to a particular value.
 *
 * Parameters:	p				Process performing the dup
 *		uap->from			The fd to dup
 *		uap->to				The fd to dup it to
 *		retval				Pointer to the call return area
 *
 * Returns:	0				Success
 *		!0				Errno
 *
 * Implicit returns:
 *		*retval (modified)		The new descriptor
 */
int
sys_dup2(proc_t p, struct dup2_args *uap, int32_t *retval)
{
	/* thin wrapper: dup2() below does the real work */
	kauth_cred_t p_cred = current_cached_proc_cred(p);

	return dup2(p, p_cred, uap->from, uap->to, retval);
}
2429
/*
 * dup2
 *
 * Description:	Kernel body of dup2(2): make descriptor 'new' refer to the
 *		same open file as 'old', closing whatever 'new' previously
 *		referred to (if anything).
 *
 * Parameters:	p		Process performing the dup
 *		p_cred		Credential used when closing the prior 'new' fd
 *		old		The fd to dup
 *		new		The fd to dup it to
 *		retval		Pointer to the call return area
 *
 * Returns:	0		Success
 *		EBADF		'new' outside the RLIMIT_NOFILE range
 *		!0		Errno from fp_lookup/fdalloc/finishdup or a
 *				guard exception
 *
 * Locks:	Takes and drops the proc_fdlock; may transiently lose it
 *		(procfdtbl_waitfd, fp_close_and_unlock) and restart.
 */
int
dup2(proc_t p, kauth_cred_t p_cred, int old, int new, int *retval)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *fp, *nfp;
	int i, error;

	proc_fdlock(p);

	/* restarted whenever the fdlock had to be dropped mid-operation */
startover:
	if ((error = fp_lookup(p, old, &fp, 1))) {
		proc_fdunlock(p);
		return error;
	}
	/* guarded descriptors refuse dup: deliver the guard exception */
	if (fp_isguarded(fp, GUARD_DUP)) {
		error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
		(void) fp_drop(p, old, fp, 1);
		proc_fdunlock(p);
		return error;
	}
	/* 'new' must fall within the process's RLIMIT_NOFILE range */
	if (new < 0 || new >= proc_limitgetcur_nofile(p)) {
		fp_drop(p, old, fp, 1);
		proc_fdunlock(p);
		return EBADF;
	}
	/* dup2(fd, fd) is a validated no-op per POSIX */
	if (old == new) {
		fp_drop(p, old, fp, 1);
		*retval = new;
		proc_fdunlock(p);
		return 0;
	}
	if (new < 0 || new >= fdp->fd_nfiles) {
		/* 'new' is past the current table: grow and reserve it */
		if ((error = fdalloc(p, new, &i))) {
			fp_drop(p, old, fp, 1);
			proc_fdunlock(p);
			return error;
		}
		if (new != i) {
			/* got a different slot than requested; release it and
			 * fall into the close-and-reserve path for 'new' */
			fdrelse(p, i);
			goto closeit;
		}
	} else {
closeit:
		/* another thread holds 'new' reserved (mid close/dup);
		 * wait for it and start the whole operation over */
		if ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) {
			fp_drop(p, old, fp, 1);
			procfdtbl_waitfd(p, new);
#if DIAGNOSTIC
			proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif
			goto startover;
		}

		if ((nfp = fdp->fd_ofiles[new]) != NULL) {
			/* 'new' is open: close it first (guards permitting) */
			if (fp_isguarded(nfp, GUARD_CLOSE)) {
				fp_drop(p, old, fp, 1);
				error = fp_guard_exception(p,
				    new, nfp, kGUARD_EXC_CLOSE);
				proc_fdunlock(p);
				return error;
			}
			/* drops the fdlock; leaves 'new' reserved for us */
			(void)fp_close_and_unlock(p, p_cred, new, nfp, FD_DUP2RESV);
			proc_fdlock(p);
			assert(fdp->fd_ofileflags[new] & UF_RESERVED);
		} else {
#if DIAGNOSTIC
			if (fdp->fd_ofiles[new] != NULL) {
				panic("dup2: no ref on fileproc %d", new);
			}
#endif
			procfdtbl_reservefd(p, new);
		}
	}
#if DIAGNOSTIC
	if (fdp->fd_ofiles[new] != 0) {
		panic("dup2: overwriting fd_ofiles with new %d", new);
	}
	if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) {
		panic("dup2: unreserved fileflags with new %d", new);
	}
#endif
	/* 'new' is now reserved and empty: install the duplicate */
	error = finishdup(p, p_cred, old, new, 0, retval);
	fp_drop(p, old, fp, 1);
	proc_fdunlock(p);

	return error;
}
2516
2517
/*
 * fcntl
 *
 * Description:	The file control system call.
 *
 * Parameters:	p				Process performing the fcntl
 *		uap->fd				The fd to operate against
 *		uap->cmd			The command to perform
 *		uap->arg			Pointer to the command argument
 *		retval				Pointer to the call return area
 *
 * Returns:	0				Success
 *		!0				Errno (see fcntl_nocancel)
 *
 * Implicit returns:
 *		*retval (modified)		fcntl return value (if any)
 *
 * Notes:	This system call differs from fcntl_nocancel() in that it
 *		tests for cancellation prior to performing a potentially
 *		blocking operation.
 */
int
sys_fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval)
{
	/* cancellation point: honor a pending pthread cancel first */
	__pthread_testcancel(1);
	return sys_fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval);
}
2545
/* Entitlement allowing F_OPENFROM to borrow the fd opener's credentials. */
#define ACCOUNT_OPENFROM_ENTITLEMENT \
	"com.apple.private.vfs.role-account-openfrom"

/*
 * sys_fcntl_nocancel
 *
 * Description:	A non-cancel-testing file control system call.
 *
 * Parameters:	p				Process performing the fcntl
 *		uap->fd				The fd to operate against
 *		uap->cmd			The command to perform
 *		uap->arg			Pointer to the command argument
 *		retval				Pointer to the call return area
 *
 * Returns:	0				Success
 *		EINVAL
 *		fp_lookup:EBADF			Bad file descriptor
 * [F_DUPFD]
 *		fdalloc:EMFILE
 *		fdalloc:ENOMEM
 *		finishdup:EBADF
 *		finishdup:ENOMEM
 * [F_SETOWN]
 *		ESRCH
 * [F_SETLK]
 *		EBADF
 *		EOVERFLOW
 *		copyin:EFAULT
 *		vnode_getwithref:???
 *		VNOP_ADVLOCK:???
 *		msleep:ETIMEDOUT
 * [F_GETLK]
 *		EBADF
 *		EOVERFLOW
 *		copyin:EFAULT
 *		copyout:EFAULT
 *		vnode_getwithref:???
 *		VNOP_ADVLOCK:???
 * [F_PREALLOCATE]
 *		EBADF
 *		EFBIG
 *		EINVAL
 *		ENOSPC
 *		copyin:EFAULT
 *		copyout:EFAULT
 *		vnode_getwithref:???
 *		VNOP_ALLOCATE:???
 * [F_SETSIZE,F_RDADVISE]
 *		EBADF
 *		EINVAL
 *		copyin:EFAULT
 *		vnode_getwithref:???
 * [F_RDAHEAD,F_NOCACHE]
 *		EBADF
 *		vnode_getwithref:???
 * [???]
 *
 * Implicit returns:
 *		*retval (modified)		fcntl return value (if any)
 */

/*
 * Declares a stack vfs_context built from the calling thread and the
 * credential of the fileproc 'fp' in scope at the expansion site.
 * NOTE: implicitly captures a variable named 'fp'.
 */
#define SYS_FCNTL_DECLARE_VFS_CONTEXT(context) \
	struct vfs_context context = { \
		.vc_thread = current_thread(), \
		.vc_ucred = fp->f_cred, \
	}
2611
2612 static user_addr_t
sys_fnctl_parse_arg(proc_t p,user_long_t arg)2613 sys_fnctl_parse_arg(proc_t p, user_long_t arg)
2614 {
2615 /*
2616 * Since the arg parameter is defined as a long but may be
2617 * either a long or a pointer we must take care to handle
2618 * sign extension issues. Our sys call munger will sign
2619 * extend a long when we are called from a 32-bit process.
2620 * Since we can never have an address greater than 32-bits
2621 * from a 32-bit process we lop off the top 32-bits to avoid
2622 * getting the wrong address
2623 */
2624 return proc_is64bit(p) ? arg : CAST_USER_ADDR_T((uint32_t)arg);
2625 }
2626
/* cleanup code common to fnctl functions, for when the fdlock is still held */
static int
sys_fcntl_out(proc_t p, int fd, struct fileproc *fp, int error)
{
	/* drop the fp_lookup() reference (locked flavor), then unlock */
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
2635
/* cleanup code common to fnctl acting on vnodes, once they unlocked the fdlock */
static int
sys_fcntl_outdrop(proc_t p, int fd, struct fileproc *fp, struct vnode *vp, int error)
{
#pragma unused(vp)

	/* record the vnode path for audit, then drop the unlocked reference */
	AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
	fp_drop(p, fd, fp, 0);
	return error;
}
2646
/* Handler for an fcntl command that works on any fileproc type. */
typedef int (*sys_fnctl_handler_t)(proc_t p, int fd, int cmd, user_long_t arg,
    struct fileproc *fp, int32_t *retval);

/* Handler for an fcntl command that requires the fileproc to be a vnode. */
typedef int (*sys_fnctl_vnode_handler_t)(proc_t p, int fd, int cmd,
    user_long_t arg, struct fileproc *fp, struct vnode *vp, int32_t *retval);
2652
/*
 * SPI (private) for opening a file starting from a dir fd
 *
 * Called with the proc_fdlock held and an fp_lookup() reference on fp;
 * both are released on every path (via sys_fcntl_out / sys_fcntl_outdrop).
 *
 * Note: do not inline to keep stack usage under control.
 */
__attribute__((noinline))
static int
sys_fcntl__OPENFROM(proc_t p, int fd, int cmd, user_long_t arg,
    struct fileproc *fp, struct vnode *vp, int32_t *retval)
{
#pragma unused(cmd)

	user_addr_t argp = sys_fnctl_parse_arg(p, arg);
	struct user_fopenfrom fopen;
	struct vnode_attr *va;
	struct nameidata *nd;
	int error, cmode;
	bool has_entitlement;

	/* Check if this isn't a valid file descriptor */
	if ((fp->f_flag & FREAD) == 0) {
		/* fdlock still held: sys_fcntl_out drops fp and unlocks */
		return sys_fcntl_out(p, fd, fp, EBADF);
	}
	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	/* Only valid for directories */
	if (vp->v_type != VDIR) {
		vnode_put(vp);
		error = ENOTDIR;
		goto outdrop;
	}

	/*
	 * Only entitled apps may use the credentials of the thread
	 * that opened the file descriptor.
	 * Non-entitled threads will use their own context.
	 */
	has_entitlement = IOCurrentTaskHasEntitlement(ACCOUNT_OPENFROM_ENTITLEMENT);

	/* Get flags, mode and pathname arguments. */
	if (IS_64BIT_PROCESS(p)) {
		error = copyin(argp, &fopen, sizeof(fopen));
	} else {
		/* 32-bit layout differs: widen the fields by hand */
		struct user32_fopenfrom fopen32;

		error = copyin(argp, &fopen32, sizeof(fopen32));
		fopen.o_flags = fopen32.o_flags;
		fopen.o_mode = fopen32.o_mode;
		fopen.o_pathname = CAST_USER_ADDR_T(fopen32.o_pathname);
	}
	if (error) {
		vnode_put(vp);
		goto outdrop;
	}

	/* open1() can have really deep stacks, so allocate those */
	va = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	nd = kalloc_type(struct nameidata, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	AUDIT_ARG(fflags, fopen.o_flags);
	AUDIT_ARG(mode, fopen.o_mode);
	VATTR_INIT(va);
	/* Mask off all but regular access permissions */
	cmode = ((fopen.o_mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
	VATTR_SET(va, va_mode, cmode & ACCESSPERMS);

	/* builds 'context' from fp->f_cred and the current thread */
	SYS_FCNTL_DECLARE_VFS_CONTEXT(context);

	/* Start the lookup relative to the file descriptor's vnode. */
	NDINIT(nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	    fopen.o_pathname, has_entitlement ? &context : vfs_context_current());
	nd->ni_dvp = vp;

	error = open1(has_entitlement ? &context : vfs_context_current(),
	    nd, fopen.o_flags, va, NULL, NULL, retval, AUTH_OPEN_NOAUTHFD);

	kfree_type(struct vnode_attr, va);
	kfree_type(struct nameidata, nd);

	vnode_put(vp);

outdrop:
	/* fdlock no longer held: drop the unlocked-flavor reference */
	return sys_fcntl_outdrop(p, fd, fp, vp, error);
}
2742
2743 int
sys_fcntl_nocancel(proc_t p,struct fcntl_nocancel_args * uap,int32_t * retval)2744 sys_fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
2745 {
2746 int fd = uap->fd;
2747 int cmd = uap->cmd;
2748 struct fileproc *fp;
2749 struct vnode *vp = NULLVP; /* for AUDIT_ARG() at end */
2750 unsigned int oflags, nflags;
2751 int i, tmp, error, error2, flg = 0;
2752 struct flock fl = {};
2753 struct flocktimeout fltimeout;
2754 struct timespec *timeout = NULL;
2755 off_t offset;
2756 int newmin;
2757 daddr64_t lbn, bn;
2758 unsigned int fflag;
2759 user_addr_t argp;
2760 boolean_t is64bit;
2761 int has_entitlement = 0;
2762 kauth_cred_t p_cred;
2763
2764 AUDIT_ARG(fd, uap->fd);
2765 AUDIT_ARG(cmd, uap->cmd);
2766
2767 proc_fdlock(p);
2768 if ((error = fp_lookup(p, fd, &fp, 1))) {
2769 proc_fdunlock(p);
2770 return error;
2771 }
2772
2773 SYS_FCNTL_DECLARE_VFS_CONTEXT(context);
2774
2775 is64bit = proc_is64bit(p);
2776 if (is64bit) {
2777 argp = uap->arg;
2778 } else {
2779 /*
2780 * Since the arg parameter is defined as a long but may be
2781 * either a long or a pointer we must take care to handle
2782 * sign extension issues. Our sys call munger will sign
2783 * extend a long when we are called from a 32-bit process.
2784 * Since we can never have an address greater than 32-bits
2785 * from a 32-bit process we lop off the top 32-bits to avoid
2786 * getting the wrong address
2787 */
2788 argp = CAST_USER_ADDR_T((uint32_t)uap->arg);
2789 }
2790
2791 #if CONFIG_MACF
2792 error = mac_file_check_fcntl(kauth_cred_get(), fp->fp_glob, cmd, uap->arg);
2793 if (error) {
2794 goto out;
2795 }
2796 #endif
2797
2798 switch (cmd) {
2799 case F_DUPFD:
2800 case F_DUPFD_CLOEXEC:
2801 if (fp_isguarded(fp, GUARD_DUP)) {
2802 error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
2803 goto out;
2804 }
2805 newmin = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
2806 AUDIT_ARG(value32, newmin);
2807 if (newmin < 0 || newmin >= proc_limitgetcur_nofile(p)) {
2808 error = EINVAL;
2809 goto out;
2810 }
2811 if ((error = fdalloc(p, newmin, &i))) {
2812 goto out;
2813 }
2814 p_cred = current_cached_proc_cred(p);
2815 error = finishdup(p, p_cred, fd, i,
2816 cmd == F_DUPFD_CLOEXEC ? FP_CLOEXEC : 0, retval);
2817 goto out;
2818
2819 case F_GETFD:
2820 *retval = (fp->fp_flags & FP_CLOEXEC) ? FD_CLOEXEC : 0;
2821 error = 0;
2822 goto out;
2823
2824 case F_SETFD:
2825 AUDIT_ARG(value32, (uint32_t)uap->arg);
2826 if (uap->arg & FD_CLOEXEC) {
2827 fp->fp_flags |= FP_CLOEXEC;
2828 error = 0;
2829 } else if (!fp->fp_guard_attrs) {
2830 fp->fp_flags &= ~FP_CLOEXEC;
2831 error = 0;
2832 } else {
2833 error = fp_guard_exception(p,
2834 fd, fp, kGUARD_EXC_NOCLOEXEC);
2835 }
2836 goto out;
2837
2838 case F_GETFL:
2839 fflag = fp->f_flag;
2840 if ((fflag & O_EVTONLY) && proc_disallow_rw_for_o_evtonly(p)) {
2841 /*
2842 * We insert back F_READ so that conversion back to open flags with
2843 * OFLAGS() will come out right. We only need to set 'FREAD' as the
2844 * 'O_RDONLY' is always implied.
2845 */
2846 fflag |= FREAD;
2847 }
2848 *retval = OFLAGS(fflag);
2849 error = 0;
2850 goto out;
2851
2852 case F_SETFL:
2853 // FIXME (rdar://54898652)
2854 //
2855 // this code is broken if fnctl(F_SETFL), ioctl() are
2856 // called concurrently for the same fileglob.
2857
2858 tmp = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
2859 AUDIT_ARG(value32, tmp);
2860
2861 os_atomic_rmw_loop(&fp->f_flag, oflags, nflags, relaxed, {
2862 nflags = oflags & ~FCNTLFLAGS;
2863 nflags |= FFLAGS(tmp) & FCNTLFLAGS;
2864 });
2865 tmp = nflags & FNONBLOCK;
2866 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
2867 if (error) {
2868 goto out;
2869 }
2870 tmp = nflags & FASYNC;
2871 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
2872 if (!error) {
2873 goto out;
2874 }
2875 os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
2876 tmp = 0;
2877 (void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
2878 goto out;
2879
2880 case F_GETOWN:
2881 if (fp->f_type == DTYPE_SOCKET) {
2882 *retval = ((struct socket *)fp_get_data(fp))->so_pgid;
2883 error = 0;
2884 goto out;
2885 }
2886 error = fo_ioctl(fp, TIOCGPGRP, (caddr_t)retval, &context);
2887 *retval = -*retval;
2888 goto out;
2889
2890 case F_SETOWN:
2891 tmp = CAST_DOWN_EXPLICIT(pid_t, uap->arg); /* arg is an int, so we won't lose bits */
2892 AUDIT_ARG(value32, tmp);
2893 if (fp->f_type == DTYPE_SOCKET) {
2894 ((struct socket *)fp_get_data(fp))->so_pgid = tmp;
2895 error = 0;
2896 goto out;
2897 }
2898 if (fp->f_type == DTYPE_PIPE) {
2899 error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
2900 goto out;
2901 }
2902
2903 if (tmp <= 0) {
2904 tmp = -tmp;
2905 } else {
2906 proc_t p1 = proc_find(tmp);
2907 if (p1 == 0) {
2908 error = ESRCH;
2909 goto out;
2910 }
2911 tmp = (int)p1->p_pgrpid;
2912 proc_rele(p1);
2913 }
2914 error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
2915 goto out;
2916
2917 case F_SETNOSIGPIPE:
2918 tmp = CAST_DOWN_EXPLICIT(int, uap->arg);
2919 if (fp->f_type == DTYPE_SOCKET) {
2920 #if SOCKETS
2921 error = sock_setsockopt((struct socket *)fp_get_data(fp),
2922 SOL_SOCKET, SO_NOSIGPIPE, &tmp, sizeof(tmp));
2923 #else
2924 error = EINVAL;
2925 #endif
2926 } else {
2927 struct fileglob *fg = fp->fp_glob;
2928
2929 lck_mtx_lock_spin(&fg->fg_lock);
2930 if (tmp) {
2931 fg->fg_lflags |= FG_NOSIGPIPE;
2932 } else {
2933 fg->fg_lflags &= ~FG_NOSIGPIPE;
2934 }
2935 lck_mtx_unlock(&fg->fg_lock);
2936 error = 0;
2937 }
2938 goto out;
2939
2940 case F_GETNOSIGPIPE:
2941 if (fp->f_type == DTYPE_SOCKET) {
2942 #if SOCKETS
2943 int retsize = sizeof(*retval);
2944 error = sock_getsockopt((struct socket *)fp_get_data(fp),
2945 SOL_SOCKET, SO_NOSIGPIPE, retval, &retsize);
2946 #else
2947 error = EINVAL;
2948 #endif
2949 } else {
2950 *retval = (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) ?
2951 1 : 0;
2952 error = 0;
2953 }
2954 goto out;
2955
2956 case F_SETCONFINED:
2957 /*
2958 * If this is the only reference to this fglob in the process
2959 * and it's already marked as close-on-fork then mark it as
2960 * (immutably) "confined" i.e. any fd that points to it will
2961 * forever be close-on-fork, and attempts to use an IPC
2962 * mechanism to move the descriptor elsewhere will fail.
2963 */
2964 if (CAST_DOWN_EXPLICIT(int, uap->arg)) {
2965 struct fileglob *fg = fp->fp_glob;
2966
2967 lck_mtx_lock_spin(&fg->fg_lock);
2968 if (fg->fg_lflags & FG_CONFINED) {
2969 error = 0;
2970 } else if (1 != os_ref_get_count_raw(&fg->fg_count)) {
2971 error = EAGAIN; /* go close the dup .. */
2972 } else if (fp->fp_flags & FP_CLOFORK) {
2973 fg->fg_lflags |= FG_CONFINED;
2974 error = 0;
2975 } else {
2976 error = EBADF; /* open without O_CLOFORK? */
2977 }
2978 lck_mtx_unlock(&fg->fg_lock);
2979 } else {
2980 /*
2981 * Other subsystems may have built on the immutability
2982 * of FG_CONFINED; clearing it may be tricky.
2983 */
2984 error = EPERM; /* immutable */
2985 }
2986 goto out;
2987
2988 case F_GETCONFINED:
2989 *retval = (fp->fp_glob->fg_lflags & FG_CONFINED) ? 1 : 0;
2990 error = 0;
2991 goto out;
2992
2993 case F_SETLKWTIMEOUT:
2994 case F_SETLKW:
2995 case F_OFD_SETLKWTIMEOUT:
2996 case F_OFD_SETLKW:
2997 flg |= F_WAIT;
2998 OS_FALLTHROUGH;
2999
3000 case F_SETLK:
3001 case F_OFD_SETLK:
3002 if (fp->f_type != DTYPE_VNODE) {
3003 error = EBADF;
3004 goto out;
3005 }
3006 vp = (struct vnode *)fp_get_data(fp);
3007
3008 fflag = fp->f_flag;
3009 offset = fp->f_offset;
3010 proc_fdunlock(p);
3011
3012 /* Copy in the lock structure */
3013 if (F_SETLKWTIMEOUT == cmd || F_OFD_SETLKWTIMEOUT == cmd) {
3014 error = copyin(argp, (caddr_t) &fltimeout, sizeof(fltimeout));
3015 if (error) {
3016 goto outdrop;
3017 }
3018 fl = fltimeout.fl;
3019 timeout = &fltimeout.timeout;
3020 } else {
3021 error = copyin(argp, (caddr_t)&fl, sizeof(fl));
3022 if (error) {
3023 goto outdrop;
3024 }
3025 }
3026
3027 /* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
3028 /* and ending byte for EOVERFLOW in SEEK_SET */
3029 error = check_file_seek_range(&fl, offset);
3030 if (error) {
3031 goto outdrop;
3032 }
3033
3034 if ((error = vnode_getwithref(vp))) {
3035 goto outdrop;
3036 }
3037 if (fl.l_whence == SEEK_CUR) {
3038 fl.l_start += offset;
3039 }
3040
3041 #if CONFIG_MACF
3042 error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob,
3043 F_SETLK, &fl);
3044 if (error) {
3045 (void)vnode_put(vp);
3046 goto outdrop;
3047 }
3048 #endif
3049
3050 #if CONFIG_FILE_LEASES
3051 (void)vnode_breaklease(vp, O_WRONLY, vfs_context_current());
3052 #endif
3053
3054 switch (cmd) {
3055 case F_OFD_SETLK:
3056 case F_OFD_SETLKW:
3057 case F_OFD_SETLKWTIMEOUT:
3058 flg |= F_OFD_LOCK;
3059 if (fp->fp_glob->fg_lflags & FG_CONFINED) {
3060 flg |= F_CONFINED;
3061 }
3062 switch (fl.l_type) {
3063 case F_RDLCK:
3064 if ((fflag & FREAD) == 0) {
3065 error = EBADF;
3066 break;
3067 }
3068 error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3069 F_SETLK, &fl, flg, &context, timeout);
3070 break;
3071 case F_WRLCK:
3072 if ((fflag & FWRITE) == 0) {
3073 error = EBADF;
3074 break;
3075 }
3076 error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3077 F_SETLK, &fl, flg, &context, timeout);
3078 break;
3079 case F_UNLCK:
3080 error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3081 F_UNLCK, &fl, F_OFD_LOCK, &context,
3082 timeout);
3083 break;
3084 default:
3085 error = EINVAL;
3086 break;
3087 }
3088 if (0 == error &&
3089 (F_RDLCK == fl.l_type || F_WRLCK == fl.l_type)) {
3090 struct fileglob *fg = fp->fp_glob;
3091
3092 /*
3093 * arrange F_UNLCK on last close (once
3094 * set, FG_HAS_OFDLOCK is immutable)
3095 */
3096 if ((fg->fg_lflags & FG_HAS_OFDLOCK) == 0) {
3097 lck_mtx_lock_spin(&fg->fg_lock);
3098 fg->fg_lflags |= FG_HAS_OFDLOCK;
3099 lck_mtx_unlock(&fg->fg_lock);
3100 }
3101 }
3102 break;
3103 default:
3104 flg |= F_POSIX;
3105 switch (fl.l_type) {
3106 case F_RDLCK:
3107 if ((fflag & FREAD) == 0) {
3108 error = EBADF;
3109 break;
3110 }
3111 // XXX UInt32 unsafe for LP64 kernel
3112 os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
3113 error = VNOP_ADVLOCK(vp, (caddr_t)p,
3114 F_SETLK, &fl, flg, &context, timeout);
3115 break;
3116 case F_WRLCK:
3117 if ((fflag & FWRITE) == 0) {
3118 error = EBADF;
3119 break;
3120 }
3121 // XXX UInt32 unsafe for LP64 kernel
3122 os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
3123 error = VNOP_ADVLOCK(vp, (caddr_t)p,
3124 F_SETLK, &fl, flg, &context, timeout);
3125 break;
3126 case F_UNLCK:
3127 error = VNOP_ADVLOCK(vp, (caddr_t)p,
3128 F_UNLCK, &fl, F_POSIX, &context, timeout);
3129 break;
3130 default:
3131 error = EINVAL;
3132 break;
3133 }
3134 break;
3135 }
3136 (void) vnode_put(vp);
3137 goto outdrop;
3138
3139 case F_GETLK:
3140 case F_OFD_GETLK:
3141 case F_GETLKPID:
3142 case F_OFD_GETLKPID:
3143 if (fp->f_type != DTYPE_VNODE) {
3144 error = EBADF;
3145 goto out;
3146 }
3147 vp = (struct vnode *)fp_get_data(fp);
3148
3149 offset = fp->f_offset;
3150 proc_fdunlock(p);
3151
3152 /* Copy in the lock structure */
3153 error = copyin(argp, (caddr_t)&fl, sizeof(fl));
3154 if (error) {
3155 goto outdrop;
3156 }
3157
3158 /* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
3159 /* and ending byte for EOVERFLOW in SEEK_SET */
3160 error = check_file_seek_range(&fl, offset);
3161 if (error) {
3162 goto outdrop;
3163 }
3164
3165 if ((fl.l_whence == SEEK_SET) && (fl.l_start < 0)) {
3166 error = EINVAL;
3167 goto outdrop;
3168 }
3169
3170 switch (fl.l_type) {
3171 case F_RDLCK:
3172 case F_UNLCK:
3173 case F_WRLCK:
3174 break;
3175 default:
3176 error = EINVAL;
3177 goto outdrop;
3178 }
3179
3180 switch (fl.l_whence) {
3181 case SEEK_CUR:
3182 case SEEK_SET:
3183 case SEEK_END:
3184 break;
3185 default:
3186 error = EINVAL;
3187 goto outdrop;
3188 }
3189
3190 if ((error = vnode_getwithref(vp)) == 0) {
3191 if (fl.l_whence == SEEK_CUR) {
3192 fl.l_start += offset;
3193 }
3194
3195 #if CONFIG_MACF
3196 error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob,
3197 cmd, &fl);
3198 if (error == 0)
3199 #endif
3200 switch (cmd) {
3201 case F_OFD_GETLK:
3202 error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3203 F_GETLK, &fl, F_OFD_LOCK, &context, NULL);
3204 break;
3205 case F_OFD_GETLKPID:
3206 error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3207 F_GETLKPID, &fl, F_OFD_LOCK, &context, NULL);
3208 break;
3209 default:
3210 error = VNOP_ADVLOCK(vp, (caddr_t)p,
3211 cmd, &fl, F_POSIX, &context, NULL);
3212 break;
3213 }
3214
3215 (void)vnode_put(vp);
3216
3217 if (error == 0) {
3218 error = copyout((caddr_t)&fl, argp, sizeof(fl));
3219 }
3220 }
3221 goto outdrop;
3222
3223 case F_PREALLOCATE: {
3224 fstore_t alloc_struct; /* structure for allocate command */
3225 u_int32_t alloc_flags = 0;
3226
3227 if (fp->f_type != DTYPE_VNODE) {
3228 error = EBADF;
3229 goto out;
3230 }
3231
3232 vp = (struct vnode *)fp_get_data(fp);
3233 proc_fdunlock(p);
3234
3235 /* make sure that we have write permission */
3236 if ((fp->f_flag & FWRITE) == 0) {
3237 error = EBADF;
3238 goto outdrop;
3239 }
3240
3241 error = copyin(argp, (caddr_t)&alloc_struct, sizeof(alloc_struct));
3242 if (error) {
3243 goto outdrop;
3244 }
3245
3246 /* now set the space allocated to 0 */
3247 alloc_struct.fst_bytesalloc = 0;
3248
3249 /*
3250 * Do some simple parameter checking
3251 */
3252
3253 /* set up the flags */
3254
3255 alloc_flags |= PREALLOCATE;
3256
3257 if (alloc_struct.fst_flags & F_ALLOCATECONTIG) {
3258 alloc_flags |= ALLOCATECONTIG;
3259 }
3260
3261 if (alloc_struct.fst_flags & F_ALLOCATEALL) {
3262 alloc_flags |= ALLOCATEALL;
3263 }
3264
3265 if (alloc_struct.fst_flags & F_ALLOCATEPERSIST) {
3266 alloc_flags |= ALLOCATEPERSIST;
3267 }
3268
3269 /*
3270 * Do any position mode specific stuff. The only
3271 * position mode supported now is PEOFPOSMODE
3272 */
3273
3274 switch (alloc_struct.fst_posmode) {
3275 case F_PEOFPOSMODE:
3276 if (alloc_struct.fst_offset != 0) {
3277 error = EINVAL;
3278 goto outdrop;
3279 }
3280
3281 alloc_flags |= ALLOCATEFROMPEOF;
3282 break;
3283
3284 case F_VOLPOSMODE:
3285 if (alloc_struct.fst_offset <= 0) {
3286 error = EINVAL;
3287 goto outdrop;
3288 }
3289
3290 alloc_flags |= ALLOCATEFROMVOL;
3291 break;
3292
3293 default: {
3294 error = EINVAL;
3295 goto outdrop;
3296 }
3297 }
3298 if ((error = vnode_getwithref(vp)) == 0) {
3299 /*
3300 * call allocate to get the space
3301 */
3302 error = VNOP_ALLOCATE(vp, alloc_struct.fst_length, alloc_flags,
3303 &alloc_struct.fst_bytesalloc, alloc_struct.fst_offset,
3304 &context);
3305 (void)vnode_put(vp);
3306
3307 error2 = copyout((caddr_t)&alloc_struct, argp, sizeof(alloc_struct));
3308
3309 if (error == 0) {
3310 error = error2;
3311 }
3312 }
3313 goto outdrop;
3314 }
3315 case F_PUNCHHOLE: {
3316 fpunchhole_t args;
3317
3318 if (fp->f_type != DTYPE_VNODE) {
3319 error = EBADF;
3320 goto out;
3321 }
3322
3323 vp = (struct vnode *)fp_get_data(fp);
3324 proc_fdunlock(p);
3325
3326 /* need write permissions */
3327 if ((fp->f_flag & FWRITE) == 0) {
3328 error = EPERM;
3329 goto outdrop;
3330 }
3331
3332 if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
3333 goto outdrop;
3334 }
3335
3336 if ((error = vnode_getwithref(vp))) {
3337 goto outdrop;
3338 }
3339
3340 #if CONFIG_MACF
3341 if ((error = mac_vnode_check_write(&context, fp->fp_glob->fg_cred, vp))) {
3342 (void)vnode_put(vp);
3343 goto outdrop;
3344 }
3345 #endif
3346
3347 error = VNOP_IOCTL(vp, F_PUNCHHOLE, (caddr_t)&args, 0, &context);
3348 (void)vnode_put(vp);
3349
3350 goto outdrop;
3351 }
3352 case F_TRIM_ACTIVE_FILE: {
3353 ftrimactivefile_t args;
3354
3355 if (priv_check_cred(kauth_cred_get(), PRIV_TRIM_ACTIVE_FILE, 0)) {
3356 error = EACCES;
3357 goto out;
3358 }
3359
3360 if (fp->f_type != DTYPE_VNODE) {
3361 error = EBADF;
3362 goto out;
3363 }
3364
3365 vp = (struct vnode *)fp_get_data(fp);
3366 proc_fdunlock(p);
3367
3368 /* need write permissions */
3369 if ((fp->f_flag & FWRITE) == 0) {
3370 error = EPERM;
3371 goto outdrop;
3372 }
3373
3374 if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
3375 goto outdrop;
3376 }
3377
3378 if ((error = vnode_getwithref(vp))) {
3379 goto outdrop;
3380 }
3381
3382 error = VNOP_IOCTL(vp, F_TRIM_ACTIVE_FILE, (caddr_t)&args, 0, &context);
3383 (void)vnode_put(vp);
3384
3385 goto outdrop;
3386 }
case F_SPECULATIVE_READ: {
	fspecread_t args;
	off_t temp_length = 0;

	/*
	 * Hint to the filesystem to speculatively read a byte range into
	 * the cache.  The (offset, length) pair is sanitized and
	 * page-aligned here; the filesystem sees the adjusted values.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
		goto outdrop;
	}

	/* Discard invalid offsets or lengths */
	if ((args.fsr_offset < 0) || (args.fsr_length < 0)) {
		error = EINVAL;
		goto outdrop;
	}

	/*
	 * Round the file offset down to a page-size boundary (or to 0).
	 * The filesystem will need to round the length up to the end of the page boundary
	 * or to the EOF of the file.
	 */
	uint64_t foff = (((uint64_t)args.fsr_offset) & ~((uint64_t)PAGE_MASK));
	uint64_t foff_delta = args.fsr_offset - foff;
	args.fsr_offset = (off_t) foff;

	/*
	 * Now add in the delta to the supplied length. Since we may have adjusted the
	 * offset, increase it by the amount that we adjusted.
	 */
	if (os_add_overflow(args.fsr_length, foff_delta, &args.fsr_length)) {
		error = EOVERFLOW;
		goto outdrop;
	}

	/*
	 * Make sure (fsr_offset + fsr_length) does not overflow.
	 * temp_length is only used for this overflow probe; the sum
	 * itself is discarded.
	 */
	if (os_add_overflow(args.fsr_offset, args.fsr_length, &temp_length)) {
		error = EOVERFLOW;
		goto outdrop;
	}

	if ((error = vnode_getwithref(vp))) {
		goto outdrop;
	}
	error = VNOP_IOCTL(vp, F_SPECULATIVE_READ, (caddr_t)&args, 0, &context);
	(void)vnode_put(vp);

	goto outdrop;
}
case F_ATTRIBUTION_TAG: {
	fattributiontag_t args;

	/*
	 * Query/manipulate the file's attribution tag via the filesystem.
	 * The args structure is both input (copied in) and output (copied
	 * back out on success).
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
		goto outdrop;
	}

	if ((error = vnode_getwithref(vp))) {
		goto outdrop;
	}

	error = VNOP_IOCTL(vp, F_ATTRIBUTION_TAG, (caddr_t)&args, 0, &context);
	(void)vnode_put(vp);

	/* Return the (possibly updated) tag data to the caller. */
	if (error == 0) {
		error = copyout((caddr_t)&args, argp, sizeof(args));
	}

	goto outdrop;
}
case F_SETSIZE:
	/*
	 * Truncate or extend the file to the supplied 64-bit size.
	 * Extending without zero-filling exposes stale disk data, so that
	 * fast path is gated on PRIV_VFS_SETSIZE (and, above that, on the
	 * caller being superuser at all).
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	if ((fp->fp_glob->fg_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	error = copyin(argp, (caddr_t)&offset, sizeof(off_t));
	if (error) {
		goto outdrop;
	}
	AUDIT_ARG(value64, offset);

	error = vnode_getwithref(vp);
	if (error) {
		goto outdrop;
	}

#if CONFIG_MACF
	/* MAC hook gets first say before any size change. */
	error = mac_vnode_check_truncate(&context,
	    fp->fp_glob->fg_cred, vp);
	if (error) {
		(void)vnode_put(vp);
		goto outdrop;
	}
#endif
	/*
	 * Make sure that we are root. Growing a file
	 * without zero filling the data is a security hole.
	 */
	if (!kauth_cred_issuser(kauth_cred_get())) {
		error = EACCES;
	} else {
		/*
		 * Require privilege to change file size without zerofill,
		 * else will change the file size and zerofill it.
		 */
		error = priv_check_cred(kauth_cred_get(), PRIV_VFS_SETSIZE, 0);
		if (error == 0) {
			error = vnode_setsize(vp, offset, IO_NOZEROFILL, &context);
		} else {
			error = vnode_setsize(vp, offset, 0, &context);
		}

#if CONFIG_MACF
		if (error == 0) {
			mac_vnode_notify_truncate(&context, fp->fp_glob->fg_cred, vp);
		}
#endif
	}

	(void)vnode_put(vp);
	goto outdrop;
3531 case F_RDAHEAD:
3532 if (fp->f_type != DTYPE_VNODE) {
3533 error = EBADF;
3534 goto out;
3535 }
3536 if (uap->arg) {
3537 os_atomic_andnot(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
3538 } else {
3539 os_atomic_or(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
3540 }
3541 goto out;
3542
3543 case F_NOCACHE:
3544 if (fp->f_type != DTYPE_VNODE) {
3545 error = EBADF;
3546 goto out;
3547 }
3548 if (uap->arg) {
3549 os_atomic_or(&fp->fp_glob->fg_flag, FNOCACHE, relaxed);
3550 } else {
3551 os_atomic_andnot(&fp->fp_glob->fg_flag, FNOCACHE, relaxed);
3552 }
3553 goto out;
3554
3555 case F_NODIRECT:
3556 if (fp->f_type != DTYPE_VNODE) {
3557 error = EBADF;
3558 goto out;
3559 }
3560 if (uap->arg) {
3561 os_atomic_or(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
3562 } else {
3563 os_atomic_andnot(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
3564 }
3565 goto out;
3566
3567 case F_SINGLE_WRITER:
3568 if (fp->f_type != DTYPE_VNODE) {
3569 error = EBADF;
3570 goto out;
3571 }
3572 if (uap->arg) {
3573 os_atomic_or(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
3574 } else {
3575 os_atomic_andnot(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
3576 }
3577 goto out;
3578
case F_GLOBAL_NOCACHE:
	/*
	 * Set/clear the no-cache state on the vnode itself (affects every
	 * open of this file, not just this fd).  Returns the previous
	 * state in *retval.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = vnode_getwithref(vp)) == 0) {
		/* Report the old setting before changing it. */
		*retval = vnode_isnocache(vp);

		if (uap->arg) {
			vnode_setnocache(vp);
		} else {
			vnode_clearnocache(vp);
		}

		(void)vnode_put(vp);
	}
	goto outdrop;
3599
case F_CHECK_OPENEVT:
	/*
	 * Set/clear the vnode's "open event" flag, returning the previous
	 * state in *retval.  Same structure as F_GLOBAL_NOCACHE.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = vnode_getwithref(vp)) == 0) {
		*retval = vnode_is_openevt(vp);

		if (uap->arg) {
			vnode_set_openevt(vp);
		} else {
			vnode_clear_openevt(vp);
		}

		(void)vnode_put(vp);
	}
	goto outdrop;
3620
case F_RDADVISE: {
	struct radvisory ra_struct;

	/*
	 * Read-advisory: ask the filesystem to pre-read the byte range
	 * described by ra_struct.  Offset and count must be non-negative.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct)))) {
		goto outdrop;
	}
	if (ra_struct.ra_offset < 0 || ra_struct.ra_count < 0) {
		error = EINVAL;
		goto outdrop;
	}
	if ((error = vnode_getwithref(vp)) == 0) {
		error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context);

		(void)vnode_put(vp);
	}
	goto outdrop;
}
3645
case F_FLUSH_DATA:
	/*
	 * Asynchronously flush dirty data for this vnode
	 * (VNOP_FSYNC with MNT_NOWAIT — does not wait for completion).
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = vnode_getwithref(vp)) == 0) {
		error = VNOP_FSYNC(vp, MNT_NOWAIT, &context);

		(void)vnode_put(vp);
	}
	goto outdrop;
3661
case F_LOG2PHYS:
case F_LOG2PHYS_EXT: {
	/*
	 * Map a logical file offset to a physical device offset.
	 * F_LOG2PHYS uses the fd's current offset and a single device
	 * block; F_LOG2PHYS_EXT takes offset/length from userspace and
	 * also reports the contiguous run length.
	 */
	struct log2phys l2p_struct = {}; /* structure for allocate command */
	int devBlockSize;

	off_t file_offset = 0;
	size_t a_size = 0;
	size_t run = 0;

	if (cmd == F_LOG2PHYS_EXT) {
		error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct));
		if (error) {
			goto out;
		}
		file_offset = l2p_struct.l2p_devoffset;
	} else {
		file_offset = fp->f_offset;
	}
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);
	if ((error = vnode_getwithref(vp))) {
		goto outdrop;
	}
	/* Round the file offset to a logical block boundary via blk round-trip. */
	error = VNOP_OFFTOBLK(vp, file_offset, &lbn);
	if (error) {
		(void)vnode_put(vp);
		goto outdrop;
	}
	error = VNOP_BLKTOOFF(vp, lbn, &offset);
	if (error) {
		(void)vnode_put(vp);
		goto outdrop;
	}
	devBlockSize = vfs_devblocksize(vnode_mount(vp));
	if (cmd == F_LOG2PHYS_EXT) {
		if (l2p_struct.l2p_contigbytes < 0) {
			vnode_put(vp);
			error = EINVAL;
			goto outdrop;
		}

		/* Clamp the (validated non-negative) request to SIZE_MAX. */
		a_size = (size_t)MIN((uint64_t)l2p_struct.l2p_contigbytes, SIZE_MAX);
	} else {
		a_size = devBlockSize;
	}

	error = VNOP_BLOCKMAP(vp, offset, a_size, &bn, &run, NULL, 0, &context);

	(void)vnode_put(vp);

	if (!error) {
		l2p_struct.l2p_flags = 0; /* for now */
		if (cmd == F_LOG2PHYS_EXT) {
			/*
			 * NOTE(review): assumes run >= (file_offset - offset),
			 * i.e. the mapped run covers the intra-block delta —
			 * appears guaranteed by VNOP_BLOCKMAP; confirm.
			 */
			l2p_struct.l2p_contigbytes = run - (file_offset - offset);
		} else {
			l2p_struct.l2p_contigbytes = 0; /* for now */
		}

		/*
		 * The block number being -1 suggests that the file offset is not backed
		 * by any real blocks on-disk. As a result, just let it be passed back up wholesale.
		 */
		if (bn == -1) {
			/* Don't multiply it by the block size */
			l2p_struct.l2p_devoffset = bn;
		} else {
			l2p_struct.l2p_devoffset = bn * devBlockSize;
			l2p_struct.l2p_devoffset += file_offset - offset;
		}
		error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct));
	}
	goto outdrop;
}
case F_GETPATH:
case F_GETPATH_NOFIRMLINK: {
	char *pathbufp;
	size_t pathlen;

	/*
	 * Return the file's path (up to MAXPATHLEN, NUL-terminated) to
	 * userspace.  F_GETPATH_NOFIRMLINK resolves without following
	 * firmlinks.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	pathlen = MAXPATHLEN;
	pathbufp = zalloc(ZV_NAMEI);

	if ((error = vnode_getwithref(vp)) == 0) {
		error = vn_getpath_ext(vp, NULL, pathbufp,
		    &pathlen, cmd == F_GETPATH_NOFIRMLINK ?
		    VN_GETPATH_NO_FIRMLINK : 0);
		(void)vnode_put(vp);

		/* pathlen was updated to the actual length by vn_getpath_ext. */
		if (error == 0) {
			error = copyout((caddr_t)pathbufp, argp, pathlen);
		}
	}
	zfree(ZV_NAMEI, pathbufp);
	goto outdrop;
}
3767
case F_PATHPKG_CHECK: {
	char *pathbufp;
	size_t pathlen;

	/*
	 * Check whether the user-supplied path names a package relative
	 * to this vnode; result is delivered through *retval.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	pathlen = MAXPATHLEN;
	pathbufp = zalloc(ZV_NAMEI);

	if ((error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0) {
		if ((error = vnode_getwithref(vp)) == 0) {
			AUDIT_ARG(text, pathbufp);
			error = vn_path_package_check(vp, pathbufp, (int)pathlen, retval);

			(void)vnode_put(vp);
		}
	}
	zfree(ZV_NAMEI, pathbufp);
	goto outdrop;
}
3793
case F_CHKCLEAN: // used by regression tests to see if all dirty pages got cleaned by fsync()
case F_FULLFSYNC: // fsync + flush the journal + DKIOCSYNCHRONIZE
case F_BARRIERFSYNC: // fsync + barrier
case F_FREEZE_FS: // freeze all other fs operations for the fs of this fd
case F_THAW_FS: { // thaw all frozen fs operations for the fs of this fd
	/*
	 * All five commands are forwarded to the filesystem via
	 * VNOP_IOCTL with a NULL argument.  F_BARRIERFSYNC is promoted to
	 * F_FULLFSYNC — either eagerly, if a previous attempt recorded
	 * MNTK_SUPL_USE_FULLSYNC on the mount, or lazily, after the
	 * filesystem rejects the barrier variant.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if ((error = vnode_getwithref(vp)) == 0) {
		if ((cmd == F_BARRIERFSYNC) &&
		    (vp->v_mount->mnt_supl_kern_flag & MNTK_SUPL_USE_FULLSYNC)) {
			cmd = F_FULLFSYNC;
		}
		error = VNOP_IOCTL(vp, cmd, (caddr_t)NULL, 0, &context);

		/*
		 * Promote F_BARRIERFSYNC to F_FULLFSYNC if the underlying
		 * filesystem doesn't support it.
		 */
		if ((error == ENOTTY || error == ENOTSUP || error == EINVAL) &&
		    (cmd == F_BARRIERFSYNC)) {
			/* Remember the promotion so future calls skip the failed attempt. */
			os_atomic_or(&vp->v_mount->mnt_supl_kern_flag,
			    MNTK_SUPL_USE_FULLSYNC, relaxed);

			error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, &context);
		}

		(void)vnode_put(vp);
	}
	break;
}
3829
3830 /*
3831 * SPI (private) for opening a file starting from a dir fd
3832 */
case F_OPENFROM: {
	/* Check if this isn't a valid file descriptor */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);

	/*
	 * Delegates entirely to sys_fcntl__OPENFROM and returns directly;
	 * note the proc fd lock has NOT been dropped here — the callee is
	 * responsible for unlock/cleanup.
	 */
	return sys_fcntl__OPENFROM(p, fd, cmd, uap->arg, fp, vp, retval);
}
3843
3844 /*
3845 * SPI (private) for unlinking a file starting from a dir fd
3846 */
case F_UNLINKFROM: {
	user_addr_t pathname;

	/*
	 * Unlink a path relative to this (directory) fd.  The fd must be
	 * a vnode opened with read access, and must refer to a directory.
	 */
	/* Check if this isn't a valid file descriptor */
	if ((fp->f_type != DTYPE_VNODE) ||
	    (fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	/* Only valid for directories */
	if (vp->v_type != VDIR) {
		vnode_put(vp);
		error = ENOTDIR;
		goto outdrop;
	}

	/*
	 * Only entitled apps may use the credentials of the thread
	 * that opened the file descriptor.
	 * Non-entitled threads will use their own context.
	 */
	if (IOCurrentTaskHasEntitlement(ACCOUNT_OPENFROM_ENTITLEMENT)) {
		has_entitlement = 1;
	}

	/* Get flags, mode and pathname arguments. */
	if (IS_64BIT_PROCESS(p)) {
		pathname = (user_addr_t)argp;
	} else {
		pathname = CAST_USER_ADDR_T(argp);
	}

	/* Start the lookup relative to the file descriptor's vnode. */
	error = unlink1(has_entitlement ? &context : vfs_context_current(),
	    vp, pathname, UIO_USERSPACE, 0);

	vnode_put(vp);
	break;
}
3894
case F_ADDSIGS:
case F_ADDFILESIGS:
case F_ADDFILESIGS_FOR_DYLD_SIM:
case F_ADDFILESIGS_RETURN:
case F_ADDFILESIGS_INFO:
{
	/*
	 * Attach a code-signature blob to this vnode.  F_ADDSIGS copies
	 * the blob from user memory; the F_ADDFILESIGS* variants read it
	 * from the file itself at fs_file_start + fs_blob_start.  The
	 * *_RETURN/*_FOR_DYLD_SIM/*_INFO variants also copy the blob's
	 * end offset (and, for _INFO, cdhash + hash type) back out.
	 */
	struct cs_blob *blob = NULL;
	struct user_fsignatures fs;
	kern_return_t kr;
	vm_offset_t kernel_blob_addr;
	vm_size_t kernel_blob_size;
	int blob_add_flags = 0;
	/* Only copy in the input portion of the struct; the tail is output. */
	const size_t sizeof_fs = (cmd == F_ADDFILESIGS_INFO ?
	    offsetof(struct user_fsignatures, fs_cdhash /* first output element */) :
	    offsetof(struct user_fsignatures, fs_fsignatures_size /* compat */));

	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if (cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
		blob_add_flags |= MAC_VNODE_CHECK_DYLD_SIM;
		/* dyld_sim processes must die on signature invalidation. */
		if ((proc_getcsflags(p) & CS_KILL) == 0) {
			proc_lock(p);
			proc_csflags_set(p, CS_KILL);
			proc_unlock(p);
		}
	}

	error = vnode_getwithref(vp);
	if (error) {
		goto outdrop;
	}

	if (IS_64BIT_PROCESS(p)) {
		error = copyin(argp, &fs, sizeof_fs);
	} else {
		/* F_ADDFILESIGS_INFO is a 64-bit-only interface. */
		if (cmd == F_ADDFILESIGS_INFO) {
			error = EINVAL;
			vnode_put(vp);
			goto outdrop;
		}

		struct user32_fsignatures fs32;

		/*
		 * NOTE(review): fs32 fields are copied into fs before the
		 * copyin error is checked; harmless since fs is unused on
		 * the error path below, but the assignments read
		 * uninitialized stack on failure — consider gating on
		 * error == 0.
		 */
		error = copyin(argp, &fs32, sizeof(fs32));
		fs.fs_file_start = fs32.fs_file_start;
		fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start);
		fs.fs_blob_size = fs32.fs_blob_size;
	}

	if (error) {
		vnode_put(vp);
		goto outdrop;
	}

	/*
	 * First check if we have something loaded a this offset
	 */
	blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, fs.fs_file_start);
	if (blob != NULL) {
		/* If this is for dyld_sim revalidate the blob */
		if (cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
			error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags, proc_platform(p));
			if (error) {
				blob = NULL;
				if (error != EAGAIN) {
					vnode_put(vp);
					goto outdrop;
				}
			}
		}
	}

	if (blob == NULL) {
		/*
		 * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
		 * our use cases for the immediate future, but note that at the time of this commit, some
		 * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
		 *
		 * We should consider how we can manage this more effectively; the above means that some
		 * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
		 * threshold considered ridiculous at the time of this change.
		 */
#define CS_MAX_BLOB_SIZE (40ULL * 1024ULL * 1024ULL)
		if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
			error = E2BIG;
			vnode_put(vp);
			goto outdrop;
		}

		kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
		kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
		if (kr != KERN_SUCCESS || kernel_blob_size < fs.fs_blob_size) {
			error = ENOMEM;
			vnode_put(vp);
			goto outdrop;
		}

		if (cmd == F_ADDSIGS) {
			/* Blob lives in the caller's address space. */
			error = copyin(fs.fs_blob_start,
			    (void *) kernel_blob_addr,
			    fs.fs_blob_size);
		} else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM || F_ADDFILESIGS_INFO */
			int resid;

			/* Blob lives inside the file itself; read it in. */
			error = vn_rdwr(UIO_READ,
			    vp,
			    (caddr_t) kernel_blob_addr,
			    (int)kernel_blob_size,
			    fs.fs_file_start + fs.fs_blob_start,
			    UIO_SYSSPACE,
			    0,
			    kauth_cred_get(),
			    &resid,
			    p);
			if ((error == 0) && resid) {
				/* kernel_blob_size rounded to a page size, but signature may be at end of file */
				memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
			}
		}

		if (error) {
			ubc_cs_blob_deallocate(kernel_blob_addr,
			    kernel_blob_size);
			vnode_put(vp);
			goto outdrop;
		}

		blob = NULL;
		error = ubc_cs_blob_add(vp,
		    proc_platform(p),
		    CPU_TYPE_ANY,                       /* not for a specific architecture */
		    CPU_SUBTYPE_ANY,
		    fs.fs_file_start,
		    &kernel_blob_addr,
		    kernel_blob_size,
		    NULL,
		    blob_add_flags,
		    &blob);

		/* ubc_blob_add() has consumed "kernel_blob_addr" if it is zeroed */
		if (error) {
			if (kernel_blob_addr) {
				ubc_cs_blob_deallocate(kernel_blob_addr,
				    kernel_blob_size);
			}
			vnode_put(vp);
			goto outdrop;
		} else {
#if CHECK_CS_VALIDATION_BITMAP
			ubc_cs_validation_bitmap_allocate( vp );
#endif
		}
	}

	if (cmd == F_ADDFILESIGS_RETURN || cmd == F_ADDFILESIGS_FOR_DYLD_SIM ||
	    cmd == F_ADDFILESIGS_INFO) {
		/*
		 * The first element of the structure is a
		 * off_t that happen to have the same size for
		 * all archs. Lets overwrite that.
		 */
		off_t end_offset = 0;
		if (blob) {
			end_offset = blob->csb_end_offset;
		}
		error = copyout(&end_offset, argp, sizeof(end_offset));

		if (error) {
			vnode_put(vp);
			goto outdrop;
		}
	}

	if (cmd == F_ADDFILESIGS_INFO) {
		/* Return information. What we copy out depends on the size of the
		 * passed in structure, to keep binary compatibility. */

		if (fs.fs_fsignatures_size >= sizeof(struct user_fsignatures)) {
			// enough room for fs_cdhash[20]+fs_hash_type

			if (blob != NULL) {
				error = copyout(blob->csb_cdhash,
				    (vm_address_t)argp + offsetof(struct user_fsignatures, fs_cdhash),
				    USER_FSIGNATURES_CDHASH_LEN);
				if (error) {
					vnode_put(vp);
					goto outdrop;
				}
				int hashtype = cs_hash_type(blob->csb_hashtype);
				error = copyout(&hashtype,
				    (vm_address_t)argp + offsetof(struct user_fsignatures, fs_hash_type),
				    sizeof(int));
				if (error) {
					vnode_put(vp);
					goto outdrop;
				}
			}
		}
	}

	(void) vnode_put(vp);
	break;
}
4103 #if CONFIG_SUPPLEMENTAL_SIGNATURES
case F_ADDFILESUPPL:
{
	/*
	 * Attach a supplemental code signature read from this fd (vp) to
	 * the original file identified by fs.fs_orig_fd (ivp).  64-bit
	 * callers only.  Holds iocounts on BOTH vnodes plus a reference
	 * on the original fileproc; "dropboth" unwinds all three before
	 * falling through to outdrop.
	 */
	struct vnode *ivp;
	struct cs_blob *blob = NULL;
	struct user_fsupplement fs;
	int orig_fd;
	struct fileproc* orig_fp = NULL;
	kern_return_t kr;
	vm_offset_t kernel_blob_addr;
	vm_size_t kernel_blob_size;

	if (!IS_64BIT_PROCESS(p)) {
		error = EINVAL;
		goto out; // drop fp and unlock fds
	}

	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	error = copyin(argp, &fs, sizeof(fs));
	if (error) {
		goto out;
	}

	orig_fd = fs.fs_orig_fd;
	if ((error = fp_lookup(p, orig_fd, &orig_fp, 1))) {
		printf("CODE SIGNING: Failed to find original file for supplemental signature attachment\n");
		goto out;
	}

	if (orig_fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		fp_drop(p, orig_fd, orig_fp, 1);
		goto out;
	}

	ivp = (struct vnode *)fp_get_data(orig_fp);

	vp = (struct vnode *)fp_get_data(fp);

	proc_fdunlock(p);

	error = vnode_getwithref(ivp);
	if (error) {
		fp_drop(p, orig_fd, orig_fp, 0);
		goto outdrop; //drop fp
	}

	error = vnode_getwithref(vp);
	if (error) {
		vnode_put(ivp);
		fp_drop(p, orig_fd, orig_fp, 0);
		goto outdrop;
	}

	/* Same blob-size ceiling as the F_ADDSIGS family. */
	if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
		error = E2BIG;
		goto dropboth; // drop iocounts on vp and ivp, drop orig_fp then drop fp via outdrop
	}

	kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
	kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
	if (kr != KERN_SUCCESS) {
		error = ENOMEM;
		goto dropboth;
	}

	int resid;
	/* Read the supplemental blob out of this fd's file. */
	error = vn_rdwr(UIO_READ, vp,
	    (caddr_t)kernel_blob_addr, (int)kernel_blob_size,
	    fs.fs_file_start + fs.fs_blob_start,
	    UIO_SYSSPACE, 0,
	    kauth_cred_get(), &resid, p);
	if ((error == 0) && resid) {
		/* kernel_blob_size rounded to a page size, but signature may be at end of file */
		memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
	}

	if (error) {
		ubc_cs_blob_deallocate(kernel_blob_addr,
		    kernel_blob_size);
		goto dropboth;
	}

	error = ubc_cs_blob_add_supplement(vp, ivp, fs.fs_file_start,
	    &kernel_blob_addr, kernel_blob_size, &blob);

	/* ubc_blob_add_supplement() has consumed kernel_blob_addr if it is zeroed */
	if (error) {
		if (kernel_blob_addr) {
			ubc_cs_blob_deallocate(kernel_blob_addr,
			    kernel_blob_size);
		}
		goto dropboth;
	}
	vnode_put(ivp);
	vnode_put(vp);
	fp_drop(p, orig_fd, orig_fp, 0);
	break;

dropboth:
	vnode_put(ivp);
	vnode_put(vp);
	fp_drop(p, orig_fd, orig_fp, 0);
	goto outdrop;
}
4212 #endif
case F_GETCODEDIR:
case F_FINDSIGS: {
	/* Not implemented; both commands are unconditionally rejected. */
	error = ENOTSUP;
	goto out;
}
case F_CHECK_LV: {
	struct fileglob *fg;
	fchecklv_t lv = {};

	/*
	 * Library-validation check, delegated to the MAC framework.
	 * Note: without CONFIG_MACF this copies in the arguments and then
	 * breaks with error == 0 (no-op success).
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	fg = fp->fp_glob;
	proc_fdunlock(p);

	if (IS_64BIT_PROCESS(p)) {
		error = copyin(argp, &lv, sizeof(lv));
	} else {
		/* Widen the 32-bit layout into the native struct. */
		struct user32_fchecklv lv32 = {};

		error = copyin(argp, &lv32, sizeof(lv32));
		lv.lv_file_start = lv32.lv_file_start;
		lv.lv_error_message = (void *)(uintptr_t)lv32.lv_error_message;
		lv.lv_error_message_size = lv32.lv_error_message_size;
	}
	if (error) {
		goto outdrop;
	}

#if CONFIG_MACF
	error = mac_file_check_library_validation(p, fg, lv.lv_file_start,
	    (user_long_t)lv.lv_error_message, lv.lv_error_message_size);
#endif

	break;
}
case F_GETSIGSINFO: {
	struct cs_blob *blob = NULL;
	fgetsigsinfo_t sigsinfo = {};

	/*
	 * Query properties of the code-signature blob loaded at
	 * fg_file_start.  Currently only supports the
	 * GETSIGSINFO_PLATFORM_BINARY request.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	error = vnode_getwithref(vp);
	if (error) {
		goto outdrop;
	}

	error = copyin(argp, &sigsinfo, sizeof(sigsinfo));
	if (error) {
		vnode_put(vp);
		goto outdrop;
	}

	blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, sigsinfo.fg_file_start);
	if (blob == NULL) {
		/* No signature loaded at that offset. */
		error = ENOENT;
		vnode_put(vp);
		goto outdrop;
	}
	switch (sigsinfo.fg_info_request) {
	case GETSIGSINFO_PLATFORM_BINARY:
		sigsinfo.fg_sig_is_platform = blob->csb_platform_binary;
		/* Copy out only the answer field, in place within the user struct. */
		error = copyout(&sigsinfo.fg_sig_is_platform,
		    (vm_address_t)argp + offsetof(struct fgetsigsinfo, fg_sig_is_platform),
		    sizeof(sigsinfo.fg_sig_is_platform));
		if (error) {
			vnode_put(vp);
			goto outdrop;
		}
		break;
	default:
		error = EINVAL;
		vnode_put(vp);
		goto outdrop;
	}
	vnode_put(vp);
	break;
}
4297 #if CONFIG_PROTECT
case F_GETPROTECTIONCLASS: {
	/*
	 * Return the file's data-protection class via getattr; result is
	 * delivered in *retval.  ENOTSUP if the filesystem doesn't
	 * support va_dataprotect_class.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);

	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_dataprotect_class);
	error = VNOP_GETATTR(vp, &va, &context);
	if (!error) {
		if (VATTR_IS_SUPPORTED(&va, va_dataprotect_class)) {
			*retval = va.va_dataprotect_class;
		} else {
			error = ENOTSUP;
		}
	}

	vnode_put(vp);
	break;
}
4328
case F_SETPROTECTIONCLASS: {
	/* tmp must be a valid PROTECTION_CLASS_* */
	tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);

	/*
	 * Set the file's data-protection class via setattr.  Requires
	 * write authorization on the vnode (checked against the current
	 * thread's context, not the fd's opening credential).
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}
	vp = (struct vnode *)fp_get_data(fp);

	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	/* Only go forward if you have write access */
	vfs_context_t ctx = vfs_context_current();
	if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
		vnode_put(vp);
		error = EBADF;
		goto outdrop;
	}

	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_dataprotect_class, tmp);

	error = VNOP_SETATTR(vp, &va, ctx);

	vnode_put(vp);
	break;
}
4364
case F_TRANSCODEKEY: {
	/*
	 * Ask the filesystem to transcode the file's wrapped content-
	 * protection key into a caller-visible buffer; on success the key
	 * bytes are copied out and *retval is set to the key length.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	vp = (struct vnode *)fp_get_data(fp);
	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	cp_key_t k = {
		.len = CP_MAX_WRAPPEDKEYSIZE,
	};

	/* Z_ZERO: buffer starts zeroed so no stale kernel data can leak out. */
	k.key = kalloc_data(CP_MAX_WRAPPEDKEYSIZE, Z_WAITOK | Z_ZERO);
	if (k.key == NULL) {
		error = ENOMEM;
	} else {
		error = VNOP_IOCTL(vp, F_TRANSCODEKEY, (caddr_t)&k, 1, &context);
	}

	vnode_put(vp);

	if (error == 0) {
		error = copyout(k.key, argp, k.len);
		*retval = k.len;
	}
	/* kfree_data is a no-op for a NULL pointer, so this is safe on ENOMEM. */
	kfree_data(k.key, CP_MAX_WRAPPEDKEYSIZE);

	break;
}
4400
case F_GETPROTECTIONLEVEL: {
	/*
	 * Forward to the filesystem; the ioctl writes its answer
	 * directly into *retval.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	vp = (struct vnode*)fp_get_data(fp);
	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	error = VNOP_IOCTL(vp, F_GETPROTECTIONLEVEL, (caddr_t)retval, 0, &context);

	vnode_put(vp);
	break;
}
4420
case F_GETDEFAULTPROTLEVEL: {
	/*
	 * Query the mount's default data-protection level; the ioctl
	 * writes its answer directly into *retval.
	 */
	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	vp = (struct vnode*)fp_get_data(fp);
	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	/*
	 * if cp_get_major_vers fails, error will be set to proper errno
	 * and cp_version will still be 0.
	 */

	error = VNOP_IOCTL(vp, F_GETDEFAULTPROTLEVEL, (caddr_t)retval, 0, &context);

	vnode_put(vp);
	break;
}
4445
4446 #endif /* CONFIG_PROTECT */
4447
case F_MOVEDATAEXTENTS: {
	/*
	 * Privileged SPI: exchange the data extents of two files (the
	 * exchangedata-style operation with FSOPT_EXCHANGE_DATA_ONLY).
	 * uap->arg carries the second fd.  Both files must be regular
	 * files on the same HFS+ or APFS mount, distinct, and writable by
	 * the caller.  Every failure path after proc_fdunlock must drop
	 * any held iocounts and the fp2 reference before goto outdrop.
	 */
	struct fileproc *fp2 = NULL;
	struct vnode *src_vp = NULLVP;
	struct vnode *dst_vp = NULLVP;
	/* We need to grab the 2nd FD out of the arguments before moving on. */
	int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);

	error = priv_check_cred(kauth_cred_get(), PRIV_VFS_MOVE_DATA_EXTENTS, 0);
	if (error) {
		goto out;
	}

	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	/*
	 * For now, special case HFS+ and APFS only, since this
	 * is SPI.
	 */
	src_vp = (struct vnode *)fp_get_data(fp);
	if (src_vp->v_tag != VT_HFS && src_vp->v_tag != VT_APFS) {
		error = ENOTSUP;
		goto out;
	}

	/*
	 * Get the references before we start acquiring iocounts on the vnodes,
	 * while we still hold the proc fd lock
	 */
	if ((error = fp_lookup(p, fd2, &fp2, 1))) {
		error = EBADF;
		goto out;
	}
	if (fp2->f_type != DTYPE_VNODE) {
		fp_drop(p, fd2, fp2, 1);
		error = EBADF;
		goto out;
	}
	dst_vp = (struct vnode *)fp_get_data(fp2);
	if (dst_vp->v_tag != VT_HFS && dst_vp->v_tag != VT_APFS) {
		fp_drop(p, fd2, fp2, 1);
		error = ENOTSUP;
		goto out;
	}

#if CONFIG_MACF
	/* Re-do MAC checks against the new FD, pass in a fake argument */
	error = mac_file_check_fcntl(kauth_cred_get(), fp2->fp_glob, cmd, 0);
	if (error) {
		fp_drop(p, fd2, fp2, 1);
		goto out;
	}
#endif
	/* Audit the 2nd FD */
	AUDIT_ARG(fd, fd2);

	proc_fdunlock(p);

	if (vnode_getwithref(src_vp)) {
		fp_drop(p, fd2, fp2, 0);
		error = ENOENT;
		goto outdrop;
	}
	if (vnode_getwithref(dst_vp)) {
		vnode_put(src_vp);
		fp_drop(p, fd2, fp2, 0);
		error = ENOENT;
		goto outdrop;
	}

	/*
	 * Basic asserts; validate they are not the same and that
	 * both live on the same filesystem.
	 */
	if (dst_vp == src_vp) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EINVAL;
		goto outdrop;
	}

	if (dst_vp->v_mount != src_vp->v_mount) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EXDEV;
		goto outdrop;
	}

	/* Now we have a legit pair of FDs. Go to work */

	/* Now check for write access to the target files */
	if (vnode_authorize(src_vp, NULLVP,
	    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EBADF;
		goto outdrop;
	}

	if (vnode_authorize(dst_vp, NULLVP,
	    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EBADF;
		goto outdrop;
	}

	/* Verify that both vps point to files and not directories */
	if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
		error = EINVAL;
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		goto outdrop;
	}

	/*
	 * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
	 * We'll pass in our special bit indicating that the new behavior is expected
	 */

	error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);

	vnode_put(src_vp);
	vnode_put(dst_vp);
	fp_drop(p, fd2, fp2, 0);
	break;
}
4582
case F_TRANSFEREXTENTS: {
	/*
	 * Transfer extents from this file to the file named by the second
	 * fd in uap->arg.  APFS-only SPI; both vnodes must be distinct
	 * regular files on the same mount and writable by the caller.
	 * Structure mirrors F_MOVEDATAEXTENTS, but the operation itself
	 * is delivered as an ioctl on the source vnode with the
	 * destination vnode as argument.
	 */
	struct fileproc *fp2 = NULL;
	struct vnode *src_vp = NULLVP;
	struct vnode *dst_vp = NULLVP;

	/* Get 2nd FD out of the arguments. */
	int fd2 = CAST_DOWN_EXPLICIT(int, uap->arg);
	if (fd2 < 0) {
		error = EINVAL;
		goto out;
	}

	if (fp->f_type != DTYPE_VNODE) {
		error = EBADF;
		goto out;
	}

	/*
	 * Only allow this for APFS
	 */
	src_vp = (struct vnode *)fp_get_data(fp);
	if (src_vp->v_tag != VT_APFS) {
		error = ENOTSUP;
		goto out;
	}

	/*
	 * Get the references before we start acquiring iocounts on the vnodes,
	 * while we still hold the proc fd lock
	 */
	if ((error = fp_lookup(p, fd2, &fp2, 1))) {
		error = EBADF;
		goto out;
	}
	if (fp2->f_type != DTYPE_VNODE) {
		fp_drop(p, fd2, fp2, 1);
		error = EBADF;
		goto out;
	}
	dst_vp = (struct vnode *)fp_get_data(fp2);
	if (dst_vp->v_tag != VT_APFS) {
		fp_drop(p, fd2, fp2, 1);
		error = ENOTSUP;
		goto out;
	}

#if CONFIG_MACF
	/* Re-do MAC checks against the new FD, pass in a fake argument */
	error = mac_file_check_fcntl(kauth_cred_get(), fp2->fp_glob, cmd, 0);
	if (error) {
		fp_drop(p, fd2, fp2, 1);
		goto out;
	}
#endif
	/* Audit the 2nd FD */
	AUDIT_ARG(fd, fd2);

	proc_fdunlock(p);

	if (vnode_getwithref(src_vp)) {
		fp_drop(p, fd2, fp2, 0);
		error = ENOENT;
		goto outdrop;
	}
	if (vnode_getwithref(dst_vp)) {
		vnode_put(src_vp);
		fp_drop(p, fd2, fp2, 0);
		error = ENOENT;
		goto outdrop;
	}

	/*
	 * Validate they are not the same and that
	 * both live on the same filesystem.
	 */
	if (dst_vp == src_vp) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EINVAL;
		goto outdrop;
	}
	if (dst_vp->v_mount != src_vp->v_mount) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EXDEV;
		goto outdrop;
	}

	/* Verify that both vps point to files and not directories */
	if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
		error = EINVAL;
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		goto outdrop;
	}


	/*
	 * Okay, vps are legit. Check access. We'll require write access
	 * to both files.
	 */
	if (vnode_authorize(src_vp, NULLVP,
	    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EBADF;
		goto outdrop;
	}
	if (vnode_authorize(dst_vp, NULLVP,
	    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
		vnode_put(src_vp);
		vnode_put(dst_vp);
		fp_drop(p, fd2, fp2, 0);
		error = EBADF;
		goto outdrop;
	}

	/* Pass it on through to the fs */
	error = VNOP_IOCTL(src_vp, cmd, (caddr_t)dst_vp, 0, &context);

	vnode_put(src_vp);
	vnode_put(dst_vp);
	fp_drop(p, fd2, fp2, 0);
	break;
}
4712
4713 /*
4714 * SPI for making a file compressed.
4715 */
4716 case F_MAKECOMPRESSED: {
4717 uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
4718
4719 if (fp->f_type != DTYPE_VNODE) {
4720 error = EBADF;
4721 goto out;
4722 }
4723
4724 vp = (struct vnode*)fp_get_data(fp);
4725 proc_fdunlock(p);
4726
4727 /* get the vnode */
4728 if (vnode_getwithref(vp)) {
4729 error = ENOENT;
4730 goto outdrop;
4731 }
4732
4733 /* Is it a file? */
4734 if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
4735 vnode_put(vp);
4736 error = EBADF;
4737 goto outdrop;
4738 }
4739
4740 /* invoke ioctl to pass off to FS */
4741 /* Only go forward if you have write access */
4742 vfs_context_t ctx = vfs_context_current();
4743 if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4744 vnode_put(vp);
4745 error = EBADF;
4746 goto outdrop;
4747 }
4748
4749 error = VNOP_IOCTL(vp, cmd, (caddr_t)&gcounter, 0, &context);
4750
4751 vnode_put(vp);
4752 break;
4753 }
4754
4755 /*
 * SPI (private) for indicating to a filesystem that subsequent writes to
 * the open FD will be written to the Fastflow.
4758 */
4759 case F_SET_GREEDY_MODE:
4760 /* intentionally drop through to the same handler as F_SETSTATIC.
4761 * both fcntls should pass the argument and their selector into VNOP_IOCTL.
4762 */
4763
4764 /*
4765 * SPI (private) for indicating to a filesystem that subsequent writes to
4766 * the open FD will represent static content.
4767 */
4768 case F_SETSTATICCONTENT: {
4769 caddr_t ioctl_arg = NULL;
4770
4771 if (uap->arg) {
4772 ioctl_arg = (caddr_t) 1;
4773 }
4774
4775 if (fp->f_type != DTYPE_VNODE) {
4776 error = EBADF;
4777 goto out;
4778 }
4779 vp = (struct vnode *)fp_get_data(fp);
4780 proc_fdunlock(p);
4781
4782 error = vnode_getwithref(vp);
4783 if (error) {
4784 error = ENOENT;
4785 goto outdrop;
4786 }
4787
4788 /* Only go forward if you have write access */
4789 vfs_context_t ctx = vfs_context_current();
4790 if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4791 vnode_put(vp);
4792 error = EBADF;
4793 goto outdrop;
4794 }
4795
4796 error = VNOP_IOCTL(vp, cmd, ioctl_arg, 0, &context);
4797 (void)vnode_put(vp);
4798
4799 break;
4800 }
4801
4802 /*
4803 * SPI (private) for indicating to the lower level storage driver that the
4804 * subsequent writes should be of a particular IO type (burst, greedy, static),
4805 * or other flavors that may be necessary.
4806 */
4807 case F_SETIOTYPE: {
4808 caddr_t param_ptr;
4809 uint32_t param;
4810
4811 if (uap->arg) {
4812 /* extract 32 bits of flags from userland */
4813 param_ptr = (caddr_t) uap->arg;
4814 param = (uint32_t) param_ptr;
4815 } else {
4816 /* If no argument is specified, error out */
4817 error = EINVAL;
4818 goto out;
4819 }
4820
4821 /*
4822 * Validate the different types of flags that can be specified:
4823 * all of them are mutually exclusive for now.
4824 */
4825 switch (param) {
4826 case F_IOTYPE_ISOCHRONOUS:
4827 break;
4828
4829 default:
4830 error = EINVAL;
4831 goto out;
4832 }
4833
4834
4835 if (fp->f_type != DTYPE_VNODE) {
4836 error = EBADF;
4837 goto out;
4838 }
4839 vp = (struct vnode *)fp_get_data(fp);
4840 proc_fdunlock(p);
4841
4842 error = vnode_getwithref(vp);
4843 if (error) {
4844 error = ENOENT;
4845 goto outdrop;
4846 }
4847
4848 /* Only go forward if you have write access */
4849 vfs_context_t ctx = vfs_context_current();
4850 if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4851 vnode_put(vp);
4852 error = EBADF;
4853 goto outdrop;
4854 }
4855
4856 error = VNOP_IOCTL(vp, cmd, param_ptr, 0, &context);
4857 (void)vnode_put(vp);
4858
4859 break;
4860 }
4861
4862 /*
4863 * Set the vnode pointed to by 'fd'
4864 * and tag it as the (potentially future) backing store
4865 * for another filesystem
4866 */
4867 case F_SETBACKINGSTORE: {
4868 if (fp->f_type != DTYPE_VNODE) {
4869 error = EBADF;
4870 goto out;
4871 }
4872
4873 vp = (struct vnode *)fp_get_data(fp);
4874
4875 if (vp->v_tag != VT_HFS) {
4876 error = EINVAL;
4877 goto out;
4878 }
4879 proc_fdunlock(p);
4880
4881 if (vnode_getwithref(vp)) {
4882 error = ENOENT;
4883 goto outdrop;
4884 }
4885
4886 /* only proceed if you have write access */
4887 vfs_context_t ctx = vfs_context_current();
4888 if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4889 vnode_put(vp);
4890 error = EBADF;
4891 goto outdrop;
4892 }
4893
4894
4895 /* If arg != 0, set, otherwise unset */
4896 if (uap->arg) {
4897 error = VNOP_IOCTL(vp, cmd, (caddr_t)1, 0, &context);
4898 } else {
4899 error = VNOP_IOCTL(vp, cmd, (caddr_t)NULL, 0, &context);
4900 }
4901
4902 vnode_put(vp);
4903 break;
4904 }
4905
4906 /*
4907 * like F_GETPATH, but special semantics for
4908 * the mobile time machine handler.
4909 */
4910 case F_GETPATH_MTMINFO: {
4911 char *pathbufp;
4912 int pathlen;
4913
4914 if (fp->f_type != DTYPE_VNODE) {
4915 error = EBADF;
4916 goto out;
4917 }
4918 vp = (struct vnode *)fp_get_data(fp);
4919 proc_fdunlock(p);
4920
4921 pathlen = MAXPATHLEN;
4922 pathbufp = zalloc(ZV_NAMEI);
4923
4924 if ((error = vnode_getwithref(vp)) == 0) {
4925 int backingstore = 0;
4926
4927 /* Check for error from vn_getpath before moving on */
4928 if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) {
4929 if (vp->v_tag == VT_HFS) {
4930 error = VNOP_IOCTL(vp, cmd, (caddr_t) &backingstore, 0, &context);
4931 }
4932 (void)vnode_put(vp);
4933
4934 if (error == 0) {
4935 error = copyout((caddr_t)pathbufp, argp, pathlen);
4936 }
4937 if (error == 0) {
4938 /*
4939 * If the copyout was successful, now check to ensure
4940 * that this vnode is not a BACKINGSTORE vnode. mtmd
4941 * wants the path regardless.
4942 */
4943 if (backingstore) {
4944 error = EBUSY;
4945 }
4946 }
4947 } else {
4948 (void)vnode_put(vp);
4949 }
4950 }
4951
4952 zfree(ZV_NAMEI, pathbufp);
4953 goto outdrop;
4954 }
4955
4956 case F_RECYCLE: {
4957 #if !DEBUG && !DEVELOPMENT
4958 bool allowed = false;
4959
4960 //
4961 // non-debug and non-development kernels have restrictions
		// on who can call this fcntl. the process has to be marked
4963 // with the dataless-manipulator entitlement and either the
4964 // process or thread have to be marked rapid-aging.
4965 //
4966 if (!vfs_context_is_dataless_manipulator(&context)) {
4967 error = EPERM;
4968 goto out;
4969 }
4970
4971 proc_t proc = vfs_context_proc(&context);
4972 if (proc && (proc->p_lflag & P_LRAGE_VNODES)) {
4973 allowed = true;
4974 } else {
4975 thread_t thr = vfs_context_thread(&context);
4976 if (thr) {
4977 struct uthread *ut = get_bsdthread_info(thr);
4978
4979 if (ut && (ut->uu_flag & UT_RAGE_VNODES)) {
4980 allowed = true;
4981 }
4982 }
4983 }
4984 if (!allowed) {
4985 error = EPERM;
4986 goto out;
4987 }
4988 #endif
4989
4990 if (fp->f_type != DTYPE_VNODE) {
4991 error = EBADF;
4992 goto out;
4993 }
4994 vp = (struct vnode *)fp_get_data(fp);
4995 proc_fdunlock(p);
4996
4997 vnode_recycle(vp);
4998 break;
4999 }
5000
5001 #if CONFIG_FILE_LEASES
5002 case F_SETLEASE: {
5003 struct fileglob *fg;
5004 int fl_type;
5005 int expcounts;
5006
5007 if (fp->f_type != DTYPE_VNODE) {
5008 error = EBADF;
5009 goto out;
5010 }
5011 vp = (struct vnode *)fp_get_data(fp);
5012 fg = fp->fp_glob;;
5013 proc_fdunlock(p);
5014
5015 /*
5016 * In order to allow a process to avoid breaking
5017 * its own leases, the expected open count needs
5018 * to be provided to F_SETLEASE when placing write lease.
5019 * Similarly, in order to allow a process to place a read lease
5020 * after opening the file multiple times in RW mode, the expected
5021 * write count needs to be provided to F_SETLEASE when placing a
5022 * read lease.
5023 *
5024 * We use the upper 30 bits of the integer argument (way more than
5025 * enough) as the expected open/write count.
5026 *
5027 * If the caller passed 0 for the expected open count,
5028 * assume 1.
5029 */
5030 fl_type = CAST_DOWN_EXPLICIT(int, uap->arg);
5031 expcounts = (unsigned int)fl_type >> 2;
5032 fl_type &= 3;
5033
5034 if (fl_type == F_WRLCK && expcounts == 0) {
5035 expcounts = 1;
5036 }
5037
5038 AUDIT_ARG(value32, fl_type);
5039
5040 if ((error = vnode_getwithref(vp))) {
5041 goto outdrop;
5042 }
5043
5044 /*
5045 * Only support for regular file/dir mounted on local-based filesystem.
5046 */
5047 if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
5048 !(vfs_flags(vnode_mount(vp)) & MNT_LOCAL)) {
5049 error = EBADF;
5050 vnode_put(vp);
5051 goto outdrop;
5052 }
5053
5054 /* For directory, we only support read lease. */
5055 if (vnode_vtype(vp) == VDIR && fl_type == F_WRLCK) {
5056 error = ENOTSUP;
5057 vnode_put(vp);
5058 goto outdrop;
5059 }
5060
5061 switch (fl_type) {
5062 case F_RDLCK:
5063 case F_WRLCK:
5064 case F_UNLCK:
5065 error = vnode_setlease(vp, fg, fl_type, expcounts,
5066 vfs_context_current());
5067 break;
5068 default:
5069 error = EINVAL;
5070 break;
5071 }
5072
5073 vnode_put(vp);
5074 goto outdrop;
5075 }
5076
5077 case F_GETLEASE: {
5078 if (fp->f_type != DTYPE_VNODE) {
5079 error = EBADF;
5080 goto out;
5081 }
5082 vp = (struct vnode *)fp_get_data(fp);
5083 proc_fdunlock(p);
5084
5085 if ((error = vnode_getwithref(vp))) {
5086 goto outdrop;
5087 }
5088
5089 if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
5090 !(vfs_flags(vnode_mount(vp)) & MNT_LOCAL)) {
5091 error = EBADF;
5092 vnode_put(vp);
5093 goto outdrop;
5094 }
5095
5096 error = 0;
5097 *retval = vnode_getlease(vp);
5098 vnode_put(vp);
5099 goto outdrop;
5100 }
5101 #endif /* CONFIG_FILE_LEASES */
5102
5103 /* SPI (private) for asserting background access to a file */
5104 case F_ASSERT_BG_ACCESS:
5105 /* SPI (private) for releasing background access to a file */
5106 case F_RELEASE_BG_ACCESS: {
5107 /*
5108 * Check if the process is platform code, which means
5109 * that it is considered part of the Operating System.
5110 */
5111 if (!csproc_get_platform_binary(p)) {
5112 error = EPERM;
5113 goto out;
5114 }
5115
5116 if (fp->f_type != DTYPE_VNODE) {
5117 error = EBADF;
5118 goto out;
5119 }
5120
5121 vp = (struct vnode *)fp_get_data(fp);
5122 proc_fdunlock(p);
5123
5124 if (vnode_getwithref(vp)) {
5125 error = ENOENT;
5126 goto outdrop;
5127 }
5128
5129 /* Verify that vp points to a file and not a directory */
5130 if (!vnode_isreg(vp)) {
5131 vnode_put(vp);
5132 error = EINVAL;
5133 goto outdrop;
5134 }
5135
5136 /* Only proceed if you have read access */
5137 if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA), &context) != 0) {
5138 vnode_put(vp);
5139 error = EBADF;
5140 goto outdrop;
5141 }
5142
5143 if (cmd == F_ASSERT_BG_ACCESS) {
5144 fassertbgaccess_t args;
5145
5146 if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
5147 vnode_put(vp);
5148 goto outdrop;
5149 }
5150
5151 error = VNOP_IOCTL(vp, F_ASSERT_BG_ACCESS, (caddr_t)&args, 0, &context);
5152 } else {
5153 // cmd == F_RELEASE_BG_ACCESS
5154 error = VNOP_IOCTL(vp, F_RELEASE_BG_ACCESS, (caddr_t)NULL, 0, &context);
5155 }
5156
5157 vnode_put(vp);
5158
5159 goto outdrop;
5160 }
5161
5162 default:
5163 /*
		 * This is an fcntl() that we do not recognize at this level;
5165 * if this is a vnode, we send it down into the VNOP_IOCTL
5166 * for this vnode; this can include special devices, and will
5167 * effectively overload fcntl() to send ioctl()'s.
5168 */
5169 if ((cmd & IOC_VOID) && (cmd & IOC_INOUT)) {
5170 error = EINVAL;
5171 goto out;
5172 }
5173
5174 /*
5175 * Catch any now-invalid fcntl() selectors.
5176 * (When adding a selector to this list, it may be prudent
5177 * to consider adding it to the list in fsctl_internal() as well.)
5178 */
5179 switch (cmd) {
5180 case (int)APFSIOC_REVERT_TO_SNAPSHOT:
5181 case (int)FSIOC_FIOSEEKHOLE:
5182 case (int)FSIOC_FIOSEEKDATA:
5183 case (int)FSIOC_CAS_BSDFLAGS:
5184 case (int)FSIOC_KERNEL_ROOTAUTH:
5185 case (int)FSIOC_GRAFT_FS:
5186 case (int)FSIOC_UNGRAFT_FS:
5187 case (int)FSIOC_AUTH_FS:
5188 case HFS_GET_BOOT_INFO:
5189 case HFS_SET_BOOT_INFO:
5190 case FIOPINSWAP:
5191 case F_MARKDEPENDENCY:
5192 case TIOCREVOKE:
5193 case TIOCREVOKECLEAR:
5194 error = EINVAL;
5195 goto out;
5196 default:
5197 break;
5198 }
5199
5200 if (fp->f_type != DTYPE_VNODE) {
5201 error = EBADF;
5202 goto out;
5203 }
5204 vp = (struct vnode *)fp_get_data(fp);
5205 proc_fdunlock(p);
5206
5207 if ((error = vnode_getwithref(vp)) == 0) {
5208 #define STK_PARAMS 128
5209 char stkbuf[STK_PARAMS] = {0};
5210 unsigned int size;
5211 caddr_t data, memp;
5212 /*
5213 * For this to work properly, we have to copy in the
5214 * ioctl() cmd argument if there is one; we must also
5215 * check that a command parameter, if present, does
5216 * not exceed the maximum command length dictated by
5217 * the number of bits we have available in the command
5218 * to represent a structure length. Finally, we have
5219 * to copy the results back out, if it is that type of
5220 * ioctl().
5221 */
5222 size = IOCPARM_LEN(cmd);
5223 if (size > IOCPARM_MAX) {
5224 (void)vnode_put(vp);
5225 error = EINVAL;
5226 break;
5227 }
5228
5229 memp = NULL;
5230 if (size > sizeof(stkbuf)) {
5231 memp = (caddr_t)kalloc_data(size, Z_WAITOK);
5232 if (memp == 0) {
5233 (void)vnode_put(vp);
5234 error = ENOMEM;
5235 goto outdrop;
5236 }
5237 data = memp;
5238 } else {
5239 data = &stkbuf[0];
5240 }
5241
5242 if (cmd & IOC_IN) {
5243 if (size) {
5244 /* structure */
5245 error = copyin(argp, data, size);
5246 if (error) {
5247 (void)vnode_put(vp);
5248 if (memp) {
5249 kfree_data(memp, size);
5250 }
5251 goto outdrop;
5252 }
5253
5254 /* Bzero the section beyond that which was needed */
5255 if (size <= sizeof(stkbuf)) {
5256 bzero((((uint8_t*)data) + size), (sizeof(stkbuf) - size));
5257 }
5258 } else {
5259 /* int */
5260 if (is64bit) {
5261 *(user_addr_t *)data = argp;
5262 } else {
5263 *(uint32_t *)data = (uint32_t)argp;
5264 }
5265 };
5266 } else if ((cmd & IOC_OUT) && size) {
5267 /*
5268 * Zero the buffer so the user always
5269 * gets back something deterministic.
5270 */
5271 bzero(data, size);
5272 } else if (cmd & IOC_VOID) {
5273 if (is64bit) {
5274 *(user_addr_t *)data = argp;
5275 } else {
5276 *(uint32_t *)data = (uint32_t)argp;
5277 }
5278 }
5279
5280 error = VNOP_IOCTL(vp, cmd, CAST_DOWN(caddr_t, data), 0, &context);
5281
5282 (void)vnode_put(vp);
5283
5284 /* Copy any output data to user */
5285 if (error == 0 && (cmd & IOC_OUT) && size) {
5286 error = copyout(data, argp, size);
5287 }
5288 if (memp) {
5289 kfree_data(memp, size);
5290 }
5291 }
5292 break;
5293 }
5294
5295 outdrop:
5296 return sys_fcntl_outdrop(p, fd, fp, vp, error);
5297
5298 out:
5299 return sys_fcntl_out(p, fd, fp, error);
5300 }
5301
5302
5303 /*
5304 * sys_close
5305 *
5306 * Description: The implementation of the close(2) system call
5307 *
5308 * Parameters: p Process in whose per process file table
5309 * the close is to occur
5310 * uap->fd fd to be closed
5311 * retval <unused>
5312 *
5313 * Returns: 0 Success
5314 * fp_lookup:EBADF Bad file descriptor
5315 * fp_guard_exception:??? Guarded file descriptor
5316 * close_internal:EBADF
5317 * close_internal:??? Anything returnable by a per-fileops
5318 * close function
5319 */
5320 int
sys_close(proc_t p,struct close_args * uap,__unused int32_t * retval)5321 sys_close(proc_t p, struct close_args *uap, __unused int32_t *retval)
5322 {
5323 kauth_cred_t p_cred = current_cached_proc_cred(p);
5324
5325 __pthread_testcancel(1);
5326 return close_nocancel(p, p_cred, uap->fd);
5327 }
5328
5329 int
sys_close_nocancel(proc_t p,struct close_nocancel_args * uap,__unused int32_t * retval)5330 sys_close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval)
5331 {
5332 kauth_cred_t p_cred = current_cached_proc_cred(p);
5333
5334 return close_nocancel(p, p_cred, uap->fd);
5335 }
5336
5337 int
close_nocancel(proc_t p,kauth_cred_t p_cred,int fd)5338 close_nocancel(proc_t p, kauth_cred_t p_cred, int fd)
5339 {
5340 struct fileproc *fp;
5341
5342 AUDIT_SYSCLOSE(p, fd);
5343
5344 proc_fdlock(p);
5345 if ((fp = fp_get_noref_locked(p, fd)) == NULL) {
5346 proc_fdunlock(p);
5347 return EBADF;
5348 }
5349
5350 if (fp_isguarded(fp, GUARD_CLOSE)) {
5351 int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
5352 proc_fdunlock(p);
5353 return error;
5354 }
5355
5356 return fp_close_and_unlock(p, p_cred, fd, fp, 0);
5357 }
5358
5359
5360 /*
5361 * fstat
5362 *
5363 * Description: Return status information about a file descriptor.
5364 *
5365 * Parameters: p The process doing the fstat
5366 * fd The fd to stat
5367 * ub The user stat buffer
5368 * xsecurity The user extended security
5369 * buffer, or 0 if none
5370 * xsecurity_size The size of xsecurity, or 0
5371 * if no xsecurity
5372 * isstat64 Flag to indicate 64 bit version
5373 * for inode size, etc.
5374 *
5375 * Returns: 0 Success
5376 * EBADF
5377 * EFAULT
5378 * fp_lookup:EBADF Bad file descriptor
5379 * vnode_getwithref:???
5380 * copyout:EFAULT
5381 * vnode_getwithref:???
5382 * vn_stat:???
5383 * soo_stat:???
5384 * pipe_stat:???
5385 * pshm_stat:???
5386 * kqueue_stat:???
5387 *
5388 * Notes: Internal implementation for all other fstat() related
5389 * functions
5390 *
5391 * XXX switch on node type is bogus; need a stat in struct
5392 * XXX fileops instead.
5393 */
static int
fstat(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity,
    user_addr_t xsecurity_size, int isstat64)
{
	struct fileproc *fp;
	/* kernel-side stat result; which member is valid depends on isstat64 */
	union {
		struct stat sb;
		struct stat64 sb64;
	} source;
	/* user-ABI-shaped copy of the result; member chosen by isstat64 x process bitness */
	union {
		struct user64_stat user64_sb;
		struct user32_stat user32_sb;
		struct user64_stat64 user64_sb64;
		struct user32_stat64 user32_sb64;
	} dest;
	int error, my_size;
	file_type_t type;
	caddr_t data;
	kauth_filesec_t fsec;
	user_size_t xsecurity_bufsize;
	vfs_context_t ctx = vfs_context_current();
	void * sbptr;


	AUDIT_ARG(fd, fd);

	/* take a reference on the fileproc for the duration of the call */
	if ((error = fp_lookup(p, fd, &fp, 0)) != 0) {
		return error;
	}
	type = fp->f_type;
	data = (caddr_t)fp_get_data(fp);
	fsec = KAUTH_FILESEC_NONE;

	sbptr = (void *)&source;

	/* dispatch on file type (see XXX note above: should be a fileop) */
	switch (type) {
	case DTYPE_VNODE:
		if ((error = vnode_getwithref((vnode_t)data)) == 0) {
			/*
			 * If the caller has the file open, and is not
			 * requesting extended security information, we are
			 * going to let them get the basic stat information.
			 */
			if (xsecurity == USER_ADDR_NULL) {
				error = vn_stat_noauth((vnode_t)data, sbptr, NULL, isstat64, 0, ctx,
				    fp->fp_glob->fg_cred);
			} else {
				/* vn_stat() may also return an allocated filesec in fsec */
				error = vn_stat((vnode_t)data, sbptr, &fsec, isstat64, 0, ctx);
			}

			AUDIT_ARG(vnpath, (struct vnode *)data, ARG_VNODE1);
			(void)vnode_put((vnode_t)data);
		}
		break;

#if SOCKETS
	case DTYPE_SOCKET:
		error = soo_stat((struct socket *)data, sbptr, isstat64);
		break;
#endif /* SOCKETS */

	case DTYPE_PIPE:
		error = pipe_stat((void *)data, sbptr, isstat64);
		break;

	case DTYPE_PSXSHM:
		error = pshm_stat((void *)data, sbptr, isstat64);
		break;

	case DTYPE_KQUEUE:
		error = kqueue_stat((void *)data, sbptr, isstat64, p);
		break;

	default:
		error = EBADF;
		goto out;
	}
	if (error == 0) {
		caddr_t sbp;

		if (isstat64 != 0) {
			/* scrub the spare fields before they are copied out to userspace */
			source.sb64.st_lspare = 0;
			source.sb64.st_qspare[0] = 0LL;
			source.sb64.st_qspare[1] = 0LL;

			if (IS_64BIT_PROCESS(p)) {
				munge_user64_stat64(&source.sb64, &dest.user64_sb64);
				my_size = sizeof(dest.user64_sb64);
				sbp = (caddr_t)&dest.user64_sb64;
			} else {
				munge_user32_stat64(&source.sb64, &dest.user32_sb64);
				my_size = sizeof(dest.user32_sb64);
				sbp = (caddr_t)&dest.user32_sb64;
			}
		} else {
			/* scrub the spare fields before they are copied out to userspace */
			source.sb.st_lspare = 0;
			source.sb.st_qspare[0] = 0LL;
			source.sb.st_qspare[1] = 0LL;
			if (IS_64BIT_PROCESS(p)) {
				munge_user64_stat(&source.sb, &dest.user64_sb);
				my_size = sizeof(dest.user64_sb);
				sbp = (caddr_t)&dest.user64_sb;
			} else {
				munge_user32_stat(&source.sb, &dest.user32_sb);
				my_size = sizeof(dest.user32_sb);
				sbp = (caddr_t)&dest.user32_sb;
			}
		}

		error = copyout(sbp, ub, my_size);
	}

	/* caller wants extended security information? */
	if (xsecurity != USER_ADDR_NULL) {
		/* did we get any? */
		if (fsec == KAUTH_FILESEC_NONE) {
			/* no: report a zero size back to the caller */
			if (susize(xsecurity_size, 0) != 0) {
				error = EFAULT;
				goto out;
			}
		} else {
			/* find the user buffer size */
			xsecurity_bufsize = fusize(xsecurity_size);

			/* copy out the actual data size */
			if (susize(xsecurity_size, KAUTH_FILESEC_COPYSIZE(fsec)) != 0) {
				error = EFAULT;
				goto out;
			}

			/* if the caller supplied enough room, copy out to it */
			if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) {
				error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec));
			}
		}
	}
out:
	fp_drop(p, fd, fp, 0);
	/* free any filesec that vn_stat() allocated for us */
	if (fsec != NULL) {
		kauth_filesec_free(fsec);
	}
	return error;
}
5537
5538
5539 /*
5540 * sys_fstat_extended
5541 *
5542 * Description: Extended version of fstat supporting returning extended
5543 * security information
5544 *
5545 * Parameters: p The process doing the fstat
5546 * uap->fd The fd to stat
5547 * uap->ub The user stat buffer
5548 * uap->xsecurity The user extended security
5549 * buffer, or 0 if none
5550 * uap->xsecurity_size The size of xsecurity, or 0
5551 *
5552 * Returns: 0 Success
5553 * !0 Errno (see fstat)
5554 */
5555 int
sys_fstat_extended(proc_t p,struct fstat_extended_args * uap,__unused int32_t * retval)5556 sys_fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval)
5557 {
5558 return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0);
5559 }
5560
5561
5562 /*
5563 * sys_fstat
5564 *
5565 * Description: Get file status for the file associated with fd
5566 *
5567 * Parameters: p The process doing the fstat
5568 * uap->fd The fd to stat
5569 * uap->ub The user stat buffer
5570 *
5571 * Returns: 0 Success
5572 * !0 Errno (see fstat)
5573 */
5574 int
sys_fstat(proc_t p,struct fstat_args * uap,__unused int32_t * retval)5575 sys_fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval)
5576 {
5577 return fstat(p, uap->fd, uap->ub, 0, 0, 0);
5578 }
5579
5580
5581 /*
5582 * sys_fstat64_extended
5583 *
5584 * Description: Extended version of fstat64 supporting returning extended
5585 * security information
5586 *
5587 * Parameters: p The process doing the fstat
5588 * uap->fd The fd to stat
5589 * uap->ub The user stat buffer
5590 * uap->xsecurity The user extended security
5591 * buffer, or 0 if none
5592 * uap->xsecurity_size The size of xsecurity, or 0
5593 *
5594 * Returns: 0 Success
5595 * !0 Errno (see fstat)
5596 */
5597 int
sys_fstat64_extended(proc_t p,struct fstat64_extended_args * uap,__unused int32_t * retval)5598 sys_fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval)
5599 {
5600 return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1);
5601 }
5602
5603
5604 /*
5605 * sys_fstat64
5606 *
5607 * Description: Get 64 bit version of the file status for the file associated
5608 * with fd
5609 *
5610 * Parameters: p The process doing the fstat
5611 * uap->fd The fd to stat
5612 * uap->ub The user stat buffer
5613 *
5614 * Returns: 0 Success
5615 * !0 Errno (see fstat)
5616 */
5617 int
sys_fstat64(proc_t p,struct fstat64_args * uap,__unused int32_t * retval)5618 sys_fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval)
5619 {
5620 return fstat(p, uap->fd, uap->ub, 0, 0, 1);
5621 }
5622
5623
5624 /*
5625 * sys_fpathconf
5626 *
5627 * Description: Return pathconf information about a file descriptor.
5628 *
5629 * Parameters: p Process making the request
5630 * uap->fd fd to get information about
5631 * uap->name Name of information desired
5632 * retval Pointer to the call return area
5633 *
5634 * Returns: 0 Success
5635 * EINVAL
5636 * fp_lookup:EBADF Bad file descriptor
5637 * vnode_getwithref:???
5638 * vn_pathconf:???
5639 *
5640 * Implicit returns:
5641 * *retval (modified) Returned information (numeric)
5642 */
5643 int
sys_fpathconf(proc_t p,struct fpathconf_args * uap,int32_t * retval)5644 sys_fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval)
5645 {
5646 int fd = uap->fd;
5647 struct fileproc *fp;
5648 struct vnode *vp;
5649 int error = 0;
5650 file_type_t type;
5651
5652
5653 AUDIT_ARG(fd, uap->fd);
5654 if ((error = fp_lookup(p, fd, &fp, 0))) {
5655 return error;
5656 }
5657 type = fp->f_type;
5658
5659 switch (type) {
5660 case DTYPE_SOCKET:
5661 if (uap->name != _PC_PIPE_BUF) {
5662 error = EINVAL;
5663 goto out;
5664 }
5665 *retval = PIPE_BUF;
5666 error = 0;
5667 goto out;
5668
5669 case DTYPE_PIPE:
5670 if (uap->name != _PC_PIPE_BUF) {
5671 error = EINVAL;
5672 goto out;
5673 }
5674 *retval = PIPE_BUF;
5675 error = 0;
5676 goto out;
5677
5678 case DTYPE_VNODE:
5679 vp = (struct vnode *)fp_get_data(fp);
5680
5681 if ((error = vnode_getwithref(vp)) == 0) {
5682 AUDIT_ARG(vnpath, vp, ARG_VNODE1);
5683
5684 error = vn_pathconf(vp, uap->name, retval, vfs_context_current());
5685
5686 (void)vnode_put(vp);
5687 }
5688 goto out;
5689
5690 default:
5691 error = EINVAL;
5692 goto out;
5693 }
5694 /*NOTREACHED*/
5695 out:
5696 fp_drop(p, fd, fp, 0);
5697 return error;
5698 }
5699
5700 /*
5701 * sys_flock
5702 *
5703 * Description: Apply an advisory lock on a file descriptor.
5704 *
5705 * Parameters: p Process making request
5706 * uap->fd fd on which the lock is to be
5707 * attempted
5708 * uap->how (Un)Lock bits, including type
5709 * retval Pointer to the call return area
5710 *
5711 * Returns: 0 Success
5712 * fp_getfvp:EBADF Bad file descriptor
5713 * fp_getfvp:ENOTSUP fd does not refer to a vnode
5714 * vnode_getwithref:???
5715 * VNOP_ADVLOCK:???
5716 *
5717 * Implicit returns:
5718 * *retval (modified) Size of dtable
5719 *
5720 * Notes: Just attempt to get a record lock of the requested type on
5721 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
5722 */
5723 int
sys_flock(proc_t p,struct flock_args * uap,__unused int32_t * retval)5724 sys_flock(proc_t p, struct flock_args *uap, __unused int32_t *retval)
5725 {
5726 int fd = uap->fd;
5727 int how = uap->how;
5728 struct fileproc *fp;
5729 struct vnode *vp;
5730 struct flock lf;
5731 vfs_context_t ctx = vfs_context_current();
5732 int error = 0;
5733
5734 AUDIT_ARG(fd, uap->fd);
5735 if ((error = fp_getfvp(p, fd, &fp, &vp))) {
5736 return error;
5737 }
5738 if ((error = vnode_getwithref(vp))) {
5739 goto out1;
5740 }
5741 AUDIT_ARG(vnpath, vp, ARG_VNODE1);
5742
5743 lf.l_whence = SEEK_SET;
5744 lf.l_start = 0;
5745 lf.l_len = 0;
5746 if (how & LOCK_UN) {
5747 lf.l_type = F_UNLCK;
5748 error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
5749 goto out;
5750 }
5751 if (how & LOCK_EX) {
5752 lf.l_type = F_WRLCK;
5753 } else if (how & LOCK_SH) {
5754 lf.l_type = F_RDLCK;
5755 } else {
5756 error = EBADF;
5757 goto out;
5758 }
5759 #if CONFIG_MACF
5760 error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob, F_SETLK, &lf);
5761 if (error) {
5762 goto out;
5763 }
5764 #endif
5765 error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &lf,
5766 (how & LOCK_NB ? F_FLOCK : F_FLOCK | F_WAIT),
5767 ctx, NULL);
5768 if (!error) {
5769 os_atomic_or(&fp->fp_glob->fg_flag, FWASLOCKED, relaxed);
5770 }
5771 out:
5772 (void)vnode_put(vp);
5773 out1:
5774 fp_drop(p, fd, fp, 0);
5775 return error;
5776 }
5777
5778 /*
5779 * sys_fileport_makeport
5780 *
5781 * Description: Obtain a Mach send right for a given file descriptor.
5782 *
5783 * Parameters: p Process calling fileport
5784 * uap->fd The fd to reference
5785 * uap->portnamep User address at which to place port name.
5786 *
5787 * Returns: 0 Success.
5788 * EBADF Bad file descriptor.
5789 * EINVAL File descriptor had type that cannot be sent, misc. other errors.
5790 * EFAULT Address at which to store port name is not valid.
5791 * EAGAIN Resource shortage.
5792 *
5793 * Implicit returns:
5794 * On success, name of send right is stored at user-specified address.
5795 */
int
sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap,
    __unused int *retval)
{
	int err;
	int fd = uap->fd;
	user_addr_t user_portaddr = uap->portnamep;
	struct fileproc *fp = FILEPROC_NULL;
	struct fileglob *fg = NULL;
	ipc_port_t fileport;
	mach_port_name_t name = MACH_PORT_NULL;

	proc_fdlock(p);
	err = fp_lookup(p, fd, &fp, 1);
	if (err != 0) {
		goto out_unlock;
	}

	fg = fp->fp_glob;
	if (!fg_sendable(fg)) {
		/* this file's type cannot be transported in a fileport */
		err = EINVAL;
		goto out_unlock;
	}

	if (fp_isguarded(fp, GUARD_FILEPORT)) {
		/* guarded fds refuse fileport creation; raise the guard exception */
		err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
		goto out_unlock;
	}

	/* Dropped when port is deallocated */
	fg_ref(p, fg);

	proc_fdunlock(p);

	/* Allocate and initialize a port */
	fileport = fileport_alloc(fg);
	if (fileport == IPC_PORT_NULL) {
		/* the port never took ownership; undo the fg_ref() above */
		fg_drop_live(fg);
		err = EAGAIN;
		goto out;
	}

	/* Add an entry. Deallocates port on failure. */
	name = ipc_port_copyout_send(fileport, get_task_ipcspace(proc_task(p)));
	if (!MACH_PORT_VALID(name)) {
		err = EINVAL;
		goto out;
	}

	err = copyout(&name, user_portaddr, sizeof(mach_port_name_t));
	if (err != 0) {
		/* the out: path deallocates the port name we just created */
		goto out;
	}

	/* Tag the fileglob for debugging purposes */
	lck_mtx_lock_spin(&fg->fg_lock);
	fg->fg_lflags |= FG_PORTMADE;
	lck_mtx_unlock(&fg->fg_lock);

	fp_drop(p, fd, fp, 0);

	return 0;

out_unlock:
	proc_fdunlock(p);
out:
	if (MACH_PORT_VALID(name)) {
		/* Don't care if another thread races us to deallocate the entry */
		(void) mach_port_deallocate(get_task_ipcspace(proc_task(p)), name);
	}

	if (fp != FILEPROC_NULL) {
		fp_drop(p, fd, fp, 0);
	}

	return err;
}
5873
void
fileport_releasefg(struct fileglob *fg)
{
	/* Drop the fileglob reference that was taken when the fileport was made */
	(void)fg_drop(PROC_NULL, fg);
}
5879
5880 /*
5881 * fileport_makefd
5882 *
5883 * Description: Obtain the file descriptor for a given Mach send right.
5884 *
5885 * Returns: 0 Success
5886 * EINVAL Invalid Mach port name, or port is not for a file.
5887 * fdalloc:EMFILE
5888 * fdalloc:ENOMEM Unable to allocate fileproc or extend file table.
5889 *
5890 * Implicit returns:
5891 * *retval (modified) The new descriptor
5892 */
int
fileport_makefd(proc_t p, ipc_port_t port, fileproc_flags_t fp_flags, int *retval)
{
	struct fileglob *fg;
	struct fileproc *fp = FILEPROC_NULL;
	int fd;
	int err;

	/* resolve the port to its underlying fileglob; NULL if not a fileport */
	fg = fileport_port_to_fileglob(port);
	if (fg == NULL) {
		err = EINVAL;
		goto out;
	}

	fp = fileproc_alloc_init();

	proc_fdlock(p);
	err = fdalloc(p, 0, &fd);
	if (err != 0) {
		proc_fdunlock(p);
		goto out;
	}
	if (fp_flags) {
		/* e.g. FP_CLOEXEC requested by the caller */
		fp->fp_flags |= fp_flags;
	}

	/* the new descriptor takes its own reference on the fileglob */
	fp->fp_glob = fg;
	fg_ref(p, fg);

	/* publish the fileproc in the process fd table */
	procfdtbl_releasefd(p, fd, fp);
	proc_fdunlock(p);

	*retval = fd;
	err = 0;
out:
	/* on failure, free the fileproc that never made it into the table */
	if ((fp != NULL) && (0 != err)) {
		fileproc_free(fp);
	}

	return err;
}
5934
5935 /*
5936 * sys_fileport_makefd
5937 *
5938 * Description: Obtain the file descriptor for a given Mach send right.
5939 *
5940 * Parameters: p Process calling fileport
5941 * uap->port Name of send right to file port.
5942 *
5943 * Returns: 0 Success
5944 * EINVAL Invalid Mach port name, or port is not for a file.
5945 * fdalloc:EMFILE
5946 * fdalloc:ENOMEM Unable to allocate fileproc or extend file table.
5947 *
5948 * Implicit returns:
5949 * *retval (modified) The new descriptor
5950 */
5951 int
sys_fileport_makefd(proc_t p,struct fileport_makefd_args * uap,int32_t * retval)5952 sys_fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval)
5953 {
5954 ipc_port_t port = IPC_PORT_NULL;
5955 mach_port_name_t send = uap->port;
5956 kern_return_t res;
5957 int err;
5958
5959 res = ipc_object_copyin(get_task_ipcspace(proc_task(p)),
5960 send, MACH_MSG_TYPE_COPY_SEND, &port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
5961
5962 if (res == KERN_SUCCESS) {
5963 err = fileport_makefd(p, port, FP_CLOEXEC, retval);
5964 } else {
5965 err = EINVAL;
5966 }
5967
5968 if (IPC_PORT_NULL != port) {
5969 ipc_port_release_send(port);
5970 }
5971
5972 return err;
5973 }
5974
5975
5976 #pragma mark fileops wrappers
5977
5978 /*
5979 * fo_read
5980 *
5981 * Description: Generic fileops read indirected through the fileops pointer
5982 * in the fileproc structure
5983 *
5984 * Parameters: fp fileproc structure pointer
5985 * uio user I/O structure pointer
5986 * flags FOF_ flags
5987 * ctx VFS context for operation
5988 *
5989 * Returns: 0 Success
5990 * !0 Errno from read
5991 */
5992 int
fo_read(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)5993 fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
5994 {
5995 return (*fp->f_ops->fo_read)(fp, uio, flags, ctx);
5996 }
5997
5998 int
fo_no_read(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)5999 fo_no_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6000 {
6001 #pragma unused(fp, uio, flags, ctx)
6002 return ENXIO;
6003 }
6004
6005
6006 /*
6007 * fo_write
6008 *
6009 * Description: Generic fileops write indirected through the fileops pointer
6010 * in the fileproc structure
6011 *
6012 * Parameters: fp fileproc structure pointer
6013 * uio user I/O structure pointer
6014 * flags FOF_ flags
6015 * ctx VFS context for operation
6016 *
6017 * Returns: 0 Success
6018 * !0 Errno from write
6019 */
6020 int
fo_write(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)6021 fo_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6022 {
6023 return (*fp->f_ops->fo_write)(fp, uio, flags, ctx);
6024 }
6025
6026 int
fo_no_write(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)6027 fo_no_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6028 {
6029 #pragma unused(fp, uio, flags, ctx)
6030 return ENXIO;
6031 }
6032
6033
6034 /*
6035 * fo_ioctl
6036 *
6037 * Description: Generic fileops ioctl indirected through the fileops pointer
6038 * in the fileproc structure
6039 *
6040 * Parameters: fp fileproc structure pointer
6041 * com ioctl command
6042 * data pointer to internalized copy
6043 * of user space ioctl command
6044 * parameter data in kernel space
6045 * ctx VFS context for operation
6046 *
6047 * Returns: 0 Success
6048 * !0 Errno from ioctl
6049 *
6050 * Locks: The caller is assumed to have held the proc_fdlock; this
6051 * function releases and reacquires this lock. If the caller
6052 * accesses data protected by this lock prior to calling this
6053 * function, it will need to revalidate/reacquire any cached
6054 * protected data obtained prior to the call.
6055 */
6056 int
fo_ioctl(struct fileproc * fp,u_long com,caddr_t data,vfs_context_t ctx)6057 fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
6058 {
6059 int error;
6060
6061 proc_fdunlock(vfs_context_proc(ctx));
6062 error = (*fp->f_ops->fo_ioctl)(fp, com, data, ctx);
6063 proc_fdlock(vfs_context_proc(ctx));
6064 return error;
6065 }
6066
6067 int
fo_no_ioctl(struct fileproc * fp,u_long com,caddr_t data,vfs_context_t ctx)6068 fo_no_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
6069 {
6070 #pragma unused(fp, com, data, ctx)
6071 return ENOTTY;
6072 }
6073
6074
6075 /*
6076 * fo_select
6077 *
6078 * Description: Generic fileops select indirected through the fileops pointer
6079 * in the fileproc structure
6080 *
6081 * Parameters: fp fileproc structure pointer
6082 * which select which
6083 * wql pointer to wait queue list
6084 * ctx VFS context for operation
6085 *
6086 * Returns: 0 Success
6087 * !0 Errno from select
6088 */
6089 int
fo_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)6090 fo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
6091 {
6092 return (*fp->f_ops->fo_select)(fp, which, wql, ctx);
6093 }
6094
6095 int
fo_no_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)6096 fo_no_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
6097 {
6098 #pragma unused(fp, which, wql, ctx)
6099 return ENOTSUP;
6100 }
6101
6102
6103 /*
6104 * fo_close
6105 *
6106 * Description: Generic fileops close indirected through the fileops pointer
6107 * in the fileproc structure
6108 *
 * Parameters:	fg			fileglob structure pointer for
 *					file to close
6111 * ctx VFS context for operation
6112 *
6113 * Returns: 0 Success
6114 * !0 Errno from close
6115 */
6116 int
fo_close(struct fileglob * fg,vfs_context_t ctx)6117 fo_close(struct fileglob *fg, vfs_context_t ctx)
6118 {
6119 return (*fg->fg_ops->fo_close)(fg, ctx);
6120 }
6121
6122
6123 /*
6124 * fo_drain
6125 *
 * Description: Generic fileops drain indirected through the fileops
 *		pointer in the fileproc structure
6128 *
6129 * Parameters: fp fileproc structure pointer
6130 * ctx VFS context for operation
6131 *
6132 * Returns: 0 Success
6133 * !0 errno from drain
6134 */
6135 int
fo_drain(struct fileproc * fp,vfs_context_t ctx)6136 fo_drain(struct fileproc *fp, vfs_context_t ctx)
6137 {
6138 return (*fp->f_ops->fo_drain)(fp, ctx);
6139 }
6140
6141 int
fo_no_drain(struct fileproc * fp,vfs_context_t ctx)6142 fo_no_drain(struct fileproc *fp, vfs_context_t ctx)
6143 {
6144 #pragma unused(fp, ctx)
6145 return ENOTSUP;
6146 }
6147
6148
6149 /*
6150 * fo_kqfilter
6151 *
6152 * Description: Generic fileops kqueue filter indirected through the fileops
6153 * pointer in the fileproc structure
6154 *
6155 * Parameters: fp fileproc structure pointer
6156 * kn pointer to knote to filter on
6157 *
6158 * Returns: (kn->kn_flags & EV_ERROR) error in kn->kn_data
6159 * 0 Filter is not active
6160 * !0 Filter is active
6161 */
6162 int
fo_kqfilter(struct fileproc * fp,struct knote * kn,struct kevent_qos_s * kev)6163 fo_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6164 {
6165 return (*fp->f_ops->fo_kqfilter)(fp, kn, kev);
6166 }
6167
6168 int
fo_no_kqfilter(struct fileproc * fp,struct knote * kn,struct kevent_qos_s * kev)6169 fo_no_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6170 {
6171 #pragma unused(fp, kev)
6172 knote_set_error(kn, ENOTSUP);
6173 return 0;
6174 }
6175