xref: /xnu-11417.121.6/bsd/kern/kern_descrip.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_descrip.c	8.8 (Berkeley) 2/14/95
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/file_internal.h>
83 #include <sys/guarded.h>
84 #include <sys/priv.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/stat.h>
88 #include <sys/ioctl.h>
89 #include <sys/fcntl.h>
90 #include <sys/fsctl.h>
91 #include <sys/malloc.h>
92 #include <sys/mman.h>
93 #include <sys/mount.h>
94 #include <sys/syslog.h>
95 #include <sys/unistd.h>
96 #include <sys/resourcevar.h>
97 #include <sys/aio_kern.h>
98 #include <sys/ev.h>
99 #include <kern/locks.h>
100 #include <sys/uio_internal.h>
101 #include <sys/codesign.h>
102 #include <sys/codedir_internal.h>
103 #include <sys/mount_internal.h>
104 #include <sys/kdebug.h>
105 #include <sys/sysproto.h>
106 #include <sys/pipe.h>
107 #include <sys/spawn.h>
108 #include <sys/cprotect.h>
109 #include <sys/ubc_internal.h>
110 
111 #include <kern/kern_types.h>
112 #include <kern/kalloc.h>
113 #include <kern/waitq.h>
114 #include <kern/ipc_kobject.h>
115 #include <kern/ipc_misc.h>
116 #include <kern/ast.h>
117 
118 #include <vm/vm_protos.h>
119 #include <mach/mach_port.h>
120 
121 #include <security/audit/audit.h>
122 #if CONFIG_MACF
123 #include <security/mac_framework.h>
124 #endif
125 
126 #include <stdbool.h>
127 #include <os/atomic_private.h>
128 #include <os/overflow.h>
129 #include <IOKit/IOBSD.h>
130 
131 void fileport_releasefg(struct fileglob *fg);
132 
133 /* flags for fp_close_and_unlock */
134 #define FD_DUP2RESV 1
135 
136 /* We don't want these exported */
137 
138 __private_extern__
139 int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);
140 
141 /* Conflict wait queue for when selects collide (opaque type) */
142 extern struct waitq select_conflict_queue;
143 
144 #define f_flag fp_glob->fg_flag
145 #define f_type fp_glob->fg_ops->fo_type
146 #define f_cred fp_glob->fg_cred
147 #define f_ops fp_glob->fg_ops
148 #define f_offset fp_glob->fg_offset
149 
150 ZONE_DEFINE_TYPE(fg_zone, "fileglob", struct fileglob, ZC_ZFREE_CLEARMEM);
151 ZONE_DEFINE_ID(ZONE_ID_FILEPROC, "fileproc", struct fileproc, ZC_ZFREE_CLEARMEM);
152 
153 /*
154  * Descriptor management.
155  */
156 int nfiles;                     /* actual number of open files */
157 /*
158  * "uninitialized" ops -- ensure FILEGLOB_DTYPE(fg) always exists
159  */
160 static const struct fileops uninitops;
161 
162 os_refgrp_decl(, f_refgrp, "files refcounts", NULL);
163 static LCK_GRP_DECLARE(file_lck_grp, "file");
164 
165 
166 #pragma mark fileglobs
167 
168 /*!
169  * @function fg_alloc_init
170  *
171  * @brief
172  * Allocate and minimally initialize a file structure.
173  */
struct fileglob *
fg_alloc_init(vfs_context_t ctx)
{
	struct fileglob *fg;

	/* Z_ZERO: every field starts zeroed; only non-zero state is set below */
	fg = zalloc_flags(fg_zone, Z_WAITOK | Z_ZERO);
	lck_mtx_init(&fg->fg_lock, &file_lck_grp, LCK_ATTR_NULL);

	/* new fileglob starts life with a single reference */
	os_ref_init_raw(&fg->fg_count, &f_refgrp);
	/* ensure FILEGLOB_DTYPE(fg) is always valid, even before real fo_* setup */
	fg->fg_ops = &uninitops;

	/* stash a reference on the caller's credential; dropped in fg_free() */
	kauth_cred_ref(ctx->vc_ucred);
	fg->fg_cred = ctx->vc_ucred;

	/* global open-file accounting, decremented in fg_free() */
	os_atomic_inc(&nfiles, relaxed);

	return fg;
}
192 
193 /*!
194  * @function fg_free
195  *
196  * @brief
197  * Free a file structure.
198  */
static void
fg_free(struct fileglob *fg)
{
	/* keep the global open-file count in sync with fg_alloc_init() */
	os_atomic_dec(&nfiles, relaxed);

	/* release the per-vnode private data, if any was attached */
	if (fg->fg_vn_data) {
		fg_vn_data_free(fg->fg_vn_data);
		fg->fg_vn_data = NULL;
	}

	/* drop the credential reference taken in fg_alloc_init() */
	kauth_cred_t cred = fg->fg_cred;
	if (IS_VALID_CRED(cred)) {
		kauth_cred_unref(&cred);
		fg->fg_cred = NOCRED;
	}
	lck_mtx_destroy(&fg->fg_lock, &file_lck_grp);

#if CONFIG_MACF && CONFIG_VNGUARD
	/* tear down the vnode-guard label before the memory is recycled */
	vng_file_label_destroy(fg);
#endif
	zfree(fg_zone, fg);
}
221 
OS_ALWAYS_INLINE
void
fg_ref(proc_t p, struct fileglob *fg)
{
#if DEBUG || DEVELOPMENT
	/* Allow fileglob refs to be taken outside of a process context. */
	if (p != FG_NOPROC) {
		/* on DEBUG kernels, enforce that the caller holds the fd lock */
		proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
	}
#else
	(void)p;
#endif
	/* take an additional reference on the fileglob */
	os_ref_retain_raw(&fg->fg_count, &f_refgrp);
}
236 
/*
 * Drop a fileglob reference that is known not to be the last one,
 * so no close/free can happen here (os_ref_release_live enforces that
 * the count stays non-zero).
 */
void
fg_drop_live(struct fileglob *fg)
{
	os_ref_release_live_raw(&fg->fg_count, &f_refgrp);
}
242 
/*
 * fg_drop
 *
 * Drop a reference on a fileglob; on the last reference, invoke
 * fo_close() on it and free the structure.  Also releases any POSIX
 * advisory locks held by `p` on a vnode-backed file, as required by
 * POSIX close() semantics.  `p` may be FG_NOPROC when the drop happens
 * outside of a process context.
 *
 * Returns 0 or the error from vnode_getwithref()/fo_close().
 */
int
fg_drop(proc_t p, struct fileglob *fg)
{
	struct vnode *vp;
	struct vfs_context context;
	int error = 0;

	if (fg == NULL) {
		return 0;
	}

	/* Set up context with cred stashed in fg */
	if (p == current_proc()) {
		context.vc_thread = current_thread();
	} else {
		/* dropping on behalf of another proc: no thread context available */
		context.vc_thread = NULL;
	}
	context.vc_ucred = fg->fg_cred;

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != FG_NOPROC && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
	    (p->p_ladvflag & P_LADVLOCK)) {
		struct flock lf = {
			.l_whence = SEEK_SET,
			.l_type = F_UNLCK,
		};

		vp = (struct vnode *)fg_get_data(fg);
		if ((error = vnode_getwithref(vp)) == 0) {
			/* unlock failures are ignored; the close proceeds regardless */
			(void)VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL);
			(void)vnode_put(vp);
		}
	}

	if (os_ref_release_raw(&fg->fg_count, &f_refgrp) == 0) {
		/*
		 * Since we ensure that fg->fg_ops is always initialized,
		 * it is safe to invoke fo_close on the fg
		 */
		error = fo_close(fg, &context);

		fg_free(fg);
	}

	return error;
}
296 
/*
 * Store the type-specific payload pointer (vnode, socket, pipe, ...)
 * into fg->fg_data.  When pointer authentication is enabled, a non-NULL
 * pointer is signed with a discriminator blended from the storage
 * address and the descriptor type, so it can only be read back through
 * a matching fg_get_data*() on the same fileglob.
 */
inline
void
fg_set_data(
	struct fileglob *fg,
	void *fg_data)
{
	uintptr_t *store = &fg->fg_data;

#if __has_feature(ptrauth_calls)
	int type = FILEGLOB_DTYPE(fg);

	/* NULL is stored unsigned so zeroed memory remains a valid "no data" */
	if (fg_data) {
		type ^= OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data");
		fg_data = ptrauth_sign_unauthenticated(fg_data,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)fg_data;
}
318 
/*
 * Read back the payload pointer stored by fg_set_data().  When pointer
 * authentication is enabled, a non-NULL value is authenticated against
 * the same storage-address/type discriminator used at signing time;
 * a mismatch traps rather than yielding a forged pointer.
 */
inline
void *
fg_get_data_volatile(struct fileglob *fg)
{
	uintptr_t *store = &fg->fg_data;
	void *fg_data = (void *)*store;

#if __has_feature(ptrauth_calls)
	int type = FILEGLOB_DTYPE(fg);

	/* NULL was stored unsigned, so only authenticate non-NULL values */
	if (fg_data) {
		type ^= OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data");
		fg_data = ptrauth_auth_data(fg_data,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#endif // __has_feature(ptrauth_calls)

	return fg_data;
}
339 
/*
 * fg_transfer_filelocks
 *
 * During exec, move advisory locks on a vnode-backed file from the
 * current (old) process to the new proc `p` via the F_TRANSFER
 * operation: first POSIX per-process locks (only if the old image had
 * taken any, per P_LADVLOCK), then OFD-style locks owned by the
 * fileglob itself (FG_HAS_OFDLOCK).  Transfer failures are ignored.
 */
static void
fg_transfer_filelocks(proc_t p, struct fileglob *fg, thread_t thread)
{
	struct vnode *vp;
	struct vfs_context context;
	struct proc *old_proc = current_proc();

	assert(fg != NULL);

	/* the target proc must be distinct from the one we transfer away from */
	assert(p != old_proc);
	context.vc_thread = thread;
	context.vc_ucred = fg->fg_cred;

	/* Transfer all POSIX Style locks to new proc */
	if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
	    (p->p_ladvflag & P_LADVLOCK)) {
		/* whole-file range: start 0, len 0 == "to end of file" */
		struct flock lf = {
			.l_whence = SEEK_SET,
			.l_start = 0,
			.l_len = 0,
			.l_type = F_TRANSFER,
		};

		vp = (struct vnode *)fg_get_data(fg);
		if (vnode_getwithref(vp) == 0) {
			(void)VNOP_ADVLOCK(vp, (caddr_t)old_proc, F_TRANSFER, &lf, F_POSIX, &context, NULL);
			(void)vnode_put(vp);
		}
	}

	/* Transfer all OFD Style locks to new proc */
	if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
	    (fg->fg_lflags & FG_HAS_OFDLOCK)) {
		struct flock lf = {
			.l_whence = SEEK_SET,
			.l_start = 0,
			.l_len = 0,
			.l_type = F_TRANSFER,
		};

		vp = (struct vnode *)fg_get_data(fg);
		if (vnode_getwithref(vp) == 0) {
			/* OFD locks are owned by the fileglob, not the proc */
			(void)VNOP_ADVLOCK(vp, ofd_to_id(fg), F_TRANSFER, &lf, F_OFD_LOCK, &context, NULL);
			(void)vnode_put(vp);
		}
	}
	return;
}
388 
389 bool
fg_sendable(struct fileglob * fg)390 fg_sendable(struct fileglob *fg)
391 {
392 	switch (FILEGLOB_DTYPE(fg)) {
393 	case DTYPE_VNODE:
394 	case DTYPE_SOCKET:
395 	case DTYPE_PIPE:
396 	case DTYPE_PSXSHM:
397 	case DTYPE_NETPOLICY:
398 		return (fg->fg_lflags & FG_CONFINED) == 0;
399 
400 	default:
401 		return false;
402 	}
403 }
404 
405 #pragma mark file descriptor table (static helpers)
406 
407 static void
procfdtbl_reservefd(struct proc * p,int fd)408 procfdtbl_reservefd(struct proc * p, int fd)
409 {
410 	p->p_fd.fd_ofiles[fd] = NULL;
411 	p->p_fd.fd_ofileflags[fd] |= UF_RESERVED;
412 }
413 
414 void
procfdtbl_releasefd(struct proc * p,int fd,struct fileproc * fp)415 procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp)
416 {
417 	if (fp != NULL) {
418 		p->p_fd.fd_ofiles[fd] = fp;
419 	}
420 	p->p_fd.fd_ofileflags[fd] &= ~UF_RESERVED;
421 	if ((p->p_fd.fd_ofileflags[fd] & UF_RESVWAIT) == UF_RESVWAIT) {
422 		p->p_fd.fd_ofileflags[fd] &= ~UF_RESVWAIT;
423 		wakeup(&p->p_fd);
424 	}
425 }
426 
/*
 * Block until a reserved fd slot is released.  Sets UF_RESVWAIT so
 * procfdtbl_releasefd()/procfdtbl_clearfd() know to wake us; msleep
 * drops and retakes fd_lock, which the caller must hold.
 */
static void
procfdtbl_waitfd(struct proc * p, int fd)
{
	p->p_fd.fd_ofileflags[fd] |= UF_RESVWAIT;
	msleep(&p->p_fd, &p->p_fd.fd_lock, PRIBIO, "ftbl_waitfd", NULL);
}
433 
434 static void
procfdtbl_clearfd(struct proc * p,int fd)435 procfdtbl_clearfd(struct proc * p, int fd)
436 {
437 	int waiting;
438 
439 	waiting = (p->p_fd.fd_ofileflags[fd] & UF_RESVWAIT);
440 	p->p_fd.fd_ofiles[fd] = NULL;
441 	p->p_fd.fd_ofileflags[fd] = 0;
442 	if (waiting == UF_RESVWAIT) {
443 		wakeup(&p->p_fd);
444 	}
445 }
446 
447 /*
448  * fdrelse
449  *
450  * Description:	Inline utility function to free an fd in a filedesc
451  *
452  * Parameters:	fdp				Pointer to filedesc fd lies in
453  *		fd				fd to free
454  *		reserv				fd should be reserved
455  *
456  * Returns:	void
457  *
458  * Locks:	Assumes proc_fdlock for process pointing to fdp is held by
459  *		the caller
460  */
void
fdrelse(struct proc * p, int fd)
{
	struct filedesc *fdp = &p->p_fd;
	int nfd = 0;

	/* track the lowest possibly-free slot so allocation scans start there */
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
#if DIAGNOSTIC
	if (fd >= fdp->fd_afterlast) {
		panic("fdrelse: fd_afterlast inconsistent");
	}
#endif
	procfdtbl_clearfd(p, fd);

	/* shrink fd_afterlast over any trailing run of free, unreserved slots */
	nfd = fdp->fd_afterlast;
	while (nfd > 0 && fdp->fd_ofiles[nfd - 1] == NULL &&
	    !(fdp->fd_ofileflags[nfd - 1] & UF_RESERVED)) {
		nfd--;
	}
	fdp->fd_afterlast = nfd;

#if CONFIG_PROC_RESOURCE_LIMITS
	fdp->fd_nfiles_open--;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
}
488 
489 
490 /*
491  * finishdup
492  *
493  * Description:	Common code for dup, dup2, and fcntl(F_DUPFD).
494  *
495  * Parameters:	p				Process performing the dup
496  *		old				The fd to dup
497  *		new				The fd to dup it to
498  *		fp_flags			Flags to augment the new fp
499  *		retval				Pointer to the call return area
500  *
501  * Returns:	0				Success
502  *		EBADF
503  *		ENOMEM
504  *
505  * Implicit returns:
506  *		*retval (modified)		The new descriptor
507  *
508  * Locks:	Assumes proc_fdlock for process pointing to fdp is held by
509  *		the caller
510  *
511  * Notes:	This function may drop and reacquire this lock; it is unsafe
512  *		for a caller to assume that other state protected by the lock
513  *		has not been subsequently changed out from under it.
514  */
static int
finishdup(
	proc_t                  p,
	kauth_cred_t            p_cred,
	int                     old,
	int                     new,
	fileproc_flags_t        fp_flags,
	int32_t                *retval)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *nfp;
	struct fileproc *ofp;
#if CONFIG_MACF
	int error;
#endif

#if DIAGNOSTIC
	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif
	/* the source fd must be open and settled (not mid-reservation) */
	if ((ofp = fdp->fd_ofiles[old]) == NULL ||
	    (fdp->fd_ofileflags[old] & UF_RESERVED)) {
		fdrelse(p, new);
		return EBADF;
	}

#if CONFIG_MACF
	/* MAC policy veto of the dup; on refusal, release the reserved slot */
	error = mac_file_check_dup(p_cred, ofp->fp_glob, new);

	if (error) {
		fdrelse(p, new);
		return error;
	}
#else
	(void)p_cred;
#endif

	/* take the fileglob reference before dropping the fd lock */
	fg_ref(p, ofp->fp_glob);

	/* drop the lock across the (possibly blocking) fileproc allocation */
	proc_fdunlock(p);

	nfp = fileproc_alloc_init();

	if (fp_flags) {
		nfp->fp_flags |= fp_flags;
	}
	/* both descriptors now share the same open-file state */
	nfp->fp_glob = ofp->fp_glob;

	proc_fdlock(p);

#if DIAGNOSTIC
	if (fdp->fd_ofiles[new] != 0) {
		panic("finishdup: overwriting fd_ofiles with new %d", new);
	}
	if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) {
		panic("finishdup: unreserved fileflags with new %d", new);
	}
#endif

	if (new >= fdp->fd_afterlast) {
		fdp->fd_afterlast = new + 1;
	}
	/* publish the new fileproc and clear the slot's reservation */
	procfdtbl_releasefd(p, new, nfp);
	*retval = new;
	return 0;
}
580 
581 
582 #pragma mark file descriptor table (exported functions)
583 
/* Take the per-process cwd/root directories rwlock for reading. */
void
proc_dirs_lock_shared(proc_t p)
{
	lck_rw_lock_shared(&p->p_fd.fd_dirs_lock);
}
589 
/* Release a shared hold of the per-process cwd/root directories rwlock. */
void
proc_dirs_unlock_shared(proc_t p)
{
	lck_rw_unlock_shared(&p->p_fd.fd_dirs_lock);
}
595 
/* Take the per-process cwd/root directories rwlock for writing. */
void
proc_dirs_lock_exclusive(proc_t p)
{
	lck_rw_lock_exclusive(&p->p_fd.fd_dirs_lock);
}
601 
/* Release an exclusive hold of the per-process cwd/root directories rwlock. */
void
proc_dirs_unlock_exclusive(proc_t p)
{
	lck_rw_unlock_exclusive(&p->p_fd.fd_dirs_lock);
}
607 
608 /*
609  * proc_fdlock, proc_fdlock_spin
610  *
611  * Description:	Lock to control access to the per process struct fileproc
612  *		and struct filedesc
613  *
614  * Parameters:	p				Process to take the lock on
615  *
616  * Returns:	void
617  *
618  * Notes:	The lock is initialized in forkproc() and destroyed in
619  *		reap_child_process().
620  */
void
proc_fdlock(proc_t p)
{
	/* plain mutex acquire; see proc_fdlock_spin() for the spin variant */
	lck_mtx_lock(&p->p_fd.fd_lock);
}
626 
void
proc_fdlock_spin(proc_t p)
{
	/* spin-mode acquire of the same fd_lock mutex */
	lck_mtx_lock_spin(&p->p_fd.fd_lock);
}
632 
/* Assert ownership state of the fd lock (LCK_MTX_ASSERT_OWNED/NOTOWNED). */
void
proc_fdlock_assert(proc_t p, int assertflags)
{
	lck_mtx_assert(&p->p_fd.fd_lock, assertflags);
}
638 
639 
640 /*
641  * proc_fdunlock
642  *
643  * Description:	Unlock the lock previously locked by a call to proc_fdlock()
644  *
645  * Parameters:	p				Process to drop the lock on
646  *
647  * Returns:	void
648  */
void
proc_fdunlock(proc_t p)
{
	/* pairs with proc_fdlock()/proc_fdlock_spin() */
	lck_mtx_unlock(&p->p_fd.fd_lock);
}
654 
/*
 * fdt_available_locked
 *
 * Return true if process `p` can open at least `n` more descriptors:
 * either the table can still grow toward the RLIMIT_NOFILE ceiling,
 * or enough already-allocated slots are free (NULL and not reserved).
 * Caller must hold the proc fd lock.
 */
bool
fdt_available_locked(proc_t p, int n)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc **fpp;
	char *flags;
	int i;
	int lim = proc_limitgetcur_nofile(p);

	/* headroom below the rlimit counts toward n; satisfied already? */
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		return true;
	}
	/* otherwise scan free slots starting at the free-slot hint */
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	flags = &fdp->fd_ofileflags[fdp->fd_freefile];
	for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++) {
		if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0) {
			return true;
		}
	}
	return false;
}
676 
677 
678 struct fdt_iterator
fdt_next(proc_t p,int fd,bool only_settled)679 fdt_next(proc_t p, int fd, bool only_settled)
680 {
681 	struct fdt_iterator it;
682 	struct filedesc *fdp = &p->p_fd;
683 	struct fileproc *fp;
684 	int nfds = fdp->fd_afterlast;
685 
686 	while (++fd < nfds) {
687 		fp = fdp->fd_ofiles[fd];
688 		if (fp == NULL || fp->fp_glob == NULL) {
689 			continue;
690 		}
691 		if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
692 			continue;
693 		}
694 		it.fdti_fd = fd;
695 		it.fdti_fp = fp;
696 		return it;
697 	}
698 
699 	it.fdti_fd = nfds;
700 	it.fdti_fp = NULL;
701 	return it;
702 }
703 
704 struct fdt_iterator
fdt_prev(proc_t p,int fd,bool only_settled)705 fdt_prev(proc_t p, int fd, bool only_settled)
706 {
707 	struct fdt_iterator it;
708 	struct filedesc *fdp = &p->p_fd;
709 	struct fileproc *fp;
710 
711 	while (--fd >= 0) {
712 		fp = fdp->fd_ofiles[fd];
713 		if (fp == NULL || fp->fp_glob == NULL) {
714 			continue;
715 		}
716 		if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
717 			continue;
718 		}
719 		it.fdti_fd = fd;
720 		it.fdti_fp = fp;
721 		return it;
722 	}
723 
724 	it.fdti_fd = -1;
725 	it.fdti_fp = NULL;
726 	return it;
727 }
728 
/*
 * Initialize the locks of a freshly created filedesc; undone by
 * fdt_destroy().
 */
void
fdt_init(proc_t p)
{
	struct filedesc *fdp = &p->p_fd;

	lck_mtx_init(&fdp->fd_kqhashlock, &proc_kqhashlock_grp, &proc_lck_attr);
	lck_mtx_init(&fdp->fd_knhashlock, &proc_knhashlock_grp, &proc_lck_attr);
	lck_mtx_init(&fdp->fd_lock, &proc_fdmlock_grp, &proc_lck_attr);
	lck_rw_init(&fdp->fd_dirs_lock, &proc_dirslock_grp, &proc_lck_attr);
}
739 
/*
 * Destroy the filedesc locks set up by fdt_init().
 */
void
fdt_destroy(proc_t p)
{
	struct filedesc *fdp = &p->p_fd;

	lck_mtx_destroy(&fdp->fd_kqhashlock, &proc_kqhashlock_grp);
	lck_mtx_destroy(&fdp->fd_knhashlock, &proc_knhashlock_grp);
	lck_mtx_destroy(&fdp->fd_lock, &proc_fdmlock_grp);
	lck_rw_destroy(&fdp->fd_dirs_lock, &proc_dirslock_grp);
}
750 
/*
 * fdt_exec
 *
 * Prepare `p`'s descriptor table for a new image during exec/posix_spawn:
 * unbind any workq/workloop-bound servicing thread, drop all knotes,
 * close descriptors that must not survive (FP_CLOEXEC, MAC-vetoed, or —
 * under POSIX_SPAWN_CLOEXEC_DEFAULT — everything not explicitly marked
 * UF_INHERIT), transfer advisory file locks to the new proc when
 * `in_exec`, and release the per-process workq kqueue.
 */
void
fdt_exec(proc_t p, kauth_cred_t p_cred, short posix_spawn_flags, thread_t thread, bool in_exec)
{
	struct filedesc *fdp = &p->p_fd;
	thread_t self = current_thread();
	struct uthread *ut = get_bsdthread_info(self);
	struct kqworkq *dealloc_kqwq = NULL;

	/*
	 * If the current thread is bound as a workq/workloop
	 * servicing thread, we need to unbind it first.
	 */
	if (ut->uu_kqr_bound && get_bsdthreadtask_info(self) == p) {
		kqueue_threadreq_unbind(p, ut->uu_kqr_bound);
	}

	/*
	 * Deallocate the knotes for this process
	 * and mark the tables non-existent so
	 * subsequent kqueue closes go faster.
	 */
	knotes_dealloc(p);
	assert(fdp->fd_knlistsize == 0);
	assert(fdp->fd_knhashmask == 0);

	proc_fdlock(p);

	/* Set the P_LADVLOCK flag if the flag set on old proc */
	if (in_exec && (current_proc()->p_ladvflag & P_LADVLOCK)) {
		os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
	}

	/* walk the table top-down, deciding each descriptor's fate */
	for (int i = fdp->fd_afterlast; i-- > 0;) {
		struct fileproc *fp = fdp->fd_ofiles[i];
		char *flagp = &fdp->fd_ofileflags[i];
		bool inherit_file = true;

		if (fp == FILEPROC_NULL) {
			continue;
		}

		/*
		 * no file descriptor should be in flux when in exec,
		 * because we stopped all other threads
		 */
		if (*flagp & ~UF_INHERIT) {
			panic("file %d/%p in flux during exec of %p", i, fp, p);
		}

		if (fp->fp_flags & FP_CLOEXEC) {
			inherit_file = false;
		} else if ((posix_spawn_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) &&
		    !(*flagp & UF_INHERIT)) {
			/*
			 * Reverse the usual semantics of file descriptor
			 * inheritance - all of them should be closed
			 * except files marked explicitly as "inherit" and
			 * not marked close-on-exec.
			 */
			inherit_file = false;
#if CONFIG_MACF
		} else if (mac_file_check_inherit(p_cred, fp->fp_glob)) {
			inherit_file = false;
#endif
		}

		*flagp = 0; /* clear UF_INHERIT */

		if (!inherit_file) {
			/* fp_close_and_unlock drops the fd lock; retake it */
			fp_close_and_unlock(p, p_cred, i, fp, 0);
			proc_fdlock(p);
		} else if (in_exec) {
			/* Transfer F_POSIX style lock to new proc */
			proc_fdunlock(p);
			fg_transfer_filelocks(p, fp->fp_glob, thread);
			proc_fdlock(p);
		}
	}

	/* release the per-process workq kq */
	if (fdp->fd_wqkqueue) {
		dealloc_kqwq = fdp->fd_wqkqueue;
		fdp->fd_wqkqueue = NULL;
	}

	proc_fdunlock(p);

	/* Anything to free? */
	if (dealloc_kqwq) {
		/* deallocated outside the fd lock */
		kqworkq_dealloc(dealloc_kqwq);
	}
}
843 
844 
/*
 * fdt_fork
 *
 * Populate `newfdp` as a copy of process `p`'s descriptor table for
 * fork (or exec when `in_exec`).  Inherits flags/cmask/resource limits,
 * takes new references on the root (chroot) and current directories,
 * then duplicates each eligible fileproc, taking a fileglob reference
 * for every copied descriptor.  Descriptors that are confined,
 * close-on-fork (fork) or close-on-exec (exec), or mid-reservation are
 * skipped.
 *
 * Returns 0, EPERM if the chroot directory could not be re-referenced
 * (which would otherwise allow a chroot escape), or ENOMEM.
 */
int
fdt_fork(struct filedesc *newfdp, proc_t p, vnode_t uth_cdir, bool in_exec)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc **ofiles;
	char *ofileflags;
	int n_files, afterlast, freefile;
	vnode_t v_dir;
#if CONFIG_PROC_RESOURCE_LIMITS
	int fd_nfiles_open = 0;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	proc_fdlock(p);

	newfdp->fd_flags = (fdp->fd_flags & FILEDESC_FORK_INHERITED_MASK);
	newfdp->fd_cmask = fdp->fd_cmask;
#if CONFIG_PROC_RESOURCE_LIMITS
	newfdp->fd_nfiles_soft_limit = fdp->fd_nfiles_soft_limit;
	newfdp->fd_nfiles_hard_limit = fdp->fd_nfiles_hard_limit;

	newfdp->kqwl_dyn_soft_limit = fdp->kqwl_dyn_soft_limit;
	newfdp->kqwl_dyn_hard_limit = fdp->kqwl_dyn_hard_limit;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	/*
	 * For both fd_cdir and fd_rdir make sure we get
	 * a valid reference... if we can't, than set
	 * set the pointer(s) to NULL in the child... this
	 * will keep us from using a non-referenced vp
	 * and allows us to do the vnode_rele only on
	 * a properly referenced vp
	 */
	if ((v_dir = fdp->fd_rdir)) {
		if (vnode_getwithref(v_dir) == 0) {
			if (vnode_ref(v_dir) == 0) {
				newfdp->fd_rdir = v_dir;
			}
			vnode_put(v_dir);
		}
		if (newfdp->fd_rdir == NULL) {
			/*
			 * We couldn't get a new reference on
			 * the chroot directory being
			 * inherited... this is fatal, since
			 * otherwise it would constitute an
			 * escape from a chroot environment by
			 * the new process.
			 */
			proc_fdunlock(p);
			return EPERM;
		}
	}

	/*
	 * If we are running with per-thread current working directories,
	 * inherit the new current working directory from the current thread.
	 */
	if ((v_dir = uth_cdir ? uth_cdir : fdp->fd_cdir)) {
		if (vnode_getwithref(v_dir) == 0) {
			if (vnode_ref(v_dir) == 0) {
				newfdp->fd_cdir = v_dir;
			}
			vnode_put(v_dir);
		}
		if (newfdp->fd_cdir == NULL && v_dir == fdp->fd_cdir) {
			/*
			 * we couldn't get a new reference on
			 * the current working directory being
			 * inherited... we might as well drop
			 * our reference from the parent also
			 * since the vnode has gone DEAD making
			 * it useless... by dropping it we'll
			 * be that much closer to recycling it
			 */
			vnode_rele(fdp->fd_cdir);
			fdp->fd_cdir = NULL;
		}
	}

	/*
	 * If the number of open files fits in the internal arrays
	 * of the open file structure, use them, otherwise allocate
	 * additional memory for the number of descriptors currently
	 * in use.
	 */
	afterlast = fdp->fd_afterlast;
	freefile = fdp->fd_freefile;
	if (afterlast <= NDFILE) {
		n_files = NDFILE;
	} else {
		n_files = roundup(afterlast, NDEXTENT);
	}

	/* drop the lock for the (possibly blocking) table allocations */
	proc_fdunlock(p);

	ofiles = kalloc_type(struct fileproc *, n_files, Z_WAITOK | Z_ZERO);
	ofileflags = kalloc_data(n_files, Z_WAITOK | Z_ZERO);
	if (ofiles == NULL || ofileflags == NULL) {
		/* undo partial allocations and the directory references */
		kfree_type(struct fileproc *, n_files, ofiles);
		kfree_data(ofileflags, n_files);
		if (newfdp->fd_cdir) {
			vnode_rele(newfdp->fd_cdir);
			newfdp->fd_cdir = NULL;
		}
		if (newfdp->fd_rdir) {
			vnode_rele(newfdp->fd_rdir);
			newfdp->fd_rdir = NULL;
		}
		return ENOMEM;
	}

	proc_fdlock(p);

	/* copy descriptors top-down so afterlast/freefile can shrink */
	for (int i = afterlast; i-- > 0;) {
		struct fileproc *ofp, *nfp;
		char flags;

		ofp = fdp->fd_ofiles[i];
		flags = fdp->fd_ofileflags[i];

		if (ofp == NULL ||
		    (ofp->fp_glob->fg_lflags & FG_CONFINED) ||
		    ((ofp->fp_flags & FP_CLOFORK) && !in_exec) ||
		    ((ofp->fp_flags & FP_CLOEXEC) && in_exec) ||
		    (flags & UF_RESERVED)) {
			/* skipped slot: tighten the new table's bounds */
			if (i + 1 == afterlast) {
				afterlast = i;
			}
			if (i < freefile) {
				freefile = i;
			}

			continue;
		}

		nfp = fileproc_alloc_init();
		nfp->fp_glob = ofp->fp_glob;
		if (in_exec) {
			/* exec keeps both close-on-* flags and any guard */
			nfp->fp_flags = (ofp->fp_flags & (FP_CLOEXEC | FP_CLOFORK));
			if (ofp->fp_guard_attrs) {
				guarded_fileproc_copy_guard(ofp, nfp);
			}
		} else {
			assert(ofp->fp_guard_attrs == 0);
			nfp->fp_flags = (ofp->fp_flags & FP_CLOEXEC);
		}
		fg_ref(p, nfp->fp_glob);

		ofiles[i] = nfp;
#if CONFIG_PROC_RESOURCE_LIMITS
		fd_nfiles_open++;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
	}

	proc_fdunlock(p);

	newfdp->fd_ofiles = ofiles;
	newfdp->fd_ofileflags = ofileflags;
	newfdp->fd_nfiles = n_files;
	newfdp->fd_afterlast = afterlast;
	newfdp->fd_freefile = freefile;

#if CONFIG_PROC_RESOURCE_LIMITS
	newfdp->fd_nfiles_open = fd_nfiles_open;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	return 0;
}
1012 
/*
 * fdt_invalidate
 *
 * Tear down the descriptor table of a terminating process: drop all
 * knotes and workloops, close every open descriptor, detach the table
 * arrays / workq kqueue / cwd / root under the fd lock, then free
 * everything outside the lock.
 */
void
fdt_invalidate(proc_t p)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *fp, **ofiles;
	kauth_cred_t p_cred;
	char *ofileflags;
	struct kqworkq *kqwq = NULL;
	vnode_t vn1 = NULL, vn2 = NULL;
	struct kqwllist *kqhash = NULL;
	u_long kqhashmask = 0;
	int n_files = 0;

	/*
	 * deallocate all the knotes up front and claim empty
	 * tables to make any subsequent kqueue closes faster.
	 */
	knotes_dealloc(p);
	assert(fdp->fd_knlistsize == 0);
	assert(fdp->fd_knhashmask == 0);

	/*
	 * dealloc all workloops that have outstanding retains
	 * when created with scheduling parameters.
	 */
	kqworkloops_dealloc(p);

	proc_fdlock(p);

	/* proc_ucred_unsafe() is ok: process is terminating */
	p_cred = proc_ucred_unsafe(p);

	/* close file descriptors */
	if (fdp->fd_nfiles > 0 && fdp->fd_ofiles) {
		for (int i = fdp->fd_afterlast; i-- > 0;) {
			if ((fp = fdp->fd_ofiles[i]) != NULL) {
				if (fdp->fd_ofileflags[i] & UF_RESERVED) {
					panic("fdfree: found fp with UF_RESERVED");
				}
				/* proc_ucred_unsafe() is ok: process is terminating */
				/* fp_close_and_unlock drops the fd lock; retake it */
				fp_close_and_unlock(p, p_cred, i, fp, 0);
				proc_fdlock(p);
			}
		}
	}

	/* detach everything under the lock; free after dropping it */
	n_files = fdp->fd_nfiles;
	ofileflags = fdp->fd_ofileflags;
	ofiles = fdp->fd_ofiles;
	kqwq = fdp->fd_wqkqueue;
	vn1 = fdp->fd_cdir;
	vn2 = fdp->fd_rdir;

	fdp->fd_ofileflags = NULL;
	fdp->fd_ofiles = NULL;
	fdp->fd_nfiles = 0;
	fdp->fd_wqkqueue = NULL;
	fdp->fd_cdir = NULL;
	fdp->fd_rdir = NULL;

	proc_fdunlock(p);

	/* detach the kqueue hash under its own lock */
	lck_mtx_lock(&fdp->fd_kqhashlock);

	kqhash = fdp->fd_kqhash;
	kqhashmask = fdp->fd_kqhashmask;

	fdp->fd_kqhash = 0;
	fdp->fd_kqhashmask = 0;

	lck_mtx_unlock(&fdp->fd_kqhashlock);

	/* now free everything detached above, outside any lock */
	kfree_type(struct fileproc *, n_files, ofiles);
	kfree_data(ofileflags, n_files);

	if (kqwq) {
		kqworkq_dealloc(kqwq);
	}
	if (vn1) {
		vnode_rele(vn1);
	}
	if (vn2) {
		vnode_rele(vn2);
	}
	if (kqhash) {
		/* all knotes were dropped above, so every bucket must be empty */
		for (uint32_t i = 0; i <= kqhashmask; i++) {
			assert(LIST_EMPTY(&kqhash[i]));
		}
		hashdestroy(kqhash, M_KQUEUE, kqhashmask);
	}
}
1104 
1105 
1106 struct fileproc *
fileproc_alloc_init(void)1107 fileproc_alloc_init(void)
1108 {
1109 	struct fileproc *fp;
1110 
1111 	fp = zalloc_id(ZONE_ID_FILEPROC, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1112 	os_ref_init(&fp->fp_iocount, &f_refgrp);
1113 	return fp;
1114 }
1115 
1116 
/*
 * fileproc_free
 *
 * Description:	Release the final I/O reference on a fileproc and return
 *		it to its zone.
 *
 * Parameters:	fp		fileproc to free; caller holds its last
 *				fp_iocount reference
 *
 * Notes:	The guard state must be dropped before the fp_wset assert:
 *		unguarding is what clears the storage the assert inspects.
 */
void
fileproc_free(struct fileproc *fp)
{
	/* asserts the refcount drops to zero; does not free by itself */
	os_ref_release_last(&fp->fp_iocount);
	if (fp->fp_guard_attrs) {
		guarded_fileproc_unguard(fp);
	}
	assert(fp->fp_wset == NULL);
	zfree_id(ZONE_ID_FILEPROC, fp);
}
1127 
1128 
1129 /*
1130  * Statistics counter for the number of times a process calling fdalloc()
1131  * has resulted in an expansion of the per process open file table.
1132  *
1133  * XXX This would likely be of more use if it were per process
1134  */
1135 int fdexpand;
1136 
1137 #if CONFIG_PROC_RESOURCE_LIMITS
1138 /*
1139  * Should be called only with the proc_fdlock held.
1140  */
1141 void
fd_check_limit_exceeded(struct filedesc * fdp)1142 fd_check_limit_exceeded(struct filedesc *fdp)
1143 {
1144 #if DIAGNOSTIC
1145 	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
1146 #endif
1147 
1148 	if (!fd_above_soft_limit_notified(fdp) && fdp->fd_nfiles_soft_limit &&
1149 	    (fdp->fd_nfiles_open > fdp->fd_nfiles_soft_limit)) {
1150 		fd_above_soft_limit_send_notification(fdp);
1151 		act_set_astproc_resource(current_thread());
1152 	} else if (!fd_above_hard_limit_notified(fdp) && fdp->fd_nfiles_hard_limit &&
1153 	    (fdp->fd_nfiles_open > fdp->fd_nfiles_hard_limit)) {
1154 		fd_above_hard_limit_send_notification(fdp);
1155 		act_set_astproc_resource(current_thread());
1156 	}
1157 }
1158 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1159 
1160 /*
1161  * fdalloc
1162  *
1163  * Description:	Allocate a file descriptor for the process.
1164  *
1165  * Parameters:	p				Process to allocate the fd in
1166  *		want				The fd we would prefer to get
1167  *		result				Pointer to fd we got
1168  *
1169  * Returns:	0				Success
1170  *		EMFILE
1171  *		ENOMEM
1172  *
1173  * Implicit returns:
1174  *		*result (modified)		The fd which was allocated
1175  */
1176 int
fdalloc(proc_t p,int want,int * result)1177 fdalloc(proc_t p, int want, int *result)
1178 {
1179 	struct filedesc *fdp = &p->p_fd;
1180 	int i;
1181 	int last, numfiles, oldnfiles;
1182 	struct fileproc **newofiles;
1183 	char *newofileflags;
1184 	int lim = proc_limitgetcur_nofile(p);
1185 
1186 	/*
1187 	 * Search for a free descriptor starting at the higher
1188 	 * of want or fd_freefile.  If that fails, consider
1189 	 * expanding the ofile array.
1190 	 */
1191 #if DIAGNOSTIC
1192 	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
1193 #endif
1194 
1195 	for (;;) {
1196 		last = (int)MIN((unsigned int)fdp->fd_nfiles, (unsigned int)lim);
1197 		if ((i = want) < fdp->fd_freefile) {
1198 			i = fdp->fd_freefile;
1199 		}
1200 		for (; i < last; i++) {
1201 			if (fdp->fd_ofiles[i] == NULL && !(fdp->fd_ofileflags[i] & UF_RESERVED)) {
1202 				procfdtbl_reservefd(p, i);
1203 				if (i >= fdp->fd_afterlast) {
1204 					fdp->fd_afterlast = i + 1;
1205 				}
1206 				if (want <= fdp->fd_freefile) {
1207 					fdp->fd_freefile = i;
1208 				}
1209 				*result = i;
1210 #if CONFIG_PROC_RESOURCE_LIMITS
1211 				fdp->fd_nfiles_open++;
1212 				fd_check_limit_exceeded(fdp);
1213 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1214 				return 0;
1215 			}
1216 		}
1217 
1218 		/*
1219 		 * No space in current array.  Expand?
1220 		 */
1221 		if ((rlim_t)fdp->fd_nfiles >= lim) {
1222 			return EMFILE;
1223 		}
1224 		if (fdp->fd_nfiles < NDEXTENT) {
1225 			numfiles = NDEXTENT;
1226 		} else {
1227 			numfiles = 2 * fdp->fd_nfiles;
1228 		}
1229 		/* Enforce lim */
1230 		if ((rlim_t)numfiles > lim) {
1231 			numfiles = (int)lim;
1232 		}
1233 		proc_fdunlock(p);
1234 		newofiles = kalloc_type(struct fileproc *, numfiles, Z_WAITOK | Z_ZERO);
1235 		newofileflags = kalloc_data(numfiles, Z_WAITOK | Z_ZERO);
1236 		proc_fdlock(p);
1237 		if (newofileflags == NULL || newofiles == NULL) {
1238 			kfree_type(struct fileproc *, numfiles, newofiles);
1239 			kfree_data(newofileflags, numfiles);
1240 			return ENOMEM;
1241 		}
1242 		if (fdp->fd_nfiles >= numfiles) {
1243 			kfree_type(struct fileproc *, numfiles, newofiles);
1244 			kfree_data(newofileflags, numfiles);
1245 			continue;
1246 		}
1247 
1248 		/*
1249 		 * Copy the existing ofile and ofileflags arrays
1250 		 * and zero the new portion of each array.
1251 		 */
1252 		oldnfiles = fdp->fd_nfiles;
1253 		memcpy(newofiles, fdp->fd_ofiles,
1254 		    oldnfiles * sizeof(*fdp->fd_ofiles));
1255 		memcpy(newofileflags, fdp->fd_ofileflags, oldnfiles);
1256 
1257 		kfree_type(struct fileproc *, oldnfiles, fdp->fd_ofiles);
1258 		kfree_data(fdp->fd_ofileflags, oldnfiles);
1259 		fdp->fd_ofiles = newofiles;
1260 		fdp->fd_ofileflags = newofileflags;
1261 		fdp->fd_nfiles = numfiles;
1262 		fdexpand++;
1263 	}
1264 }
1265 
1266 
1267 #pragma mark fileprocs
1268 
1269 void
fileproc_modify_vflags(struct fileproc * fp,fileproc_vflags_t vflags,boolean_t clearflags)1270 fileproc_modify_vflags(struct fileproc *fp, fileproc_vflags_t vflags, boolean_t clearflags)
1271 {
1272 	if (clearflags) {
1273 		os_atomic_andnot(&fp->fp_vflags, vflags, relaxed);
1274 	} else {
1275 		os_atomic_or(&fp->fp_vflags, vflags, relaxed);
1276 	}
1277 }
1278 
1279 fileproc_vflags_t
fileproc_get_vflags(struct fileproc * fp)1280 fileproc_get_vflags(struct fileproc *fp)
1281 {
1282 	return os_atomic_load(&fp->fp_vflags, relaxed);
1283 }
1284 
1285 /*
1286  * falloc_withinit
1287  *
1288  * Create a new open file structure and allocate
1289  * a file descriptor for the process that refers to it.
1290  *
1291  * Returns:	0			Success
1292  *
1293  * Description:	Allocate an entry in the per process open file table and
1294  *		return the corresponding fileproc and fd.
1295  *
1296  * Parameters:	p				The process in whose open file
1297  *						table the fd is to be allocated
1298  *		resultfp			Pointer to fileproc pointer
1299  *						return area
1300  *		resultfd			Pointer to fd return area
1301  *		ctx				VFS context
1302  *		fp_zalloc			fileproc allocator to use
1303  *		crarg				allocator args
1304  *
1305  * Returns:	0				Success
1306  *		ENFILE				Too many open files in system
1307  *		fdalloc:EMFILE			Too many open files in process
1308  *		fdalloc:ENOMEM			M_OFILETABL zone exhausted
1309  *		ENOMEM				fp_zone or fg_zone zone
1310  *						exhausted
1311  *
1312  * Implicit returns:
1313  *		*resultfd (modified)		Returned fileproc pointer
1314  *		*resultfd (modified)		Returned fd
1315  *
1316  * Notes:	This function takes separate process and context arguments
1317  *		solely to support kern_exec.c; otherwise, it would take
1318  *		neither, and use the vfs_context_current() routine internally.
1319  */
1320 int
falloc_withinit(proc_t p,struct ucred * p_cred,struct vfs_context * ctx,struct fileproc ** resultfp,int * resultfd,fp_initfn_t fp_init,void * initarg)1321 falloc_withinit(
1322 	proc_t                  p,
1323 	struct ucred           *p_cred,
1324 	struct vfs_context     *ctx,
1325 	struct fileproc       **resultfp,
1326 	int                    *resultfd,
1327 	fp_initfn_t             fp_init,
1328 	void                   *initarg)
1329 {
1330 	struct fileproc *fp;
1331 	struct fileglob *fg;
1332 	int error, nfd;
1333 
1334 	/* Make sure we don't go beyond the system-wide limit */
1335 	if (nfiles >= maxfiles) {
1336 		tablefull("file");
1337 		return ENFILE;
1338 	}
1339 
1340 	proc_fdlock(p);
1341 
1342 	/* fdalloc will make sure the process stays below per-process limit */
1343 	if ((error = fdalloc(p, 0, &nfd))) {
1344 		proc_fdunlock(p);
1345 		return error;
1346 	}
1347 
1348 #if CONFIG_MACF
1349 	error = mac_file_check_create(p_cred);
1350 	if (error) {
1351 		proc_fdunlock(p);
1352 		return error;
1353 	}
1354 #else
1355 	(void)p_cred;
1356 #endif
1357 
1358 	/*
1359 	 * Allocate a new file descriptor.
1360 	 * If the process has file descriptor zero open, add to the list
1361 	 * of open files at that point, otherwise put it at the front of
1362 	 * the list of open files.
1363 	 */
1364 	proc_fdunlock(p);
1365 
1366 	fp = fileproc_alloc_init();
1367 	if (fp_init) {
1368 		fp_init(fp, initarg);
1369 	}
1370 
1371 	fg = fg_alloc_init(ctx);
1372 
1373 	os_ref_retain_locked(&fp->fp_iocount);
1374 	fp->fp_glob = fg;
1375 
1376 	proc_fdlock(p);
1377 
1378 	p->p_fd.fd_ofiles[nfd] = fp;
1379 
1380 	proc_fdunlock(p);
1381 
1382 	if (resultfp) {
1383 		*resultfp = fp;
1384 	}
1385 	if (resultfd) {
1386 		*resultfd = nfd;
1387 	}
1388 
1389 	return 0;
1390 }
1391 
1392 /*
1393  * fp_free
1394  *
1395  * Description:	Release the fd and free the fileproc associated with the fd
1396  *		in the per process open file table of the specified process;
1397  *		these values must correspond.
1398  *
1399  * Parameters:	p				Process containing fd
1400  *		fd				fd to be released
1401  *		fp				fileproc to be freed
1402  */
1403 void
fp_free(proc_t p,int fd,struct fileproc * fp)1404 fp_free(proc_t p, int fd, struct fileproc * fp)
1405 {
1406 	proc_fdlock_spin(p);
1407 	fdrelse(p, fd);
1408 	proc_fdunlock(p);
1409 
1410 	fg_free(fp->fp_glob);
1411 	os_ref_release_live(&fp->fp_iocount);
1412 	fileproc_free(fp);
1413 }
1414 
1415 
1416 struct fileproc *
fp_get_noref_locked(proc_t p,int fd)1417 fp_get_noref_locked(proc_t p, int fd)
1418 {
1419 	struct filedesc *fdp = &p->p_fd;
1420 	struct fileproc *fp;
1421 
1422 	if (fd < 0 || fd >= fdp->fd_nfiles ||
1423 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1424 	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1425 		return NULL;
1426 	}
1427 
1428 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1429 	return fp;
1430 }
1431 
1432 struct fileproc *
fp_get_noref_locked_with_iocount(proc_t p,int fd)1433 fp_get_noref_locked_with_iocount(proc_t p, int fd)
1434 {
1435 	struct filedesc *fdp = &p->p_fd;
1436 	struct fileproc *fp = NULL;
1437 
1438 	if (fd < 0 || fd >= fdp->fd_nfiles ||
1439 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1440 	    os_ref_get_count(&fp->fp_iocount) <= 1 ||
1441 	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
1442 	    !(fdp->fd_ofileflags[fd] & UF_CLOSING))) {
1443 		panic("%s: caller without an ioccount on fileproc (%d/:%p)",
1444 		    __func__, fd, fp);
1445 	}
1446 
1447 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1448 	return fp;
1449 }
1450 
1451 
1452 /*
1453  * fp_lookup
1454  *
1455  * Description:	Get fileproc pointer for a given fd from the per process
1456  *		open file table of the specified process and if successful,
1457  *		increment the fp_iocount
1458  *
1459  * Parameters:	p				Process in which fd lives
1460  *		fd				fd to get information for
1461  *		resultfp			Pointer to result fileproc
1462  *						pointer area, or 0 if none
1463  *		locked				!0 if the caller holds the
1464  *						proc_fdlock, 0 otherwise
1465  *
1466  * Returns:	0			Success
1467  *		EBADF			Bad file descriptor
1468  *
1469  * Implicit returns:
1470  *		*resultfp (modified)		Fileproc pointer
1471  *
1472  * Locks:	If the argument 'locked' is non-zero, then the caller is
1473  *		expected to have taken and held the proc_fdlock; if it is
1474  *		zero, than this routine internally takes and drops this lock.
1475  */
1476 int
fp_lookup(proc_t p,int fd,struct fileproc ** resultfp,int locked)1477 fp_lookup(proc_t p, int fd, struct fileproc **resultfp, int locked)
1478 {
1479 	struct filedesc *fdp = &p->p_fd;
1480 	struct fileproc *fp;
1481 
1482 	if (!locked) {
1483 		proc_fdlock_spin(p);
1484 	}
1485 	if (fd < 0 || fdp == NULL || fd >= fdp->fd_nfiles ||
1486 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1487 	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1488 		if (!locked) {
1489 			proc_fdunlock(p);
1490 		}
1491 		return EBADF;
1492 	}
1493 
1494 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1495 	os_ref_retain_locked(&fp->fp_iocount);
1496 
1497 	if (resultfp) {
1498 		*resultfp = fp;
1499 	}
1500 	if (!locked) {
1501 		proc_fdunlock(p);
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 
1508 int
fp_get_ftype(proc_t p,int fd,file_type_t ftype,int err,struct fileproc ** fpp)1509 fp_get_ftype(proc_t p, int fd, file_type_t ftype, int err, struct fileproc **fpp)
1510 {
1511 	struct filedesc *fdp = &p->p_fd;
1512 	struct fileproc *fp;
1513 
1514 	proc_fdlock_spin(p);
1515 	if (fd < 0 || fd >= fdp->fd_nfiles ||
1516 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1517 	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1518 		proc_fdunlock(p);
1519 		return EBADF;
1520 	}
1521 
1522 	if (fp->f_type != ftype) {
1523 		proc_fdunlock(p);
1524 		return err;
1525 	}
1526 
1527 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1528 	os_ref_retain_locked(&fp->fp_iocount);
1529 	proc_fdunlock(p);
1530 
1531 	*fpp = fp;
1532 	return 0;
1533 }
1534 
1535 
1536 /*
1537  * fp_drop
1538  *
1539  * Description:	Drop the I/O reference previously taken by calling fp_lookup
1540  *		et. al.
1541  *
1542  * Parameters:	p				Process in which the fd lives
1543  *		fd				fd associated with the fileproc
1544  *		fp				fileproc on which to set the
1545  *						flag and drop the reference
1546  *		locked				flag to internally take and
1547  *						drop proc_fdlock if it is not
1548  *						already held by the caller
1549  *
1550  * Returns:	0				Success
1551  *		EBADF				Bad file descriptor
1552  *
1553  * Locks:	This function internally takes and drops the proc_fdlock for
1554  *		the supplied process if 'locked' is non-zero, and assumes that
1555  *		the caller already holds this lock if 'locked' is non-zero.
1556  *
1557  * Notes:	The fileproc must correspond to the fd in the supplied proc
1558  */
1559 int
fp_drop(proc_t p,int fd,struct fileproc * fp,int locked)1560 fp_drop(proc_t p, int fd, struct fileproc *fp, int locked)
1561 {
1562 	struct filedesc *fdp = &p->p_fd;
1563 	int     needwakeup = 0;
1564 
1565 	if (!locked) {
1566 		proc_fdlock_spin(p);
1567 	}
1568 	if ((fp == FILEPROC_NULL) && (fd < 0 || fd >= fdp->fd_nfiles ||
1569 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1570 	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
1571 	    !(fdp->fd_ofileflags[fd] & UF_CLOSING)))) {
1572 		if (!locked) {
1573 			proc_fdunlock(p);
1574 		}
1575 		return EBADF;
1576 	}
1577 
1578 	if (1 == os_ref_release_locked(&fp->fp_iocount)) {
1579 		if (fp->fp_flags & FP_SELCONFLICT) {
1580 			fp->fp_flags &= ~FP_SELCONFLICT;
1581 		}
1582 
1583 		if (fdp->fd_fpdrainwait) {
1584 			fdp->fd_fpdrainwait = 0;
1585 			needwakeup = 1;
1586 		}
1587 	}
1588 	if (!locked) {
1589 		proc_fdunlock(p);
1590 	}
1591 	if (needwakeup) {
1592 		wakeup(&fdp->fd_fpdrainwait);
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 
1599 /*
1600  * fileproc_drain
1601  *
1602  * Description:	Drain out pending I/O operations
1603  *
1604  * Parameters:	p				Process closing this file
1605  *		fp				fileproc struct for the open
1606  *						instance on the file
1607  *
1608  * Returns:	void
1609  *
1610  * Locks:	Assumes the caller holds the proc_fdlock
1611  *
1612  * Notes:	For character devices, this occurs on the last close of the
1613  *		device; for all other file descriptors, this occurs on each
1614  *		close to prevent fd's from being closed out from under
1615  *		operations currently in progress and blocked
1616  *
1617  * See Also:    file_vnode(), file_socket(), file_drop(), and the cautions
1618  *		regarding their use and interaction with this function.
1619  */
1620 static void
fileproc_drain(proc_t p,struct fileproc * fp)1621 fileproc_drain(proc_t p, struct fileproc * fp)
1622 {
1623 	struct filedesc *fdp = &p->p_fd;
1624 	struct vfs_context context;
1625 	thread_t thread;
1626 	bool is_current_proc;
1627 
1628 	is_current_proc = (p == current_proc());
1629 
1630 	if (!is_current_proc) {
1631 		proc_lock(p);
1632 		thread = proc_thread(p); /* XXX */
1633 		thread_reference(thread);
1634 		proc_unlock(p);
1635 	} else {
1636 		thread = current_thread();
1637 	}
1638 
1639 	context.vc_thread = thread;
1640 	context.vc_ucred = fp->fp_glob->fg_cred;
1641 
1642 	/* Set the vflag for drain */
1643 	fileproc_modify_vflags(fp, FPV_DRAIN, FALSE);
1644 
1645 	while (os_ref_get_count(&fp->fp_iocount) > 1) {
1646 		lck_mtx_convert_spin(&fdp->fd_lock);
1647 
1648 		fo_drain(fp, &context);
1649 		if ((fp->fp_flags & FP_INSELECT) == FP_INSELECT) {
1650 			struct select_set *selset;
1651 
1652 			if (fp->fp_guard_attrs) {
1653 				selset = fp->fp_guard->fpg_wset;
1654 			} else {
1655 				selset = fp->fp_wset;
1656 			}
1657 			if (waitq_wakeup64_all(selset, NO_EVENT64,
1658 			    THREAD_INTERRUPTED, WAITQ_WAKEUP_DEFAULT) == KERN_INVALID_ARGUMENT) {
1659 				panic("bad wait queue for waitq_wakeup64_all %p (%sfp:%p)",
1660 				    selset, fp->fp_guard_attrs ? "guarded " : "", fp);
1661 			}
1662 		}
1663 		if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
1664 			if (waitq_wakeup64_all(&select_conflict_queue, NO_EVENT64,
1665 			    THREAD_INTERRUPTED, WAITQ_WAKEUP_DEFAULT) == KERN_INVALID_ARGUMENT) {
1666 				panic("bad select_conflict_queue");
1667 			}
1668 		}
1669 		fdp->fd_fpdrainwait = 1;
1670 		msleep(&fdp->fd_fpdrainwait, &fdp->fd_lock, PRIBIO, "fpdrain", NULL);
1671 	}
1672 #if DIAGNOSTIC
1673 	if ((fp->fp_flags & FP_INSELECT) != 0) {
1674 		panic("FP_INSELECT set on drained fp");
1675 	}
1676 #endif
1677 	if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
1678 		fp->fp_flags &= ~FP_SELCONFLICT;
1679 	}
1680 
1681 	if (!is_current_proc) {
1682 		thread_deallocate(thread);
1683 	}
1684 }
1685 
1686 
/*
 * fp_close_and_unlock
 *
 * Description:	Close the descriptor `fd` holding fileproc `fp`: run the
 *		close-time notifications, detach knotes, drain outstanding
 *		I/O, release the slot, and drop the fileglob reference.
 *
 * Parameters:	p		Process owning the descriptor
 *		cred		Credential for close notifications
 *		fd		Descriptor being closed
 *		fp		fileproc occupying the slot
 *		flags		FD_DUP2RESV to keep the slot reserved
 *				(dup2 is about to reuse it)
 *
 * Returns:	value of fg_drop() (errors from the underlying close)
 *
 * Locks:	Called with the proc_fdlock held; always returns with it
 *		dropped (the lock is also temporarily dropped internally
 *		around the notification callouts).
 */
int
fp_close_and_unlock(proc_t p, kauth_cred_t cred, int fd, struct fileproc *fp, int flags)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileglob *fg = fp->fp_glob;

#if DIAGNOSTIC
	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif

	/*
	 * Keep most people from finding the filedesc while we are closing it.
	 *
	 * Callers are:
	 *
	 * - dup2() which always waits for UF_RESERVED to clear
	 *
	 * - close/guarded_close/... who will fail the fileproc lookup if
	 *   UF_RESERVED is set,
	 *
	 * - fdexec()/fdfree() who only run once all threads in the proc
	 *   are properly canceled, hence no fileproc in this proc should
	 *   be in flux.
	 *
	 * Which means that neither UF_RESERVED nor UF_CLOSING should be set.
	 *
	 * Callers of fp_get_noref_locked_with_iocount() can still find
	 * this entry so that they can drop their I/O reference despite
	 * not having remembered the fileproc pointer (namely select() and
	 * file_drop()).
	 */
	if (p->p_fd.fd_ofileflags[fd] & (UF_RESERVED | UF_CLOSING)) {
		panic("%s: called with fileproc in flux (%d/:%p)",
		    __func__, fd, fp);
	}
	p->p_fd.fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING);

	/* only drop the lock if a notification callout actually needs it */
	if ((fp->fp_flags & FP_AIOISSUED) ||
#if CONFIG_MACF
	    (FILEGLOB_DTYPE(fg) == DTYPE_VNODE)
#else
	    kauth_authorize_fileop_has_listeners()
#endif
	    ) {
		proc_fdunlock(p);

		if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
			/*
			 * call out to allow 3rd party notification of close.
			 * Ignore result of kauth_authorize_fileop call.
			 */
#if CONFIG_MACF
			mac_file_notify_close(cred, fp->fp_glob);
#else
			(void)cred;
#endif

			if (kauth_authorize_fileop_has_listeners() &&
			    vnode_getwithref((vnode_t)fg_get_data(fg)) == 0) {
				u_int   fileop_flags = 0;
				if (fg->fg_flag & FWASWRITTEN) {
					fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED;
				}
				kauth_authorize_fileop(fg->fg_cred, KAUTH_FILEOP_CLOSE,
				    (uintptr_t)fg_get_data(fg), (uintptr_t)fileop_flags);

				vnode_put((vnode_t)fg_get_data(fg));
			}
		}

		if (fp->fp_flags & FP_AIOISSUED) {
			/*
			 * cancel all async IO requests that can be cancelled.
			 */
			_aio_close( p, fd );
		}

		proc_fdlock(p);
	}

	/* detach any knotes attached to this fd */
	if (fd < fdp->fd_knlistsize) {
		knote_fdclose(p, fd);
	}

	/* wait for all other I/O references on fp to go away */
	fileproc_drain(p, fp);

	if (flags & FD_DUP2RESV) {
		/* keep the slot reserved for the pending dup2() */
		fdp->fd_ofiles[fd] = NULL;
		fdp->fd_ofileflags[fd] &= ~UF_CLOSING;
	} else {
		fdrelse(p, fd);
	}

	proc_fdunlock(p);

	if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fg) == DTYPE_SOCKET) {
		KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_END,
		    fd, 0, (int64_t)VM_KERNEL_ADDRPERM(fg_get_data(fg)));
	}

	fileproc_free(fp);

	/* drops the fileglob reference; may run the real close */
	return fg_drop(p, fg);
}
1791 
1792 /*
1793  * dupfdopen
1794  *
1795  * Description:	Duplicate the specified descriptor to a free descriptor;
1796  *		this is the second half of fdopen(), above.
1797  *
1798  * Parameters:	p				current process pointer
1799  *		indx				fd to dup to
1800  *		dfd				fd to dup from
1801  *		mode				mode to set on new fd
1802  *		error				command code
1803  *
1804  * Returns:	0				Success
1805  *		EBADF				Source fd is bad
1806  *		EACCES				Requested mode not allowed
1807  *		!0				'error', if not ENODEV or
1808  *						ENXIO
1809  *
1810  * Notes:	XXX This is not thread safe; see fdopen() above
1811  */
1812 int
dupfdopen(proc_t p,int indx,int dfd,int flags,int error)1813 dupfdopen(proc_t p, int indx, int dfd, int flags, int error)
1814 {
1815 	struct filedesc *fdp = &p->p_fd;
1816 	struct fileproc *wfp;
1817 	struct fileproc *fp;
1818 #if CONFIG_MACF
1819 	int myerror;
1820 #endif
1821 
1822 	/*
1823 	 * If the to-be-dup'd fd number is greater than the allowed number
1824 	 * of file descriptors, or the fd to be dup'd has already been
1825 	 * closed, reject.  Note, check for new == old is necessary as
1826 	 * falloc could allocate an already closed to-be-dup'd descriptor
1827 	 * as the new descriptor.
1828 	 */
1829 	proc_fdlock(p);
1830 
1831 	fp = fdp->fd_ofiles[indx];
1832 	if (dfd < 0 || dfd >= fdp->fd_nfiles ||
1833 	    (wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp ||
1834 	    (fdp->fd_ofileflags[dfd] & UF_RESERVED)) {
1835 		proc_fdunlock(p);
1836 		return EBADF;
1837 	}
1838 #if CONFIG_MACF
1839 	myerror = mac_file_check_dup(kauth_cred_get(), wfp->fp_glob, dfd);
1840 	if (myerror) {
1841 		proc_fdunlock(p);
1842 		return myerror;
1843 	}
1844 #endif
1845 	/*
1846 	 * There are two cases of interest here.
1847 	 *
1848 	 * For ENODEV simply dup (dfd) to file descriptor
1849 	 * (indx) and return.
1850 	 *
1851 	 * For ENXIO steal away the file structure from (dfd) and
1852 	 * store it in (indx).  (dfd) is effectively closed by
1853 	 * this operation.
1854 	 *
1855 	 * Any other error code is just returned.
1856 	 */
1857 	switch (error) {
1858 	case ENODEV:
1859 		if (fp_isguarded(wfp, GUARD_DUP)) {
1860 			proc_fdunlock(p);
1861 			return EPERM;
1862 		}
1863 
1864 		/*
1865 		 * Check that the mode the file is being opened for is a
1866 		 * subset of the mode of the existing descriptor.
1867 		 */
1868 		if (((flags & (FREAD | FWRITE)) | wfp->f_flag) != wfp->f_flag) {
1869 			proc_fdunlock(p);
1870 			return EACCES;
1871 		}
1872 		if (indx >= fdp->fd_afterlast) {
1873 			fdp->fd_afterlast = indx + 1;
1874 		}
1875 
1876 		if (fp->fp_glob) {
1877 			fg_free(fp->fp_glob);
1878 		}
1879 		fg_ref(p, wfp->fp_glob);
1880 		fp->fp_glob = wfp->fp_glob;
1881 		/*
1882 		 * Historically, open(/dev/fd/<n>) preserves close on fork/exec,
1883 		 * unlike dup(), dup2() or fcntl(F_DUPFD).
1884 		 *
1885 		 * open1() already handled O_CLO{EXEC,FORK}
1886 		 */
1887 		fp->fp_flags |= (wfp->fp_flags & (FP_CLOFORK | FP_CLOEXEC));
1888 
1889 		procfdtbl_releasefd(p, indx, NULL);
1890 		fp_drop(p, indx, fp, 1);
1891 		proc_fdunlock(p);
1892 		return 0;
1893 
1894 	default:
1895 		proc_fdunlock(p);
1896 		return error;
1897 	}
1898 	/* NOTREACHED */
1899 }
1900 
1901 
1902 #pragma mark KPIS (sys/file.h)
1903 
1904 /*
1905  * fg_get_vnode
1906  *
1907  * Description:	Return vnode associated with the file structure, if
1908  *		any.  The lifetime of the returned vnode is bound to
1909  *		the lifetime of the file structure.
1910  *
1911  * Parameters:	fg				Pointer to fileglob to
1912  *						inspect
1913  *
1914  * Returns:	vnode_t
1915  */
1916 vnode_t
fg_get_vnode(struct fileglob * fg)1917 fg_get_vnode(struct fileglob *fg)
1918 {
1919 	if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
1920 		return (vnode_t)fg_get_data(fg);
1921 	} else {
1922 		return NULL;
1923 	}
1924 }
1925 
1926 
1927 /*
1928  * fp_getfvp
1929  *
1930  * Description:	Get fileproc and vnode pointer for a given fd from the per
1931  *		process open file table of the specified process, and if
1932  *		successful, increment the fp_iocount
1933  *
1934  * Parameters:	p				Process in which fd lives
1935  *		fd				fd to get information for
1936  *		resultfp			Pointer to result fileproc
1937  *						pointer area, or 0 if none
1938  *		resultvp			Pointer to result vnode pointer
1939  *						area, or 0 if none
1940  *
1941  * Returns:	0				Success
1942  *		EBADF				Bad file descriptor
1943  *		ENOTSUP				fd does not refer to a vnode
1944  *
1945  * Implicit returns:
1946  *		*resultfp (modified)		Fileproc pointer
1947  *		*resultvp (modified)		vnode pointer
1948  *
1949  * Notes:	The resultfp and resultvp fields are optional, and may be
1950  *		independently specified as NULL to skip returning information
1951  *
1952  * Locks:	Internally takes and releases proc_fdlock
1953  */
1954 int
fp_getfvp(proc_t p,int fd,struct fileproc ** resultfp,struct vnode ** resultvp)1955 fp_getfvp(proc_t p, int fd, struct fileproc **resultfp, struct vnode **resultvp)
1956 {
1957 	struct fileproc *fp;
1958 	int error;
1959 
1960 	error = fp_get_ftype(p, fd, DTYPE_VNODE, ENOTSUP, &fp);
1961 	if (error == 0) {
1962 		if (resultfp) {
1963 			*resultfp = fp;
1964 		}
1965 		if (resultvp) {
1966 			*resultvp = (struct vnode *)fp_get_data(fp);
1967 		}
1968 	}
1969 
1970 	return error;
1971 }
1972 
1973 
1974 /*
1975  * fp_get_pipe_id
1976  *
1977  * Description:	Get pipe id for a given fd from the per process open file table
1978  *		of the specified process.
1979  *
1980  * Parameters:	p				Process in which fd lives
1981  *		fd				fd to get information for
1982  *		result_pipe_id			Pointer to result pipe id
1983  *
1984  * Returns:	0				Success
1985  *		EIVAL				NULL pointer arguments passed
1986  *		fp_lookup:EBADF			Bad file descriptor
1987  *		ENOTSUP				fd does not refer to a pipe
1988  *
1989  * Implicit returns:
1990  *		*result_pipe_id (modified)	pipe id
1991  *
1992  * Locks:	Internally takes and releases proc_fdlock
1993  */
1994 int
fp_get_pipe_id(proc_t p,int fd,uint64_t * result_pipe_id)1995 fp_get_pipe_id(proc_t p, int fd, uint64_t *result_pipe_id)
1996 {
1997 	struct fileproc *fp = FILEPROC_NULL;
1998 	struct fileglob *fg = NULL;
1999 	int error = 0;
2000 
2001 	if (p == NULL || result_pipe_id == NULL) {
2002 		return EINVAL;
2003 	}
2004 
2005 	proc_fdlock(p);
2006 	if ((error = fp_lookup(p, fd, &fp, 1))) {
2007 		proc_fdunlock(p);
2008 		return error;
2009 	}
2010 	fg = fp->fp_glob;
2011 
2012 	if (FILEGLOB_DTYPE(fg) == DTYPE_PIPE) {
2013 		*result_pipe_id = pipe_id((struct pipe*)fg_get_data(fg));
2014 	} else {
2015 		error = ENOTSUP;
2016 	}
2017 
2018 	fp_drop(p, fd, fp, 1);
2019 	proc_fdunlock(p);
2020 	return error;
2021 }
2022 
2023 
2024 /*
2025  * file_vnode
2026  *
2027  * Description:	Given an fd, look it up in the current process's per process
2028  *		open file table, and return its internal vnode pointer.
2029  *
2030  * Parameters:	fd				fd to obtain vnode from
2031  *		vpp				pointer to vnode return area
2032  *
2033  * Returns:	0				Success
2034  *		EINVAL				The fd does not refer to a
2035  *						vnode fileproc entry
2036  *	fp_lookup:EBADF				Bad file descriptor
2037  *
2038  * Implicit returns:
2039  *		*vpp (modified)			Returned vnode pointer
2040  *
2041  * Locks:	This function internally takes and drops the proc_fdlock for
2042  *		the current process
2043  *
2044  * Notes:	If successful, this function increments the fp_iocount on the
2045  *		fd's corresponding fileproc.
2046  *
2047  *		The fileproc referenced is not returned; because of this, care
2048  *		must be taken to not drop the last reference (e.g. by closing
2049  *		the file).  This is inherently unsafe, since the reference may
2050  *		not be recoverable from the vnode, if there is a subsequent
2051  *		close that destroys the associate fileproc.  The caller should
2052  *		therefore retain their own reference on the fileproc so that
2053  *		the fp_iocount can be dropped subsequently.  Failure to do this
2054  *		can result in the returned pointer immediately becoming invalid
2055  *		following the call.
2056  *
2057  *		Use of this function is discouraged.
2058  */
2059 int
file_vnode(int fd,struct vnode ** vpp)2060 file_vnode(int fd, struct vnode **vpp)
2061 {
2062 	return file_vnode_withvid(fd, vpp, NULL);
2063 }
2064 
2065 
2066 /*
2067  * file_vnode_withvid
2068  *
2069  * Description:	Given an fd, look it up in the current process's per process
2070  *		open file table, and return its internal vnode pointer.
2071  *
2072  * Parameters:	fd				fd to obtain vnode from
2073  *		vpp				pointer to vnode return area
2074  *		vidp				pointer to vid of the returned vnode
2075  *
2076  * Returns:	0				Success
2077  *		EINVAL				The fd does not refer to a
2078  *						vnode fileproc entry
2079  *	fp_lookup:EBADF				Bad file descriptor
2080  *
2081  * Implicit returns:
2082  *		*vpp (modified)			Returned vnode pointer
2083  *
2084  * Locks:	This function internally takes and drops the proc_fdlock for
2085  *		the current process
2086  *
2087  * Notes:	If successful, this function increments the fp_iocount on the
2088  *		fd's corresponding fileproc.
2089  *
2090  *		The fileproc referenced is not returned; because of this, care
2091  *		must be taken to not drop the last reference (e.g. by closing
2092  *		the file).  This is inherently unsafe, since the reference may
2093  *		not be recoverable from the vnode, if there is a subsequent
2094  *		close that destroys the associate fileproc.  The caller should
2095  *		therefore retain their own reference on the fileproc so that
2096  *		the fp_iocount can be dropped subsequently.  Failure to do this
2097  *		can result in the returned pointer immediately becoming invalid
2098  *		following the call.
2099  *
2100  *		Use of this function is discouraged.
2101  */
2102 int
file_vnode_withvid(int fd,struct vnode ** vpp,uint32_t * vidp)2103 file_vnode_withvid(int fd, struct vnode **vpp, uint32_t *vidp)
2104 {
2105 	struct fileproc *fp;
2106 	int error;
2107 
2108 	error = fp_get_ftype(current_proc(), fd, DTYPE_VNODE, EINVAL, &fp);
2109 	if (error == 0) {
2110 		if (vpp) {
2111 			*vpp = (struct vnode *)fp_get_data(fp);
2112 		}
2113 		if (vidp) {
2114 			*vidp = vnode_vid((struct vnode *)fp_get_data(fp));
2115 		}
2116 	}
2117 	return error;
2118 }
2119 
2120 /*
2121  * file_socket
2122  *
2123  * Description:	Given an fd, look it up in the current process's per process
2124  *		open file table, and return its internal socket pointer.
2125  *
2126  * Parameters:	fd				fd to obtain vnode from
2127  *		sp				pointer to socket return area
2128  *
2129  * Returns:	0				Success
2130  *		ENOTSOCK			Not a socket
2131  *		fp_lookup:EBADF			Bad file descriptor
2132  *
2133  * Implicit returns:
2134  *		*sp (modified)			Returned socket pointer
2135  *
2136  * Locks:	This function internally takes and drops the proc_fdlock for
2137  *		the current process
2138  *
2139  * Notes:	If successful, this function increments the fp_iocount on the
2140  *		fd's corresponding fileproc.
2141  *
2142  *		The fileproc referenced is not returned; because of this, care
2143  *		must be taken to not drop the last reference (e.g. by closing
2144  *		the file).  This is inherently unsafe, since the reference may
2145  *		not be recoverable from the socket, if there is a subsequent
2146  *		close that destroys the associate fileproc.  The caller should
2147  *		therefore retain their own reference on the fileproc so that
2148  *		the fp_iocount can be dropped subsequently.  Failure to do this
2149  *		can result in the returned pointer immediately becoming invalid
2150  *		following the call.
2151  *
2152  *		Use of this function is discouraged.
2153  */
2154 int
file_socket(int fd,struct socket ** sp)2155 file_socket(int fd, struct socket **sp)
2156 {
2157 	struct fileproc *fp;
2158 	int error;
2159 
2160 	error = fp_get_ftype(current_proc(), fd, DTYPE_SOCKET, ENOTSOCK, &fp);
2161 	if (error == 0) {
2162 		if (sp) {
2163 			*sp = (struct socket *)fp_get_data(fp);
2164 		}
2165 	}
2166 	return error;
2167 }
2168 
2169 
2170 /*
2171  * file_flags
2172  *
2173  * Description:	Given an fd, look it up in the current process's per process
2174  *		open file table, and return its fileproc's flags field.
2175  *
2176  * Parameters:	fd				fd whose flags are to be
2177  *						retrieved
2178  *		flags				pointer to flags data area
2179  *
2180  * Returns:	0				Success
 *		EBADF				Bad file descriptor
2183  *
2184  * Implicit returns:
2185  *		*flags (modified)		Returned flags field
2186  *
2187  * Locks:	This function internally takes and drops the proc_fdlock for
2188  *		the current process
2189  */
2190 int
file_flags(int fd,int * flags)2191 file_flags(int fd, int *flags)
2192 {
2193 	proc_t p = current_proc();
2194 	struct fileproc *fp;
2195 	int error = EBADF;
2196 
2197 	proc_fdlock_spin(p);
2198 	fp = fp_get_noref_locked(p, fd);
2199 	if (fp) {
2200 		*flags = (int)fp->f_flag;
2201 		error = 0;
2202 	}
2203 	proc_fdunlock(p);
2204 
2205 	return error;
2206 }
2207 
2208 
2209 /*
2210  * file_drop
2211  *
2212  * Description:	Drop an iocount reference on an fd, and wake up any waiters
2213  *		for draining (i.e. blocked in fileproc_drain() called during
2214  *		the last attempt to close a file).
2215  *
2216  * Parameters:	fd				fd on which an ioreference is
2217  *						to be dropped
2218  *
2219  * Returns:	0				Success
2220  *
2221  * Description:	Given an fd, look it up in the current process's per process
 *		open file table, and drop its fileproc's fp_iocount by one
2223  *
2224  * Notes:	This is intended as a corresponding operation to the functions
2225  *		file_vnode() and file_socket() operations.
2226  *
2227  *		If the caller can't possibly hold an I/O reference,
2228  *		this function will panic the kernel rather than allowing
2229  *		for memory corruption. Callers should always call this
2230  *		because they acquired an I/O reference on this file before.
2231  *
2232  *		Use of this function is discouraged.
2233  */
int
file_drop(int fd)
{
	struct fileproc *fp;
	proc_t p = current_proc();
	struct filedesc *fdp = &p->p_fd;
	int     needwakeup = 0;

	proc_fdlock_spin(p);
	/* Panics (per the contract above) if no I/O reference is held. */
	fp = fp_get_noref_locked_with_iocount(p, fd);

	/*
	 * If this drops the last iocount, clear any select-conflict state
	 * and note whether a drainer (e.g. a pending close) is waiting.
	 */
	if (1 == os_ref_release_locked(&fp->fp_iocount)) {
		if (fp->fp_flags & FP_SELCONFLICT) {
			fp->fp_flags &= ~FP_SELCONFLICT;
		}

		if (fdp->fd_fpdrainwait) {
			fdp->fd_fpdrainwait = 0;
			needwakeup = 1;
		}
	}
	proc_fdunlock(p);

	/* Wake the drainer only after releasing the fdlock. */
	if (needwakeup) {
		wakeup(&fdp->fd_fpdrainwait);
	}
	return 0;
}
2262 
2263 
#pragma mark syscalls

/* HFS-specific fsctl selectors; fallback definitions when not in headers. */
#ifndef HFS_GET_BOOT_INFO
#define HFS_GET_BOOT_INFO   (FCNTL_FS_SPECIFIC_BASE + 0x00004)
#endif

#ifndef HFS_SET_BOOT_INFO
#define HFS_SET_BOOT_INFO   (FCNTL_FS_SPECIFIC_BASE + 0x00005)
#endif

/* APFS ioctl selectors; fallback definitions when not in headers. */
#ifndef APFSIOC_REVERT_TO_SNAPSHOT
#define APFSIOC_REVERT_TO_SNAPSHOT  _IOW('J', 1, u_int64_t)
#endif

#ifndef APFSIOC_IS_GRAFT_SUPPORTED
#define APFSIOC_IS_GRAFT_SUPPORTED _IO('J', 133)
#endif

/*
 * Evaluates to 1 when the signed 64-bit sum (x) + (y) would overflow or
 * underflow, 0 otherwise.  NOTE: both arguments are evaluated multiple
 * times; do not pass expressions with side effects.
 */
#define CHECK_ADD_OVERFLOW_INT64L(x, y) \
	        (((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \
	        (((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \
	        ? 1 : 0)
2286 
2287 /*
2288  * sys_getdtablesize
2289  *
2290  * Description:	Returns the per process maximum size of the descriptor table
2291  *
2292  * Parameters:	p				Process being queried
2293  *		retval				Pointer to the call return area
2294  *
2295  * Returns:	0				Success
2296  *
2297  * Implicit returns:
2298  *		*retval (modified)		Size of dtable
2299  */
2300 int
sys_getdtablesize(proc_t p,__unused struct getdtablesize_args * uap,int32_t * retval)2301 sys_getdtablesize(proc_t p, __unused struct getdtablesize_args *uap, int32_t *retval)
2302 {
2303 	*retval = proc_limitgetcur_nofile(p);
2304 	return 0;
2305 }
2306 
2307 
2308 /*
2309  * check_file_seek_range
2310  *
2311  * Description: Checks if seek offsets are in the range of 0 to LLONG_MAX.
2312  *
2313  * Parameters:  fl		Flock structure.
2314  *		cur_file_offset	Current offset in the file.
2315  *
2316  * Returns:     0               on Success.
2317  *		EOVERFLOW	on overflow.
2318  *		EINVAL          on offset less than zero.
2319  */
2320 
static int
check_file_seek_range(struct flock *fl, off_t cur_file_offset)
{
	switch (fl->l_whence) {
	case SEEK_CUR:
		/* Would adding the current offset push the start past LLONG_MAX? */
		if (CHECK_ADD_OVERFLOW_INT64L(fl->l_start, cur_file_offset)) {
			/* A negative start is EINVAL, otherwise EOVERFLOW. */
			return (fl->l_start < 0) ? EINVAL : EOVERFLOW;
		}
		/* The absolute start marker must not land before byte 0. */
		if (fl->l_start + cur_file_offset < 0) {
			return EINVAL;
		}
		/* Positive length: end marker must stay within LLONG_MAX. */
		if ((fl->l_len > 0) && CHECK_ADD_OVERFLOW_INT64L(
			    fl->l_start + cur_file_offset, fl->l_len - 1)) {
			return EOVERFLOW;
		}
		/* Non-positive length: end marker must not go negative. */
		if ((fl->l_len <= 0) &&
		    (fl->l_start + cur_file_offset + fl->l_len < 0)) {
			return EINVAL;
		}
		return 0;
	case SEEK_SET:
		/* The start marker must not be negative. */
		if (fl->l_start < 0) {
			return EINVAL;
		}
		/* Positive length: end marker must stay within LLONG_MAX. */
		if ((fl->l_len > 0) &&
		    CHECK_ADD_OVERFLOW_INT64L(fl->l_start, fl->l_len - 1)) {
			return EOVERFLOW;
		}
		/* Negative length: end marker must not go negative. */
		if ((fl->l_len < 0) && (fl->l_start + fl->l_len < 0)) {
			return EINVAL;
		}
		return 0;
	default:
		/* Other whence values are validated elsewhere. */
		return 0;
	}
}
2364 
2365 
2366 /*
2367  * sys_dup
2368  *
2369  * Description:	Duplicate a file descriptor.
2370  *
2371  * Parameters:	p				Process performing the dup
2372  *		uap->fd				The fd to dup
2373  *		retval				Pointer to the call return area
2374  *
2375  * Returns:	0				Success
2376  *		!0				Errno
2377  *
2378  * Implicit returns:
2379  *		*retval (modified)		The new descriptor
2380  */
int
sys_dup(proc_t p, struct dup_args *uap, int32_t *retval)
{
	int old = uap->fd;
	int new, error;
	struct fileproc *fp;
	kauth_cred_t p_cred;

	proc_fdlock(p);
	/* Take an iocount on the source fd; EBADF if it isn't open. */
	if ((error = fp_lookup(p, old, &fp, 1))) {
		proc_fdunlock(p);
		return error;
	}
	/* Dup-guarded fds raise a guard exception instead of duplicating. */
	if (fp_isguarded(fp, GUARD_DUP)) {
		error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
		(void) fp_drop(p, old, fp, 1);
		proc_fdunlock(p);
		return error;
	}
	/* Reserve the lowest available descriptor slot. */
	if ((error = fdalloc(p, 0, &new))) {
		fp_drop(p, old, fp, 1);
		proc_fdunlock(p);
		return error;
	}
	p_cred = current_cached_proc_cred(p);
	error = finishdup(p, p_cred, old, new, 0, retval);

	/* Trace socket duplication for energy accounting, if enabled. */
	if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
		KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_START,
		    new, 0, (int64_t)VM_KERNEL_ADDRPERM(fp_get_data(fp)));
	}

	/* Drop the iocount taken by fp_lookup() and release the fdlock. */
	fp_drop(p, old, fp, 1);
	proc_fdunlock(p);

	return error;
}
2418 
2419 /*
2420  * sys_dup2
2421  *
2422  * Description:	Duplicate a file descriptor to a particular value.
2423  *
2424  * Parameters:	p				Process performing the dup
2425  *		uap->from			The fd to dup
2426  *		uap->to				The fd to dup it to
2427  *		retval				Pointer to the call return area
2428  *
2429  * Returns:	0				Success
2430  *		!0				Errno
2431  *
2432  * Implicit returns:
2433  *		*retval (modified)		The new descriptor
2434  */
2435 int
sys_dup2(proc_t p,struct dup2_args * uap,int32_t * retval)2436 sys_dup2(proc_t p, struct dup2_args *uap, int32_t *retval)
2437 {
2438 	kauth_cred_t p_cred = current_cached_proc_cred(p);
2439 
2440 	return dup2(p, p_cred, uap->from, uap->to, retval);
2441 }
2442 
/*
 * dup2
 *
 * Description:	Shared implementation of dup2(2): duplicate descriptor `old`
 *		onto descriptor `new` in process p, closing whatever `new`
 *		previously referred to.
 *
 * Locks:	Takes and drops the proc_fdlock; the lock may be dropped and
 *		retaken internally while waiting on a reserved slot or while
 *		closing the previous occupant of `new`.
 */
int
dup2(proc_t p, kauth_cred_t p_cred, int old, int new, int *retval)
{
	struct filedesc *fdp = &p->p_fd;
	struct fileproc *fp, *nfp;
	int i, error;

	proc_fdlock(p);

startover:
	/* Take an iocount on `old`; EBADF-class errors if it isn't open. */
	if ((error = fp_lookup(p, old, &fp, 1))) {
		proc_fdunlock(p);
		return error;
	}
	/* Dup-guarded fds raise a guard exception instead of duplicating. */
	if (fp_isguarded(fp, GUARD_DUP)) {
		error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
		(void) fp_drop(p, old, fp, 1);
		proc_fdunlock(p);
		return error;
	}
	/* `new` must lie within the process's open-file limit. */
	if (new < 0 || new >= proc_limitgetcur_nofile(p)) {
		fp_drop(p, old, fp, 1);
		proc_fdunlock(p);
		return EBADF;
	}
	/* dup2(fd, fd) is a no-op that just returns fd. */
	if (old == new) {
		fp_drop(p, old, fp, 1);
		*retval = new;
		proc_fdunlock(p);
		return 0;
	}
	if (new < 0 || new >= fdp->fd_nfiles) {
		/* `new` lies beyond the current table: grow and reserve. */
		if ((error = fdalloc(p, new, &i))) {
			fp_drop(p, old, fp, 1);
			proc_fdunlock(p);
			return error;
		}
		if (new != i) {
			/*
			 * fdalloc handed back a different slot; release it
			 * and handle `new` via the in-table path below.
			 */
			fdrelse(p, i);
			goto closeit;
		}
	} else {
closeit:	/* NB: also entered by the goto above, from outside the else. */
		/* Slot is mid-open/close elsewhere: wait, then restart. */
		if ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) {
			fp_drop(p, old, fp, 1);
			procfdtbl_waitfd(p, new);
#if DIAGNOSTIC
			proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
#endif
			goto startover;
		}

		if ((nfp = fdp->fd_ofiles[new]) != NULL) {
			/* Close-guarded fds may not be replaced by dup2. */
			if (fp_isguarded(nfp, GUARD_CLOSE)) {
				fp_drop(p, old, fp, 1);
				error = fp_guard_exception(p,
				    new, nfp, kGUARD_EXC_CLOSE);
				proc_fdunlock(p);
				return error;
			}
			/*
			 * Close the previous occupant; this unlocks the
			 * fdlock, and FD_DUP2RESV keeps the slot reserved
			 * for us while it is dropped.
			 */
			(void)fp_close_and_unlock(p, p_cred, new, nfp, FD_DUP2RESV);
			proc_fdlock(p);
			assert(fdp->fd_ofileflags[new] & UF_RESERVED);
		} else {
#if DIAGNOSTIC
			if (fdp->fd_ofiles[new] != NULL) {
				panic("dup2: no ref on fileproc %d", new);
			}
#endif
			procfdtbl_reservefd(p, new);
		}
	}
#if DIAGNOSTIC
	if (fdp->fd_ofiles[new] != 0) {
		panic("dup2: overwriting fd_ofiles with new %d", new);
	}
	if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) {
		panic("dup2: unreserved fileflags with new %d", new);
	}
#endif
	/* Slot reserved and empty: install the duplicate. */
	error = finishdup(p, p_cred, old, new, 0, retval);
	fp_drop(p, old, fp, 1);
	proc_fdunlock(p);

	return error;
}
2529 
2530 
2531 /*
2532  * fcntl
2533  *
2534  * Description:	The file control system call.
2535  *
2536  * Parameters:	p			Process performing the fcntl
2537  *		uap->fd				The fd to operate against
2538  *		uap->cmd			The command to perform
2539  *		uap->arg			Pointer to the command argument
2540  *		retval				Pointer to the call return area
2541  *
2542  * Returns:	0			Success
2543  *		!0				Errno (see fcntl_nocancel)
2544  *
2545  * Implicit returns:
2546  *		*retval (modified)		fcntl return value (if any)
2547  *
2548  * Notes:	This system call differs from fcntl_nocancel() in that it
2549  *		tests for cancellation prior to performing a potentially
2550  *		blocking operation.
2551  */
int
sys_fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval)
{
	/* Honor a pending thread cancellation before possibly blocking. */
	__pthread_testcancel(1);
	return sys_fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval);
}
2558 
/* Entitlement letting F_OPENFROM use the fd opener's credentials. */
#define ACCOUNT_OPENFROM_ENTITLEMENT \
	"com.apple.private.vfs.role-account-openfrom"
2561 
2562 /*
2563  * sys_fcntl_nocancel
2564  *
2565  * Description:	A non-cancel-testing file control system call.
2566  *
2567  * Parameters:	p				Process performing the fcntl
2568  *		uap->fd				The fd to operate against
2569  *		uap->cmd			The command to perform
2570  *		uap->arg			Pointer to the command argument
2571  *		retval				Pointer to the call return area
2572  *
2573  * Returns:	0				Success
2574  *		EINVAL
2575  *	fp_lookup:EBADF				Bad file descriptor
2576  * [F_DUPFD]
2577  *	fdalloc:EMFILE
2578  *	fdalloc:ENOMEM
2579  *	finishdup:EBADF
2580  *	finishdup:ENOMEM
2581  * [F_SETOWN]
2582  *		ESRCH
2583  * [F_SETLK]
2584  *		EBADF
2585  *		EOVERFLOW
2586  *	copyin:EFAULT
2587  *	vnode_getwithref:???
2588  *	VNOP_ADVLOCK:???
2589  *	msleep:ETIMEDOUT
2590  * [F_GETLK]
2591  *		EBADF
2592  *		EOVERFLOW
2593  *	copyin:EFAULT
2594  *	copyout:EFAULT
2595  *	vnode_getwithref:???
2596  *	VNOP_ADVLOCK:???
2597  * [F_PREALLOCATE]
2598  *		EBADF
2599  *		EFBIG
2600  *		EINVAL
2601  *		ENOSPC
2602  *	copyin:EFAULT
2603  *	copyout:EFAULT
2604  *	vnode_getwithref:???
2605  *	VNOP_ALLOCATE:???
2606  * [F_SETSIZE,F_RDADVISE]
2607  *		EBADF
2608  *		EINVAL
2609  *	copyin:EFAULT
2610  *	vnode_getwithref:???
2611  * [F_RDAHEAD,F_NOCACHE]
2612  *		EBADF
2613  *	vnode_getwithref:???
2614  * [???]
2615  *
2616  * Implicit returns:
2617  *		*retval (modified)		fcntl return value (if any)
2618  */
/*
 * Declares a local vfs_context built from the current thread and the
 * credential stored in the fileproc `fp` (which must be in scope).
 */
#define SYS_FCNTL_DECLARE_VFS_CONTEXT(context) \
	struct vfs_context context = { \
	    .vc_thread = current_thread(), \
	    .vc_ucred = fp->f_cred, \
	}
2624 
2625 static user_addr_t
sys_fnctl_parse_arg(proc_t p,user_long_t arg)2626 sys_fnctl_parse_arg(proc_t p, user_long_t arg)
2627 {
2628 	/*
2629 	 * Since the arg parameter is defined as a long but may be
2630 	 * either a long or a pointer we must take care to handle
2631 	 * sign extension issues.  Our sys call munger will sign
2632 	 * extend a long when we are called from a 32-bit process.
2633 	 * Since we can never have an address greater than 32-bits
2634 	 * from a 32-bit process we lop off the top 32-bits to avoid
2635 	 * getting the wrong address
2636 	 */
2637 	return proc_is64bit(p) ? arg : CAST_USER_ADDR_T((uint32_t)arg);
2638 }
2639 
2640 /* cleanup code common to fnctl functions, for when the fdlock is still held */
static int
sys_fcntl_out(proc_t p, int fd, struct fileproc *fp, int error)
{
	/* Release the iocount taken at entry, then the fdlock. */
	fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;   /* pass the caller's error through unchanged */
}
2648 
2649 /* cleanup code common to fnctl acting on vnodes, once they unlocked the fdlock */
static int
sys_fcntl_outdrop(proc_t p, int fd, struct fileproc *fp, struct vnode *vp, int error)
{
#pragma unused(vp)

	/* vp is only referenced here for auditing the vnode path. */
	AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
	fp_drop(p, fd, fp, 0);
	return error;   /* pass the caller's error through unchanged */
}
2659 
/* Handler signature for fcntl commands that take no vnode argument. */
typedef int (*sys_fnctl_handler_t)(proc_t p, int fd, int cmd, user_long_t arg,
    struct fileproc *fp, int32_t *retval);

/* Handler signature for fcntl commands acting on a vnode-backed fd. */
typedef int (*sys_fnctl_vnode_handler_t)(proc_t p, int fd, int cmd,
    user_long_t arg, struct fileproc *fp, struct vnode *vp, int32_t *retval);
2665 
2666 /*
2667  * SPI (private) for opening a file starting from a dir fd
2668  *
2669  * Note: do not inline to keep stack usage under control.
2670  */
__attribute__((noinline))
static int
sys_fcntl__OPENFROM(proc_t p, int fd, int cmd, user_long_t arg,
    struct fileproc *fp, struct vnode *vp, int32_t *retval)
{
#pragma unused(cmd)

	user_addr_t argp = sys_fnctl_parse_arg(p, arg);
	struct user_fopenfrom fopen;
	struct vnode_attr *va;
	struct nameidata *nd;
	int error, cmode;
	bool has_entitlement;

	/* The directory fd must have been opened for reading. */
	if ((fp->f_flag & FREAD) == 0) {
		return sys_fcntl_out(p, fd, fp, EBADF);
	}
	proc_fdunlock(p);

	if (vnode_getwithref(vp)) {
		error = ENOENT;
		goto outdrop;
	}

	/* Only valid for directories */
	if (vp->v_type != VDIR) {
		vnode_put(vp);
		error = ENOTDIR;
		goto outdrop;
	}

	/*
	 * Only entitled apps may use the credentials of the thread
	 * that opened the file descriptor.
	 * Non-entitled threads will use their own context.
	 */
	has_entitlement = IOCurrentTaskHasEntitlement(ACCOUNT_OPENFROM_ENTITLEMENT);

	/* Get flags, mode and pathname arguments. */
	if (IS_64BIT_PROCESS(p)) {
		error = copyin(argp, &fopen, sizeof(fopen));
	} else {
		/* 32-bit callers pass the narrow layout; widen it here. */
		struct user32_fopenfrom fopen32;

		error = copyin(argp, &fopen32, sizeof(fopen32));
		/*
		 * NOTE(review): if copyin failed, fopen32 is read here while
		 * uninitialized; the copied values are discarded on the
		 * error path just below — confirm this is intentional.
		 */
		fopen.o_flags = fopen32.o_flags;
		fopen.o_mode = fopen32.o_mode;
		fopen.o_pathname = CAST_USER_ADDR_T(fopen32.o_pathname);
	}
	if (error) {
		vnode_put(vp);
		goto outdrop;
	}

	/* open1() can have really deep stacks, so allocate those */
	va = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	nd = kalloc_type(struct nameidata, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	AUDIT_ARG(fflags, fopen.o_flags);
	AUDIT_ARG(mode, fopen.o_mode);
	VATTR_INIT(va);
	/* Mask off all but regular access permissions */
	cmode = ((fopen.o_mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
	VATTR_SET(va, va_mode, cmode & ACCESSPERMS);

	SYS_FCNTL_DECLARE_VFS_CONTEXT(context);

	/* Start the lookup relative to the file descriptor's vnode. */
	NDINIT(nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	    fopen.o_pathname, has_entitlement ? &context : vfs_context_current());
	nd->ni_dvp = vp;

	error = open1(has_entitlement ? &context : vfs_context_current(),
	    nd, fopen.o_flags, va, NULL, NULL, retval, AUTH_OPEN_NOAUTHFD);

	kfree_type(struct vnode_attr, va);
	kfree_type(struct nameidata, nd);

	vnode_put(vp);

outdrop:
	/* Drops the iocount on fp; vp may be NULL-equivalent only for audit. */
	return sys_fcntl_outdrop(p, fd, fp, vp, error);
}
2755 
2756 int
sys_fcntl_nocancel(proc_t p,struct fcntl_nocancel_args * uap,int32_t * retval)2757 sys_fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
2758 {
2759 	int fd = uap->fd;
2760 	int cmd = uap->cmd;
2761 	struct fileproc *fp;
2762 	struct vnode *vp = NULLVP;      /* for AUDIT_ARG() at end */
2763 	unsigned int oflags, nflags;
2764 	int i, tmp, error, error2, flg = 0;
2765 	struct flock fl = {};
2766 	struct flocktimeout fltimeout;
2767 	struct timespec *timeout = NULL;
2768 	off_t offset;
2769 	int newmin;
2770 	daddr64_t lbn, bn;
2771 	unsigned int fflag;
2772 	user_addr_t argp;
2773 	boolean_t is64bit;
2774 	int has_entitlement = 0;
2775 	kauth_cred_t p_cred;
2776 	cs_blob_add_flags_t csblob_add_flags = 0;
2777 
2778 	AUDIT_ARG(fd, uap->fd);
2779 	AUDIT_ARG(cmd, uap->cmd);
2780 
2781 	proc_fdlock(p);
2782 	if ((error = fp_lookup(p, fd, &fp, 1))) {
2783 		proc_fdunlock(p);
2784 		return error;
2785 	}
2786 
2787 	SYS_FCNTL_DECLARE_VFS_CONTEXT(context);
2788 
2789 	is64bit = proc_is64bit(p);
2790 	if (is64bit) {
2791 		argp = uap->arg;
2792 	} else {
2793 		/*
2794 		 * Since the arg parameter is defined as a long but may be
2795 		 * either a long or a pointer we must take care to handle
2796 		 * sign extension issues.  Our sys call munger will sign
2797 		 * extend a long when we are called from a 32-bit process.
2798 		 * Since we can never have an address greater than 32-bits
2799 		 * from a 32-bit process we lop off the top 32-bits to avoid
2800 		 * getting the wrong address
2801 		 */
2802 		argp = CAST_USER_ADDR_T((uint32_t)uap->arg);
2803 	}
2804 
2805 #if CONFIG_MACF
2806 	error = mac_file_check_fcntl(kauth_cred_get(), fp->fp_glob, cmd, uap->arg);
2807 	if (error) {
2808 		goto out;
2809 	}
2810 #endif
2811 
2812 	switch (cmd) {
2813 	case F_DUPFD:
2814 	case F_DUPFD_CLOEXEC:
2815 		if (fp_isguarded(fp, GUARD_DUP)) {
2816 			error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
2817 			goto out;
2818 		}
2819 		newmin = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
2820 		AUDIT_ARG(value32, newmin);
2821 		if (newmin < 0 || newmin >= proc_limitgetcur_nofile(p)) {
2822 			error = EINVAL;
2823 			goto out;
2824 		}
2825 		if ((error = fdalloc(p, newmin, &i))) {
2826 			goto out;
2827 		}
2828 		p_cred = current_cached_proc_cred(p);
2829 		error = finishdup(p, p_cred, fd, i,
2830 		    cmd == F_DUPFD_CLOEXEC ? FP_CLOEXEC : 0, retval);
2831 		goto out;
2832 
2833 	case F_GETFD:
2834 		*retval = (fp->fp_flags & FP_CLOEXEC) ? FD_CLOEXEC : 0;
2835 		error = 0;
2836 		goto out;
2837 
2838 	case F_SETFD:
2839 		AUDIT_ARG(value32, (uint32_t)uap->arg);
2840 		if (uap->arg & FD_CLOEXEC) {
2841 			fp->fp_flags |= FP_CLOEXEC;
2842 			error = 0;
2843 		} else if (!fp->fp_guard_attrs) {
2844 			fp->fp_flags &= ~FP_CLOEXEC;
2845 			error = 0;
2846 		} else {
2847 			error = fp_guard_exception(p,
2848 			    fd, fp, kGUARD_EXC_NOCLOEXEC);
2849 		}
2850 		goto out;
2851 
2852 	case F_GETFL:
2853 		fflag = fp->f_flag;
2854 		if ((fflag & O_EVTONLY) && proc_disallow_rw_for_o_evtonly(p)) {
2855 			/*
2856 			 * We insert back F_READ so that conversion back to open flags with
2857 			 * OFLAGS() will come out right. We only need to set 'FREAD' as the
2858 			 * 'O_RDONLY' is always implied.
2859 			 */
2860 			fflag |= FREAD;
2861 		}
2862 		*retval = OFLAGS(fflag);
2863 		error = 0;
2864 		goto out;
2865 
2866 	case F_SETFL:
2867 		// FIXME (rdar://54898652)
2868 		//
2869 		// this code is broken if fnctl(F_SETFL), ioctl() are
2870 		// called concurrently for the same fileglob.
2871 
2872 		tmp = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
2873 		AUDIT_ARG(value32, tmp);
2874 
2875 		os_atomic_rmw_loop(&fp->f_flag, oflags, nflags, relaxed, {
2876 			nflags  = oflags & ~FCNTLFLAGS;
2877 			nflags |= FFLAGS(tmp) & FCNTLFLAGS;
2878 		});
2879 		tmp = nflags & FNONBLOCK;
2880 		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
2881 		if (error) {
2882 			goto out;
2883 		}
2884 		tmp = nflags & FASYNC;
2885 		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
2886 		if (!error) {
2887 			goto out;
2888 		}
2889 		os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
2890 		tmp = 0;
2891 		(void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
2892 		goto out;
2893 
2894 	case F_GETOWN:
2895 		if (fp->f_type == DTYPE_SOCKET) {
2896 			*retval = ((struct socket *)fp_get_data(fp))->so_pgid;
2897 			error = 0;
2898 			goto out;
2899 		}
2900 		error = fo_ioctl(fp, TIOCGPGRP, (caddr_t)retval, &context);
2901 		*retval = -*retval;
2902 		goto out;
2903 
2904 	case F_SETOWN:
2905 		tmp = CAST_DOWN_EXPLICIT(pid_t, uap->arg); /* arg is an int, so we won't lose bits */
2906 		AUDIT_ARG(value32, tmp);
2907 		if (fp->f_type == DTYPE_SOCKET) {
2908 			((struct socket *)fp_get_data(fp))->so_pgid = tmp;
2909 			error = 0;
2910 			goto out;
2911 		}
2912 		if (fp->f_type == DTYPE_PIPE) {
2913 			error =  fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
2914 			goto out;
2915 		}
2916 
2917 		if (tmp <= 0) {
2918 			tmp = -tmp;
2919 		} else {
2920 			proc_t p1 = proc_find(tmp);
2921 			if (p1 == 0) {
2922 				error = ESRCH;
2923 				goto out;
2924 			}
2925 			tmp = (int)p1->p_pgrpid;
2926 			proc_rele(p1);
2927 		}
2928 		error =  fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
2929 		goto out;
2930 
2931 	case F_SETNOSIGPIPE:
2932 		tmp = CAST_DOWN_EXPLICIT(int, uap->arg);
2933 		if (fp->f_type == DTYPE_SOCKET) {
2934 #if SOCKETS
2935 			error = sock_setsockopt((struct socket *)fp_get_data(fp),
2936 			    SOL_SOCKET, SO_NOSIGPIPE, &tmp, sizeof(tmp));
2937 #else
2938 			error = EINVAL;
2939 #endif
2940 		} else {
2941 			struct fileglob *fg = fp->fp_glob;
2942 
2943 			lck_mtx_lock_spin(&fg->fg_lock);
2944 			if (tmp) {
2945 				fg->fg_lflags |= FG_NOSIGPIPE;
2946 			} else {
2947 				fg->fg_lflags &= ~FG_NOSIGPIPE;
2948 			}
2949 			lck_mtx_unlock(&fg->fg_lock);
2950 			error = 0;
2951 		}
2952 		goto out;
2953 
2954 	case F_GETNOSIGPIPE:
2955 		if (fp->f_type == DTYPE_SOCKET) {
2956 #if SOCKETS
2957 			int retsize = sizeof(*retval);
2958 			error = sock_getsockopt((struct socket *)fp_get_data(fp),
2959 			    SOL_SOCKET, SO_NOSIGPIPE, retval, &retsize);
2960 #else
2961 			error = EINVAL;
2962 #endif
2963 		} else {
2964 			*retval = (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) ?
2965 			    1 : 0;
2966 			error = 0;
2967 		}
2968 		goto out;
2969 
2970 	case F_SETCONFINED:
2971 		/*
2972 		 * If this is the only reference to this fglob in the process
2973 		 * and it's already marked as close-on-fork then mark it as
2974 		 * (immutably) "confined" i.e. any fd that points to it will
2975 		 * forever be close-on-fork, and attempts to use an IPC
2976 		 * mechanism to move the descriptor elsewhere will fail.
2977 		 */
2978 		if (CAST_DOWN_EXPLICIT(int, uap->arg)) {
2979 			struct fileglob *fg = fp->fp_glob;
2980 
2981 			lck_mtx_lock_spin(&fg->fg_lock);
2982 			if (fg->fg_lflags & FG_CONFINED) {
2983 				error = 0;
2984 			} else if (1 != os_ref_get_count_raw(&fg->fg_count)) {
2985 				error = EAGAIN; /* go close the dup .. */
2986 			} else if (fp->fp_flags & FP_CLOFORK) {
2987 				fg->fg_lflags |= FG_CONFINED;
2988 				error = 0;
2989 			} else {
2990 				error = EBADF;  /* open without O_CLOFORK? */
2991 			}
2992 			lck_mtx_unlock(&fg->fg_lock);
2993 		} else {
2994 			/*
2995 			 * Other subsystems may have built on the immutability
2996 			 * of FG_CONFINED; clearing it may be tricky.
2997 			 */
2998 			error = EPERM;          /* immutable */
2999 		}
3000 		goto out;
3001 
3002 	case F_GETCONFINED:
3003 		*retval = (fp->fp_glob->fg_lflags & FG_CONFINED) ? 1 : 0;
3004 		error = 0;
3005 		goto out;
3006 
3007 	case F_SETLKWTIMEOUT:
3008 	case F_SETLKW:
3009 	case F_OFD_SETLKWTIMEOUT:
3010 	case F_OFD_SETLKW:
3011 		flg |= F_WAIT;
3012 		OS_FALLTHROUGH;
3013 
3014 	case F_SETLK:
3015 	case F_OFD_SETLK:
3016 		if (fp->f_type != DTYPE_VNODE) {
3017 			error = EBADF;
3018 			goto out;
3019 		}
3020 		vp = (struct vnode *)fp_get_data(fp);
3021 
3022 		fflag = fp->f_flag;
3023 		offset = fp->f_offset;
3024 		proc_fdunlock(p);
3025 
3026 		/* Copy in the lock structure */
3027 		if (F_SETLKWTIMEOUT == cmd || F_OFD_SETLKWTIMEOUT == cmd) {
3028 			error = copyin(argp, (caddr_t) &fltimeout, sizeof(fltimeout));
3029 			if (error) {
3030 				goto outdrop;
3031 			}
3032 			fl = fltimeout.fl;
3033 			timeout = &fltimeout.timeout;
3034 		} else {
3035 			error = copyin(argp, (caddr_t)&fl, sizeof(fl));
3036 			if (error) {
3037 				goto outdrop;
3038 			}
3039 		}
3040 
3041 		/* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
3042 		/* and ending byte for EOVERFLOW in SEEK_SET */
3043 		error = check_file_seek_range(&fl, offset);
3044 		if (error) {
3045 			goto outdrop;
3046 		}
3047 
3048 		if ((error = vnode_getwithref(vp))) {
3049 			goto outdrop;
3050 		}
3051 		if (fl.l_whence == SEEK_CUR) {
3052 			fl.l_start += offset;
3053 		}
3054 
3055 #if CONFIG_MACF
3056 		error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob,
3057 		    F_SETLK, &fl);
3058 		if (error) {
3059 			(void)vnode_put(vp);
3060 			goto outdrop;
3061 		}
3062 #endif
3063 
3064 #if CONFIG_FILE_LEASES
3065 		(void)vnode_breaklease(vp, O_WRONLY, vfs_context_current());
3066 #endif
3067 
3068 		switch (cmd) {
3069 		case F_OFD_SETLK:
3070 		case F_OFD_SETLKW:
3071 		case F_OFD_SETLKWTIMEOUT:
3072 			flg |= F_OFD_LOCK;
3073 			if (fp->fp_glob->fg_lflags & FG_CONFINED) {
3074 				flg |= F_CONFINED;
3075 			}
3076 			switch (fl.l_type) {
3077 			case F_RDLCK:
3078 				if ((fflag & FREAD) == 0) {
3079 					error = EBADF;
3080 					break;
3081 				}
3082 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3083 				    F_SETLK, &fl, flg, &context, timeout);
3084 				break;
3085 			case F_WRLCK:
3086 				if ((fflag & FWRITE) == 0) {
3087 					error = EBADF;
3088 					break;
3089 				}
3090 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3091 				    F_SETLK, &fl, flg, &context, timeout);
3092 				break;
3093 			case F_UNLCK:
3094 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3095 				    F_UNLCK, &fl, F_OFD_LOCK, &context,
3096 				    timeout);
3097 				break;
3098 			default:
3099 				error = EINVAL;
3100 				break;
3101 			}
3102 			if (0 == error &&
3103 			    (F_RDLCK == fl.l_type || F_WRLCK == fl.l_type)) {
3104 				struct fileglob *fg = fp->fp_glob;
3105 
3106 				/*
3107 				 * arrange F_UNLCK on last close (once
3108 				 * set, FG_HAS_OFDLOCK is immutable)
3109 				 */
3110 				if ((fg->fg_lflags & FG_HAS_OFDLOCK) == 0) {
3111 					lck_mtx_lock_spin(&fg->fg_lock);
3112 					fg->fg_lflags |= FG_HAS_OFDLOCK;
3113 					lck_mtx_unlock(&fg->fg_lock);
3114 				}
3115 			}
3116 			break;
3117 		default:
3118 			flg |= F_POSIX;
3119 			switch (fl.l_type) {
3120 			case F_RDLCK:
3121 				if ((fflag & FREAD) == 0) {
3122 					error = EBADF;
3123 					break;
3124 				}
3125 				// XXX UInt32 unsafe for LP64 kernel
3126 				os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
3127 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3128 				    F_SETLK, &fl, flg, &context, timeout);
3129 				break;
3130 			case F_WRLCK:
3131 				if ((fflag & FWRITE) == 0) {
3132 					error = EBADF;
3133 					break;
3134 				}
3135 				// XXX UInt32 unsafe for LP64 kernel
3136 				os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
3137 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3138 				    F_SETLK, &fl, flg, &context, timeout);
3139 				break;
3140 			case F_UNLCK:
3141 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3142 				    F_UNLCK, &fl, F_POSIX, &context, timeout);
3143 				break;
3144 			default:
3145 				error = EINVAL;
3146 				break;
3147 			}
3148 			break;
3149 		}
3150 		(void) vnode_put(vp);
3151 		goto outdrop;
3152 
	/*
	 * F_GETLK / F_OFD_GETLK / F_GETLKPID / F_OFD_GETLKPID
	 *
	 * Query advisory record-lock state on a vnode-backed fd: copy in a
	 * struct flock, validate it, ask the filesystem via
	 * VNOP_ADVLOCK(F_GETLK*), then copy the (possibly updated) struct
	 * flock back out to user space.
	 */
	case F_GETLK:
	case F_OFD_GETLK:
	case F_GETLKPID:
	case F_OFD_GETLKPID:
		/* Advisory locks only apply to vnode-backed descriptors. */
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);

		/* Capture f_offset before dropping the fd lock (needed for SEEK_CUR). */
		offset = fp->f_offset;
		proc_fdunlock(p);

		/* Copy in the lock structure */
		error = copyin(argp, (caddr_t)&fl, sizeof(fl));
		if (error) {
			goto outdrop;
		}

		/* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
		/* and ending byte for EOVERFLOW in SEEK_SET */
		error = check_file_seek_range(&fl, offset);
		if (error) {
			goto outdrop;
		}

		/* A SEEK_SET lock may not start before the beginning of the file. */
		if ((fl.l_whence == SEEK_SET) && (fl.l_start < 0)) {
			error = EINVAL;
			goto outdrop;
		}

		/* Only the three standard lock types are accepted. */
		switch (fl.l_type) {
		case F_RDLCK:
		case F_UNLCK:
		case F_WRLCK:
			break;
		default:
			error = EINVAL;
			goto outdrop;
		}

		/* Only the three standard whence values are accepted. */
		switch (fl.l_whence) {
		case SEEK_CUR:
		case SEEK_SET:
		case SEEK_END:
			break;
		default:
			error = EINVAL;
			goto outdrop;
		}

		if ((error = vnode_getwithref(vp)) == 0) {
			/* Convert a SEEK_CUR range to an absolute file offset. */
			if (fl.l_whence == SEEK_CUR) {
				fl.l_start += offset;
			}

#if CONFIG_MACF
			error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob,
			    cmd, &fl);
			if (error == 0)
#endif
			/* OFD variants identify the lock owner by fileglob, not proc. */
			switch (cmd) {
			case F_OFD_GETLK:
				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
				    F_GETLK, &fl, F_OFD_LOCK, &context, NULL);
				break;
			case F_OFD_GETLKPID:
				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
				    F_GETLKPID, &fl, F_OFD_LOCK, &context, NULL);
				break;
			default:
				error = VNOP_ADVLOCK(vp, (caddr_t)p,
				    cmd, &fl, F_POSIX, &context, NULL);
				break;
			}

			(void)vnode_put(vp);

			/* Return the (possibly updated) lock description to the caller. */
			if (error == 0) {
				error = copyout((caddr_t)&fl, argp, sizeof(fl));
			}
		}
		goto outdrop;
3236 
	/*
	 * F_PREALLOCATE: preallocate storage for a writable vnode-backed fd.
	 * Translates the user's fstore_t flags/position mode into
	 * VNOP_ALLOCATE flags and reports back (in fst_bytesalloc) how much
	 * was actually allocated, even if the allocation itself failed.
	 */
	case F_PREALLOCATE: {
		fstore_t alloc_struct;    /* structure for allocate command */
		u_int32_t alloc_flags = 0;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		/* make sure that we have write permission */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EBADF;
			goto outdrop;
		}

		error = copyin(argp, (caddr_t)&alloc_struct, sizeof(alloc_struct));
		if (error) {
			goto outdrop;
		}

		/* now set the space allocated to 0 */
		alloc_struct.fst_bytesalloc = 0;

		/*
		 * Do some simple parameter checking
		 */

		/* set up the flags */

		alloc_flags |= PREALLOCATE;

		if (alloc_struct.fst_flags & F_ALLOCATECONTIG) {
			alloc_flags |= ALLOCATECONTIG;
		}

		if (alloc_struct.fst_flags & F_ALLOCATEALL) {
			alloc_flags |= ALLOCATEALL;
		}

		if (alloc_struct.fst_flags & F_ALLOCATEPERSIST) {
			alloc_flags |= ALLOCATEPERSIST;
		}

		/*
		 * Do any position mode specific stuff.  The only
		 * position mode  supported now is PEOFPOSMODE
		 */

		switch (alloc_struct.fst_posmode) {
		case F_PEOFPOSMODE:
			/* Allocating from EOF must not specify an explicit offset. */
			if (alloc_struct.fst_offset != 0) {
				error = EINVAL;
				goto outdrop;
			}

			alloc_flags |= ALLOCATEFROMPEOF;
			break;

		case F_VOLPOSMODE:
			/* Volume-relative allocation requires a positive offset. */
			if (alloc_struct.fst_offset <= 0) {
				error = EINVAL;
				goto outdrop;
			}

			alloc_flags |= ALLOCATEFROMVOL;
			break;

		default: {
			error = EINVAL;
			goto outdrop;
		}
		}
		if ((error = vnode_getwithref(vp)) == 0) {
			/*
			 * call allocate to get the space
			 */
			error = VNOP_ALLOCATE(vp, alloc_struct.fst_length, alloc_flags,
			    &alloc_struct.fst_bytesalloc, alloc_struct.fst_offset,
			    &context);
			(void)vnode_put(vp);

			/* Copy fst_bytesalloc back out even when VNOP_ALLOCATE failed. */
			error2 = copyout((caddr_t)&alloc_struct, argp, sizeof(alloc_struct));

			if (error == 0) {
				error = error2;
			}
		}
		goto outdrop;
	}
	/*
	 * F_PUNCHHOLE: deallocate a region of a file.  Requires FWRITE (and,
	 * with MACF, write authorization); the actual hole punching is
	 * delegated to the filesystem via VNOP_IOCTL(F_PUNCHHOLE).
	 */
	case F_PUNCHHOLE: {
		fpunchhole_t args;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		/* need write permissions */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EPERM;
			goto outdrop;
		}

		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
			goto outdrop;
		}

		if ((error = vnode_getwithref(vp))) {
			goto outdrop;
		}

#if CONFIG_MACF
		if ((error = mac_vnode_check_write(&context, fp->fp_glob->fg_cred, vp))) {
			(void)vnode_put(vp);
			goto outdrop;
		}
#endif

		error = VNOP_IOCTL(vp, F_PUNCHHOLE, (caddr_t)&args, 0, &context);
		(void)vnode_put(vp);

		goto outdrop;
	}
	/*
	 * F_TRIM_ACTIVE_FILE: issue a trim on a file's blocks while it is in
	 * active use.  Gated by the PRIV_TRIM_ACTIVE_FILE privilege and
	 * FWRITE; the work is done by VNOP_IOCTL(F_TRIM_ACTIVE_FILE).
	 */
	case F_TRIM_ACTIVE_FILE: {
		ftrimactivefile_t args;

		/* Privileged operation; checked before any other validation. */
		if (priv_check_cred(kauth_cred_get(), PRIV_TRIM_ACTIVE_FILE, 0)) {
			error = EACCES;
			goto out;
		}

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		/* need write permissions */
		if ((fp->f_flag & FWRITE) == 0) {
			error = EPERM;
			goto outdrop;
		}

		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
			goto outdrop;
		}

		if ((error = vnode_getwithref(vp))) {
			goto outdrop;
		}

		error = VNOP_IOCTL(vp, F_TRIM_ACTIVE_FILE, (caddr_t)&args, 0, &context);
		(void)vnode_put(vp);

		goto outdrop;
	}
	/*
	 * F_SPECULATIVE_READ: request a speculative read of a file range.
	 * The requested offset is page-aligned down (the length grown to
	 * compensate), overflow-checked, then handed to the filesystem via
	 * VNOP_IOCTL(F_SPECULATIVE_READ).
	 */
	case F_SPECULATIVE_READ: {
		fspecread_t args;
		off_t temp_length = 0;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
			goto outdrop;
		}

		/* Discard invalid offsets or lengths */
		if ((args.fsr_offset < 0) || (args.fsr_length < 0)) {
			error = EINVAL;
			goto outdrop;
		}

		/*
		 * Round the file offset down to a page-size boundary (or to 0).
		 * The filesystem will need to round the length up to the end of the page boundary
		 * or to the EOF of the file.
		 */
		uint64_t foff = (((uint64_t)args.fsr_offset) & ~((uint64_t)PAGE_MASK));
		uint64_t foff_delta = args.fsr_offset - foff;
		args.fsr_offset = (off_t) foff;

		/*
		 * Now add in the delta to the supplied length. Since we may have adjusted the
		 * offset, increase it by the amount that we adjusted.
		 */
		if (os_add_overflow(args.fsr_length, foff_delta, &args.fsr_length)) {
			error = EOVERFLOW;
			goto outdrop;
		}

		/*
		 * Make sure (fsr_offset + fsr_length) does not overflow.
		 */
		if (os_add_overflow(args.fsr_offset, args.fsr_length, &temp_length)) {
			error = EOVERFLOW;
			goto outdrop;
		}

		if ((error = vnode_getwithref(vp))) {
			goto outdrop;
		}
		error = VNOP_IOCTL(vp, F_SPECULATIVE_READ, (caddr_t)&args, 0, &context);
		(void)vnode_put(vp);

		goto outdrop;
	}
	/*
	 * F_ATTRIBUTION_TAG: query/manipulate a file's attribution tag via
	 * VNOP_IOCTL(F_ATTRIBUTION_TAG); on success the (possibly updated)
	 * fattributiontag_t is copied back to user space.
	 */
	case F_ATTRIBUTION_TAG: {
		fattributiontag_t args;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
			goto outdrop;
		}

		if ((error = vnode_getwithref(vp))) {
			goto outdrop;
		}

		error = VNOP_IOCTL(vp, F_ATTRIBUTION_TAG, (caddr_t)&args, 0, &context);
		(void)vnode_put(vp);

		/* Return the filesystem's answer in the same structure. */
		if (error == 0) {
			error = copyout((caddr_t)&args, argp, sizeof(args));
		}

		goto outdrop;
	}
	/*
	 * F_SETSIZE: truncate/extend a file, root-only.  Append-only files
	 * are refused.  With the PRIV_VFS_SETSIZE privilege the resize skips
	 * zero-filling (IO_NOZEROFILL); without it, vnode_setsize zero-fills
	 * any grown range.
	 */
	case F_SETSIZE: {
		struct vnode_attr va;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		/* Resizing requires the fd to have been opened for writing. */
		if ((fp->fp_glob->fg_flag & FWRITE) == 0) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		/* The argument is the new size (an off_t), passed by reference. */
		error = copyin(argp, (caddr_t)&offset, sizeof(off_t));
		if (error) {
			goto outdrop;
		}
		AUDIT_ARG(value64, offset);

		error = vnode_getwithref(vp);
		if (error) {
			goto outdrop;
		}

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_flags);

		error = vnode_getattr(vp, &va, vfs_context_current());
		if (error) {
			vnode_put(vp);
			goto outdrop;
		}

		/* Don't allow F_SETSIZE if the file has append-only flag set. */
		if (va.va_flags & APPEND) {
			error = EPERM;
			vnode_put(vp);
			goto outdrop;
		}

#if CONFIG_MACF
		error = mac_vnode_check_truncate(&context,
		    fp->fp_glob->fg_cred, vp);
		if (error) {
			(void)vnode_put(vp);
			goto outdrop;
		}
#endif
		/*
		 * Make sure that we are root.  Growing a file
		 * without zero filling the data is a security hole.
		 */
		if (!kauth_cred_issuser(kauth_cred_get())) {
			error = EACCES;
		} else {
			/*
			 * Require privilege to change file size without zerofill,
			 * else will change the file size and zerofill it.
			 */
			error = priv_check_cred(kauth_cred_get(), PRIV_VFS_SETSIZE, 0);
			if (error == 0) {
				error = vnode_setsize(vp, offset, IO_NOZEROFILL, &context);
			} else {
				error = vnode_setsize(vp, offset, 0, &context);
			}

#if CONFIG_MACF
			if (error == 0) {
				mac_vnode_notify_truncate(&context, fp->fp_glob->fg_cred, vp);
			}
#endif
		}

		(void)vnode_put(vp);
		goto outdrop;
	}
3563 
	/*
	 * F_RDAHEAD: toggle read-ahead on the fileglob.  A non-zero arg
	 * enables read-ahead (clears FNORDAHEAD); zero disables it (sets
	 * FNORDAHEAD).  Done atomically; the fd lock is not dropped.
	 */
	case F_RDAHEAD:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		if (uap->arg) {
			os_atomic_andnot(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
		} else {
			os_atomic_or(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
		}
		goto out;
3575 
	/*
	 * F_NOCACHE / F_NOCACHE_EXT: toggle caching on the fileglob.  A
	 * non-zero arg sets FNOCACHE (disable caching); zero clears it.
	 * The _EXT variant additionally requires a regular file and tracks
	 * its extended state via the repurposed O_NOCTTY bit.
	 */
	case F_NOCACHE:
	case F_NOCACHE_EXT:
		if ((fp->f_type != DTYPE_VNODE) || (cmd == F_NOCACHE_EXT &&
		    (vnode_vtype((struct vnode *)fp_get_data(fp)) != VREG))) {
			error = EBADF;
			goto out;
		}
		if (uap->arg) {
			os_atomic_or(&fp->fp_glob->fg_flag, FNOCACHE, relaxed);
			if (cmd == F_NOCACHE_EXT) {
				/*
				 * We're reusing the O_NOCTTY bit for this purpose as it is only
				 * used for open(2) and is mutually exclusive with a regular file.
				 */
				os_atomic_or(&fp->fp_glob->fg_flag, O_NOCTTY, relaxed);
			}
		} else {
			/* Clearing either variant drops both the base and EXT bits. */
			os_atomic_andnot(&fp->fp_glob->fg_flag, FNOCACHE | O_NOCTTY, relaxed);
		}
		goto out;
3596 
	/*
	 * F_NODIRECT: toggle the FNODIRECT flag on the fileglob (non-zero
	 * arg sets it, zero clears it), atomically, without dropping the
	 * fd lock.
	 */
	case F_NODIRECT:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		if (uap->arg) {
			os_atomic_or(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
		} else {
			os_atomic_andnot(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
		}
		goto out;
3608 
	/*
	 * F_SINGLE_WRITER: toggle the FSINGLE_WRITER flag on the fileglob
	 * (non-zero arg sets it, zero clears it), atomically, without
	 * dropping the fd lock.
	 */
	case F_SINGLE_WRITER:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		if (uap->arg) {
			os_atomic_or(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
		} else {
			os_atomic_andnot(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
		}
		goto out;
3620 
	/*
	 * F_GLOBAL_NOCACHE: set or clear the no-cache state on the vnode
	 * itself (affects all opens, not just this fd).  Returns the
	 * previous state in *retval.
	 */
	case F_GLOBAL_NOCACHE:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = vnode_getwithref(vp)) == 0) {
			/* Report the prior setting before changing it. */
			*retval = vnode_isnocache(vp);

			if (uap->arg) {
				vnode_setnocache(vp);
			} else {
				vnode_clearnocache(vp);
			}

			(void)vnode_put(vp);
		}
		goto outdrop;
3641 
	/*
	 * F_CHECK_OPENEVT: set or clear the vnode's open-event flag (arg
	 * non-zero sets, zero clears), returning the previous state in
	 * *retval.
	 */
	case F_CHECK_OPENEVT:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = vnode_getwithref(vp)) == 0) {
			/* Report the prior setting before changing it. */
			*retval = vnode_is_openevt(vp);

			if (uap->arg) {
				vnode_set_openevt(vp);
			} else {
				vnode_clear_openevt(vp);
			}

			(void)vnode_put(vp);
		}
		goto outdrop;
3662 
	/*
	 * F_RDADVISE: issue a read-ahead advisory for a file range.  The
	 * struct radvisory is validated (non-negative offset/count) and
	 * passed to the filesystem via VNOP_IOCTL(F_RDADVISE).
	 */
	case F_RDADVISE: {
		struct radvisory ra_struct;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct)))) {
			goto outdrop;
		}
		if (ra_struct.ra_offset < 0 || ra_struct.ra_count < 0) {
			error = EINVAL;
			goto outdrop;
		}
		if ((error = vnode_getwithref(vp)) == 0) {
			error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context);

			(void)vnode_put(vp);
		}
		goto outdrop;
	}
3687 
	/*
	 * F_FLUSH_DATA: start an asynchronous flush of the file's dirty data
	 * via VNOP_FSYNC with MNT_NOWAIT.
	 */
	case F_FLUSH_DATA:

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = vnode_getwithref(vp)) == 0) {
			error = VNOP_FSYNC(vp, MNT_NOWAIT, &context);

			(void)vnode_put(vp);
		}
		goto outdrop;
3703 
	/*
	 * F_LOG2PHYS / F_LOG2PHYS_EXT: map a logical file offset to a
	 * physical device offset.  Classic F_LOG2PHYS uses the fd's current
	 * f_offset and one device block; the _EXT variant takes the offset
	 * and a contiguous-bytes request from the user's struct log2phys
	 * and also reports how many contiguous bytes follow the mapping.
	 */
	case F_LOG2PHYS:
	case F_LOG2PHYS_EXT: {
		struct log2phys l2p_struct = {};    /* logical-to-physical mapping result */
		int devBlockSize;

		off_t file_offset = 0;
		size_t a_size = 0;
		size_t run = 0;

		if (cmd == F_LOG2PHYS_EXT) {
			error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct));
			if (error) {
				goto out;
			}
			/* On input, l2p_devoffset carries the logical file offset. */
			file_offset = l2p_struct.l2p_devoffset;
		} else {
			file_offset = fp->f_offset;
		}
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);
		if ((error = vnode_getwithref(vp))) {
			goto outdrop;
		}
		/* Convert the file offset to a logical block, then back to the
		 * block-aligned offset the mapping will be relative to. */
		error = VNOP_OFFTOBLK(vp, file_offset, &lbn);
		if (error) {
			(void)vnode_put(vp);
			goto outdrop;
		}
		error = VNOP_BLKTOOFF(vp, lbn, &offset);
		if (error) {
			(void)vnode_put(vp);
			goto outdrop;
		}
		devBlockSize = vfs_devblocksize(vnode_mount(vp));
		if (cmd == F_LOG2PHYS_EXT) {
			if (l2p_struct.l2p_contigbytes < 0) {
				vnode_put(vp);
				error = EINVAL;
				goto outdrop;
			}

			/* Clamp the user's contig request to what size_t can hold. */
			a_size = (size_t)MIN((uint64_t)l2p_struct.l2p_contigbytes, SIZE_MAX);
		} else {
			a_size = devBlockSize;
		}

		error = VNOP_BLOCKMAP(vp, offset, a_size, &bn, &run, NULL, 0, &context);

		(void)vnode_put(vp);

		if (!error) {
			l2p_struct.l2p_flags = 0;       /* for now */
			if (cmd == F_LOG2PHYS_EXT) {
				/* Contig bytes remaining from file_offset, not from the block start. */
				l2p_struct.l2p_contigbytes = run - (file_offset - offset);
			} else {
				l2p_struct.l2p_contigbytes = 0; /* for now */
			}

			/*
			 * The block number being -1 suggests that the file offset is not backed
			 * by any real blocks on-disk.  As a result, just let it be passed back up wholesale.
			 */
			if (bn == -1) {
				/* Don't multiply it by the block size */
				l2p_struct.l2p_devoffset = bn;
			} else {
				l2p_struct.l2p_devoffset = bn * devBlockSize;
				l2p_struct.l2p_devoffset += file_offset - offset;
			}
			error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct));
		}
		goto outdrop;
	}
	/*
	 * F_GETPATH / F_GETPATH_NOFIRMLINK: resolve the vnode to a path
	 * (the NOFIRMLINK variant bypasses firmlink translation) and copy
	 * it out to the caller's MAXPATHLEN buffer.
	 */
	case F_GETPATH:
	case F_GETPATH_NOFIRMLINK: {
		char *pathbufp;
		size_t pathlen;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		/* Temporary name buffer from the dedicated NAMEI zone. */
		pathlen = MAXPATHLEN;
		pathbufp = zalloc(ZV_NAMEI);

		if ((error = vnode_getwithref(vp)) == 0) {
			error = vn_getpath_ext(vp, NULL, pathbufp,
			    &pathlen, cmd == F_GETPATH_NOFIRMLINK ?
			    VN_GETPATH_NO_FIRMLINK : 0);
			(void)vnode_put(vp);

			if (error == 0) {
				error = copyout((caddr_t)pathbufp, argp, pathlen);
			}
		}
		zfree(ZV_NAMEI, pathbufp);
		goto outdrop;
	}
3809 
	/*
	 * F_PATHPKG_CHECK: copy in a path string and ask the filesystem
	 * whether this vnode lies inside that "package" path; the result is
	 * returned through *retval by vn_path_package_check().
	 */
	case F_PATHPKG_CHECK: {
		char *pathbufp;
		size_t pathlen;

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		/* Temporary name buffer from the dedicated NAMEI zone. */
		pathlen = MAXPATHLEN;
		pathbufp = zalloc(ZV_NAMEI);

		if ((error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0) {
			if ((error = vnode_getwithref(vp)) == 0) {
				AUDIT_ARG(text, pathbufp);
				error = vn_path_package_check(vp, pathbufp, (int)pathlen, retval);

				(void)vnode_put(vp);
			}
		}
		zfree(ZV_NAMEI, pathbufp);
		goto outdrop;
	}
3835 
	/*
	 * Sync/freeze family: each selector is forwarded verbatim to the
	 * filesystem via VNOP_IOCTL.  F_BARRIERFSYNC is transparently
	 * promoted to F_FULLFSYNC on mounts known (or discovered) not to
	 * support barrier semantics.
	 */
	case F_CHKCLEAN:   // used by regression tests to see if all dirty pages got cleaned by fsync()
	case F_FULLFSYNC:  // fsync + flush the journal + DKIOCSYNCHRONIZE
	case F_BARRIERFSYNC:  // fsync + barrier
	case F_FREEZE_FS:  // freeze all other fs operations for the fs of this fd
	case F_THAW_FS: {  // thaw all frozen fs operations for the fs of this fd
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if ((error = vnode_getwithref(vp)) == 0) {
			/* Mount already known to need full sync instead of barriers? */
			if ((cmd == F_BARRIERFSYNC) &&
			    (vp->v_mount->mnt_supl_kern_flag & MNTK_SUPL_USE_FULLSYNC)) {
				cmd = F_FULLFSYNC;
			}
			error = VNOP_IOCTL(vp, cmd, (caddr_t)NULL, 0, &context);

			/*
			 * Promote F_BARRIERFSYNC to F_FULLFSYNC if the underlying
			 * filesystem doesn't support it.
			 */
			if ((error == ENOTTY || error == ENOTSUP || error == EINVAL) &&
			    (cmd == F_BARRIERFSYNC)) {
				/* Remember the promotion so later barriers skip the failed attempt. */
				os_atomic_or(&vp->v_mount->mnt_supl_kern_flag,
				    MNTK_SUPL_USE_FULLSYNC, relaxed);

				error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, &context);
			}

			(void)vnode_put(vp);
		}
		break;
	}
3871 
3872 	/*
3873 	 * SPI (private) for opening a file starting from a dir fd
3874 	 */
3875 	case F_OPENFROM: {
3876 		/* Check if this isn't a valid file descriptor */
3877 		if (fp->f_type != DTYPE_VNODE) {
3878 			error = EBADF;
3879 			goto out;
3880 		}
3881 		vp = (struct vnode *)fp_get_data(fp);
3882 
3883 		return sys_fcntl__OPENFROM(p, fd, cmd, uap->arg, fp, vp, retval);
3884 	}
3885 
3886 	/*
3887 	 * SPI (private) for unlinking a file starting from a dir fd
3888 	 */
3889 	case F_UNLINKFROM: {
3890 		user_addr_t pathname;
3891 
3892 		/* Check if this isn't a valid file descriptor */
3893 		if ((fp->f_type != DTYPE_VNODE) ||
3894 		    (fp->f_flag & FREAD) == 0) {
3895 			error = EBADF;
3896 			goto out;
3897 		}
3898 		vp = (struct vnode *)fp_get_data(fp);
3899 		proc_fdunlock(p);
3900 
3901 		if (vnode_getwithref(vp)) {
3902 			error = ENOENT;
3903 			goto outdrop;
3904 		}
3905 
3906 		/* Only valid for directories */
3907 		if (vp->v_type != VDIR) {
3908 			vnode_put(vp);
3909 			error = ENOTDIR;
3910 			goto outdrop;
3911 		}
3912 
3913 		/*
3914 		 * Only entitled apps may use the credentials of the thread
3915 		 * that opened the file descriptor.
3916 		 * Non-entitled threads will use their own context.
3917 		 */
3918 		if (IOCurrentTaskHasEntitlement(ACCOUNT_OPENFROM_ENTITLEMENT)) {
3919 			has_entitlement = 1;
3920 		}
3921 
3922 		/* Get flags, mode and pathname arguments. */
3923 		if (IS_64BIT_PROCESS(p)) {
3924 			pathname = (user_addr_t)argp;
3925 		} else {
3926 			pathname = CAST_USER_ADDR_T(argp);
3927 		}
3928 
3929 		/* Start the lookup relative to the file descriptor's vnode. */
3930 		error = unlink1(has_entitlement ? &context : vfs_context_current(),
3931 		    vp, pathname, UIO_USERSPACE, 0);
3932 
3933 		vnode_put(vp);
3934 		break;
3935 	}
3936 
#if DEVELOPMENT || DEBUG
	case F_ADDSIGS_MAIN_BINARY:
		csblob_add_flags |= CS_BLOB_ADD_ALLOW_MAIN_BINARY;
		OS_FALLTHROUGH;
#endif
	/*
	 * Attach a code-signature blob to a vnode.  F_ADDSIGS* copies the
	 * blob from user memory; the F_ADDFILESIGS* variants read it out of
	 * the file itself.  _RETURN/_FOR_DYLD_SIM/_INFO additionally copy
	 * back the blob's end offset, and _INFO can also return the cdhash
	 * and hash type.
	 */
	case F_ADDSIGS:
	case F_ADDFILESIGS:
	case F_ADDFILESIGS_FOR_DYLD_SIM:
	case F_ADDFILESIGS_RETURN:
	case F_ADDFILESIGS_INFO:
	{
		struct cs_blob *blob = NULL;
		struct user_fsignatures fs;
		kern_return_t kr;
		vm_offset_t kernel_blob_addr;
		vm_size_t kernel_blob_size;
		int blob_add_flags = 0;
		/* Only F_ADDFILESIGS_INFO copies in fields beyond the legacy prefix. */
		const size_t sizeof_fs = (cmd == F_ADDFILESIGS_INFO ?
		    offsetof(struct user_fsignatures, fs_cdhash /* first output element */) :
		    offsetof(struct user_fsignatures, fs_fsignatures_size /* compat */));

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		if (cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
			blob_add_flags |= MAC_VNODE_CHECK_DYLD_SIM;
			/* dyld_sim loads force CS_KILL on the process. */
			if ((proc_getcsflags(p) & CS_KILL) == 0) {
				proc_lock(p);
				proc_csflags_set(p, CS_KILL);
				proc_unlock(p);
			}
		}

		error = vnode_getwithref(vp);
		if (error) {
			goto outdrop;
		}

		if (IS_64BIT_PROCESS(p)) {
			error = copyin(argp, &fs, sizeof_fs);
		} else {
			/* F_ADDFILESIGS_INFO is a 64-bit-process-only interface. */
			if (cmd == F_ADDFILESIGS_INFO) {
				error = EINVAL;
				vnode_put(vp);
				goto outdrop;
			}

			struct user32_fsignatures fs32;

			error = copyin(argp, &fs32, sizeof(fs32));
			fs.fs_file_start = fs32.fs_file_start;
			fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start);
			fs.fs_blob_size = fs32.fs_blob_size;
		}

		if (error) {
			vnode_put(vp);
			goto outdrop;
		}

		/*
		 * First check if we have something loaded at this offset
		 */
		blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, fs.fs_file_start);
		if (blob != NULL) {
			/* If this is for dyld_sim revalidate the blob */
			if (cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
				error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags, proc_platform(p));
				if (error) {
					blob = NULL;
					/* EAGAIN falls through to re-add the blob below. */
					if (error != EAGAIN) {
						vnode_put(vp);
						goto outdrop;
					}
				}
			}
		}

		if (blob == NULL) {
			/*
			 * An arbitrary limit, to prevent someone from mapping in a 20GB blob.  This should cover
			 * our use cases for the immediate future, but note that at the time of this commit, some
			 * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
			 *
			 * We should consider how we can manage this more effectively; the above means that some
			 * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
			 * threshold considered ridiculous at the time of this change.
			 */
#define CS_MAX_BLOB_SIZE (40ULL * 1024ULL * 1024ULL)
			if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
				error = E2BIG;
				vnode_put(vp);
				goto outdrop;
			}

			kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
			kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
			if (kr != KERN_SUCCESS || kernel_blob_size < fs.fs_blob_size) {
				error = ENOMEM;
				vnode_put(vp);
				goto outdrop;
			}

			if (cmd == F_ADDSIGS || cmd == F_ADDSIGS_MAIN_BINARY) {
				/* Blob supplied directly in user memory. */
				error = copyin(fs.fs_blob_start,
				    (void *) kernel_blob_addr,
				    fs.fs_blob_size);
			} else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM || F_ADDFILESIGS_INFO */
				int resid;

				/* Blob read from the signed file itself. */
				error = vn_rdwr(UIO_READ,
				    vp,
				    (caddr_t) kernel_blob_addr,
				    (int)kernel_blob_size,
				    fs.fs_file_start + fs.fs_blob_start,
				    UIO_SYSSPACE,
				    0,
				    kauth_cred_get(),
				    &resid,
				    p);
				if ((error == 0) && resid) {
					/* kernel_blob_size rounded to a page size, but signature may be at end of file */
					memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
				}
			}

			if (error) {
				ubc_cs_blob_deallocate(kernel_blob_addr,
				    kernel_blob_size);
				vnode_put(vp);
				goto outdrop;
			}

			blob = NULL;
			error = ubc_cs_blob_add(vp,
			    proc_platform(p),
			    CPU_TYPE_ANY,                       /* not for a specific architecture */
			    CPU_SUBTYPE_ANY,
			    fs.fs_file_start,
			    &kernel_blob_addr,
			    kernel_blob_size,
			    NULL,
			    blob_add_flags,
			    &blob,
			    csblob_add_flags);

			/* ubc_blob_add() has consumed "kernel_blob_addr" if it is zeroed */
			if (error) {
				if (kernel_blob_addr) {
					ubc_cs_blob_deallocate(kernel_blob_addr,
					    kernel_blob_size);
				}
				vnode_put(vp);
				goto outdrop;
			} else {
#if CHECK_CS_VALIDATION_BITMAP
				ubc_cs_validation_bitmap_allocate( vp );
#endif
			}
		}

		if (cmd == F_ADDFILESIGS_RETURN || cmd == F_ADDFILESIGS_FOR_DYLD_SIM ||
		    cmd == F_ADDFILESIGS_INFO) {
			/*
			 * The first element of the structure is an
			 * off_t that happens to have the same size for
			 * all archs. Let's overwrite that.
			 */
			off_t end_offset = 0;
			if (blob) {
				end_offset = blob->csb_end_offset;
			}
			error = copyout(&end_offset, argp, sizeof(end_offset));

			if (error) {
				vnode_put(vp);
				goto outdrop;
			}
		}

		if (cmd == F_ADDFILESIGS_INFO) {
			/* Return information. What we copy out depends on the size of the
			 * passed in structure, to keep binary compatibility. */

			if (fs.fs_fsignatures_size >= sizeof(struct user_fsignatures)) {
				// enough room for fs_cdhash[20]+fs_hash_type

				if (blob != NULL) {
					error = copyout(blob->csb_cdhash,
					    (vm_address_t)argp + offsetof(struct user_fsignatures, fs_cdhash),
					    USER_FSIGNATURES_CDHASH_LEN);
					if (error) {
						vnode_put(vp);
						goto outdrop;
					}
					int hashtype = cs_hash_type(blob->csb_hashtype);
					error = copyout(&hashtype,
					    (vm_address_t)argp + offsetof(struct user_fsignatures, fs_hash_type),
					    sizeof(int));
					if (error) {
						vnode_put(vp);
						goto outdrop;
					}
				}
			}
		}

		(void) vnode_put(vp);
		break;
	}
#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/*
	 * F_ADDFILESUPPL: attach a supplemental code signature, read from
	 * this fd's file, to the "original" file identified by fs_orig_fd.
	 * 64-bit processes only.  Holds iocounts on both vnodes and a ref
	 * on the original fileproc for the duration.
	 */
	case F_ADDFILESUPPL:
	{
		struct vnode *ivp;
		struct cs_blob *blob = NULL;
		struct user_fsupplement fs;
		int orig_fd;
		struct fileproc* orig_fp = NULL;
		kern_return_t kr;
		vm_offset_t kernel_blob_addr;
		vm_size_t kernel_blob_size;

		if (!IS_64BIT_PROCESS(p)) {
			error = EINVAL;
			goto out; // drop fp and unlock fds
		}

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}

		error = copyin(argp, &fs, sizeof(fs));
		if (error) {
			goto out;
		}

		/* Look up the original (signed) file while the fd table is locked. */
		orig_fd = fs.fs_orig_fd;
		if ((error = fp_lookup(p, orig_fd, &orig_fp, 1))) {
			printf("CODE SIGNING: Failed to find original file for supplemental signature attachment\n");
			goto out;
		}

		if (orig_fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			fp_drop(p, orig_fd, orig_fp, 1);
			goto out;
		}

		ivp = (struct vnode *)fp_get_data(orig_fp);

		vp = (struct vnode *)fp_get_data(fp);

		proc_fdunlock(p);

		error = vnode_getwithref(ivp);
		if (error) {
			fp_drop(p, orig_fd, orig_fp, 0);
			goto outdrop; //drop fp
		}

		error = vnode_getwithref(vp);
		if (error) {
			vnode_put(ivp);
			fp_drop(p, orig_fd, orig_fp, 0);
			goto outdrop;
		}

		/* Same blob-size cap as the F_ADDSIGS family. */
		if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
			error = E2BIG;
			goto dropboth; // drop iocounts on vp and ivp, drop orig_fp then drop fp via outdrop
		}

		kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
		kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
		if (kr != KERN_SUCCESS) {
			error = ENOMEM;
			goto dropboth;
		}

		/* Read the supplemental blob out of this fd's file. */
		int resid;
		error = vn_rdwr(UIO_READ, vp,
		    (caddr_t)kernel_blob_addr, (int)kernel_blob_size,
		    fs.fs_file_start + fs.fs_blob_start,
		    UIO_SYSSPACE, 0,
		    kauth_cred_get(), &resid, p);
		if ((error == 0) && resid) {
			/* kernel_blob_size rounded to a page size, but signature may be at end of file */
			memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
		}

		if (error) {
			ubc_cs_blob_deallocate(kernel_blob_addr,
			    kernel_blob_size);
			goto dropboth;
		}

		error = ubc_cs_blob_add_supplement(vp, ivp, fs.fs_file_start,
		    &kernel_blob_addr, kernel_blob_size, &blob);

		/* ubc_blob_add_supplement() has consumed kernel_blob_addr if it is zeroed */
		if (error) {
			if (kernel_blob_addr) {
				ubc_cs_blob_deallocate(kernel_blob_addr,
				    kernel_blob_size);
			}
			goto dropboth;
		}
		vnode_put(ivp);
		vnode_put(vp);
		fp_drop(p, orig_fd, orig_fp, 0);
		break;

dropboth:
		/* Error path: release both vnode iocounts and the original fileproc. */
		vnode_put(ivp);
		vnode_put(vp);
		fp_drop(p, orig_fd, orig_fp, 0);
		goto outdrop;
	}
#endif
	/* Unimplemented selectors: always rejected with ENOTSUP. */
	case F_GETCODEDIR:
	case F_FINDSIGS: {
		error = ENOTSUP;
		goto out;
	}
	/*
	 * F_CHECK_LV: library-validation check.  Copies in an fchecklv_t
	 * (with a 32-bit compat path) and, under MACF, asks the policy
	 * whether the library at lv_file_start passes validation; an error
	 * message may be written to the user-supplied buffer.
	 */
	case F_CHECK_LV: {
		struct fileglob *fg;
		fchecklv_t lv = {};

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		fg = fp->fp_glob;
		proc_fdunlock(p);

		if (IS_64BIT_PROCESS(p)) {
			error = copyin(argp, &lv, sizeof(lv));
		} else {
			/* 32-bit compat: widen the user structure field by field. */
			struct user32_fchecklv lv32 = {};

			error = copyin(argp, &lv32, sizeof(lv32));
			lv.lv_file_start = lv32.lv_file_start;
			lv.lv_error_message = (void *)(uintptr_t)lv32.lv_error_message;
			lv.lv_error_message_size = lv32.lv_error_message_size;
		}
		if (error) {
			goto outdrop;
		}

#if CONFIG_MACF
		error = mac_file_check_library_validation(p, fg, lv.lv_file_start,
		    (user_long_t)lv.lv_error_message, lv.lv_error_message_size);
#endif

		break;
	}
	/*
	 * F_GETSIGSINFO: query properties of the code-signature blob loaded
	 * at fg_file_start.  Currently only GETSIGSINFO_PLATFORM_BINARY is
	 * supported, returning whether the signature is a platform binary.
	 */
	case F_GETSIGSINFO: {
		struct cs_blob *blob = NULL;
		fgetsigsinfo_t sigsinfo = {};

		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;
		}
		vp = (struct vnode *)fp_get_data(fp);
		proc_fdunlock(p);

		error = vnode_getwithref(vp);
		if (error) {
			goto outdrop;
		}

		error = copyin(argp, &sigsinfo, sizeof(sigsinfo));
		if (error) {
			vnode_put(vp);
			goto outdrop;
		}

		/* No blob registered at that offset means nothing to report. */
		blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, sigsinfo.fg_file_start);
		if (blob == NULL) {
			error = ENOENT;
			vnode_put(vp);
			goto outdrop;
		}
		switch (sigsinfo.fg_info_request) {
		case GETSIGSINFO_PLATFORM_BINARY:
			sigsinfo.fg_sig_is_platform = blob->csb_platform_binary;
			/* Copy out only the answer field, in place within the user struct. */
			error = copyout(&sigsinfo.fg_sig_is_platform,
			    (vm_address_t)argp + offsetof(struct fgetsigsinfo, fg_sig_is_platform),
			    sizeof(sigsinfo.fg_sig_is_platform));
			if (error) {
				vnode_put(vp);
				goto outdrop;
			}
			break;
		default:
			error = EINVAL;
			vnode_put(vp);
			goto outdrop;
		}
		vnode_put(vp);
		break;
	}
4345 #if CONFIG_PROTECT
4346 	case F_GETPROTECTIONCLASS: {
4347 		if (fp->f_type != DTYPE_VNODE) {
4348 			error = EBADF;
4349 			goto out;
4350 		}
4351 		vp = (struct vnode *)fp_get_data(fp);
4352 
4353 		proc_fdunlock(p);
4354 
4355 		if (vnode_getwithref(vp)) {
4356 			error = ENOENT;
4357 			goto outdrop;
4358 		}
4359 
4360 		struct vnode_attr va;
4361 
4362 		VATTR_INIT(&va);
4363 		VATTR_WANTED(&va, va_dataprotect_class);
4364 		error = VNOP_GETATTR(vp, &va, &context);
4365 		if (!error) {
4366 			if (VATTR_IS_SUPPORTED(&va, va_dataprotect_class)) {
4367 				*retval = va.va_dataprotect_class;
4368 			} else {
4369 				error = ENOTSUP;
4370 			}
4371 		}
4372 
4373 		vnode_put(vp);
4374 		break;
4375 	}
4376 
4377 	case F_SETPROTECTIONCLASS: {
4378 		/* tmp must be a valid PROTECTION_CLASS_* */
4379 		tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
4380 
4381 		if (fp->f_type != DTYPE_VNODE) {
4382 			error = EBADF;
4383 			goto out;
4384 		}
4385 		vp = (struct vnode *)fp_get_data(fp);
4386 
4387 		proc_fdunlock(p);
4388 
4389 		if (vnode_getwithref(vp)) {
4390 			error = ENOENT;
4391 			goto outdrop;
4392 		}
4393 
4394 		/* Only go forward if you have write access */
4395 		vfs_context_t ctx = vfs_context_current();
4396 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4397 			vnode_put(vp);
4398 			error = EBADF;
4399 			goto outdrop;
4400 		}
4401 
4402 		struct vnode_attr va;
4403 
4404 		VATTR_INIT(&va);
4405 		VATTR_SET(&va, va_dataprotect_class, tmp);
4406 
4407 		error = VNOP_SETATTR(vp, &va, ctx);
4408 
4409 		vnode_put(vp);
4410 		break;
4411 	}
4412 
4413 	case F_TRANSCODEKEY: {
4414 		if (fp->f_type != DTYPE_VNODE) {
4415 			error = EBADF;
4416 			goto out;
4417 		}
4418 
4419 		vp = (struct vnode *)fp_get_data(fp);
4420 		proc_fdunlock(p);
4421 
4422 		if (vnode_getwithref(vp)) {
4423 			error = ENOENT;
4424 			goto outdrop;
4425 		}
4426 
4427 		cp_key_t k = {
4428 			.len = CP_MAX_WRAPPEDKEYSIZE,
4429 		};
4430 
4431 		k.key = kalloc_data(CP_MAX_WRAPPEDKEYSIZE, Z_WAITOK | Z_ZERO);
4432 		if (k.key == NULL) {
4433 			error = ENOMEM;
4434 		} else {
4435 			error = VNOP_IOCTL(vp, F_TRANSCODEKEY, (caddr_t)&k, 1, &context);
4436 		}
4437 
4438 		vnode_put(vp);
4439 
4440 		if (error == 0) {
4441 			error = copyout(k.key, argp, k.len);
4442 			*retval = k.len;
4443 		}
4444 		kfree_data(k.key, CP_MAX_WRAPPEDKEYSIZE);
4445 
4446 		break;
4447 	}
4448 
4449 	case F_GETPROTECTIONLEVEL:  {
4450 		if (fp->f_type != DTYPE_VNODE) {
4451 			error = EBADF;
4452 			goto out;
4453 		}
4454 
4455 		vp = (struct vnode*)fp_get_data(fp);
4456 		proc_fdunlock(p);
4457 
4458 		if (vnode_getwithref(vp)) {
4459 			error = ENOENT;
4460 			goto outdrop;
4461 		}
4462 
4463 		error = VNOP_IOCTL(vp, F_GETPROTECTIONLEVEL, (caddr_t)retval, 0, &context);
4464 
4465 		vnode_put(vp);
4466 		break;
4467 	}
4468 
4469 	case F_GETDEFAULTPROTLEVEL:  {
4470 		if (fp->f_type != DTYPE_VNODE) {
4471 			error = EBADF;
4472 			goto out;
4473 		}
4474 
4475 		vp = (struct vnode*)fp_get_data(fp);
4476 		proc_fdunlock(p);
4477 
4478 		if (vnode_getwithref(vp)) {
4479 			error = ENOENT;
4480 			goto outdrop;
4481 		}
4482 
4483 		/*
4484 		 * if cp_get_major_vers fails, error will be set to proper errno
4485 		 * and cp_version will still be 0.
4486 		 */
4487 
4488 		error = VNOP_IOCTL(vp, F_GETDEFAULTPROTLEVEL, (caddr_t)retval, 0, &context);
4489 
4490 		vnode_put(vp);
4491 		break;
4492 	}
4493 
4494 #endif /* CONFIG_PROTECT */
4495 
4496 	case F_MOVEDATAEXTENTS: {
4497 		struct fileproc *fp2 = NULL;
4498 		struct vnode *src_vp = NULLVP;
4499 		struct vnode *dst_vp = NULLVP;
4500 		/* We need to grab the 2nd FD out of the arguments before moving on. */
4501 		int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
4502 
4503 		error = priv_check_cred(kauth_cred_get(), PRIV_VFS_MOVE_DATA_EXTENTS, 0);
4504 		if (error) {
4505 			goto out;
4506 		}
4507 
4508 		if (fp->f_type != DTYPE_VNODE) {
4509 			error = EBADF;
4510 			goto out;
4511 		}
4512 
4513 		/*
4514 		 * For now, special case HFS+ and APFS only, since this
4515 		 * is SPI.
4516 		 */
4517 		src_vp = (struct vnode *)fp_get_data(fp);
4518 		if (src_vp->v_tag != VT_HFS && src_vp->v_tag != VT_APFS) {
4519 			error = ENOTSUP;
4520 			goto out;
4521 		}
4522 
4523 		/*
4524 		 * Get the references before we start acquiring iocounts on the vnodes,
4525 		 * while we still hold the proc fd lock
4526 		 */
4527 		if ((error = fp_lookup(p, fd2, &fp2, 1))) {
4528 			error = EBADF;
4529 			goto out;
4530 		}
4531 		if (fp2->f_type != DTYPE_VNODE) {
4532 			fp_drop(p, fd2, fp2, 1);
4533 			error = EBADF;
4534 			goto out;
4535 		}
4536 		dst_vp = (struct vnode *)fp_get_data(fp2);
4537 		if (dst_vp->v_tag != VT_HFS && dst_vp->v_tag != VT_APFS) {
4538 			fp_drop(p, fd2, fp2, 1);
4539 			error = ENOTSUP;
4540 			goto out;
4541 		}
4542 
4543 #if CONFIG_MACF
4544 		/* Re-do MAC checks against the new FD, pass in a fake argument */
4545 		error = mac_file_check_fcntl(kauth_cred_get(), fp2->fp_glob, cmd, 0);
4546 		if (error) {
4547 			fp_drop(p, fd2, fp2, 1);
4548 			goto out;
4549 		}
4550 #endif
4551 		/* Audit the 2nd FD */
4552 		AUDIT_ARG(fd, fd2);
4553 
4554 		proc_fdunlock(p);
4555 
4556 		if (vnode_getwithref(src_vp)) {
4557 			fp_drop(p, fd2, fp2, 0);
4558 			error = ENOENT;
4559 			goto outdrop;
4560 		}
4561 		if (vnode_getwithref(dst_vp)) {
4562 			vnode_put(src_vp);
4563 			fp_drop(p, fd2, fp2, 0);
4564 			error = ENOENT;
4565 			goto outdrop;
4566 		}
4567 
4568 		/*
4569 		 * Basic asserts; validate they are not the same and that
4570 		 * both live on the same filesystem.
4571 		 */
4572 		if (dst_vp == src_vp) {
4573 			vnode_put(src_vp);
4574 			vnode_put(dst_vp);
4575 			fp_drop(p, fd2, fp2, 0);
4576 			error = EINVAL;
4577 			goto outdrop;
4578 		}
4579 
4580 		if (dst_vp->v_mount != src_vp->v_mount) {
4581 			vnode_put(src_vp);
4582 			vnode_put(dst_vp);
4583 			fp_drop(p, fd2, fp2, 0);
4584 			error = EXDEV;
4585 			goto outdrop;
4586 		}
4587 
4588 		/* Now we have a legit pair of FDs.  Go to work */
4589 
4590 		/* Now check for write access to the target files */
4591 		if (vnode_authorize(src_vp, NULLVP,
4592 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4593 			vnode_put(src_vp);
4594 			vnode_put(dst_vp);
4595 			fp_drop(p, fd2, fp2, 0);
4596 			error = EBADF;
4597 			goto outdrop;
4598 		}
4599 
4600 		if (vnode_authorize(dst_vp, NULLVP,
4601 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4602 			vnode_put(src_vp);
4603 			vnode_put(dst_vp);
4604 			fp_drop(p, fd2, fp2, 0);
4605 			error = EBADF;
4606 			goto outdrop;
4607 		}
4608 
4609 		/* Verify that both vps point to files and not directories */
4610 		if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
4611 			error = EINVAL;
4612 			vnode_put(src_vp);
4613 			vnode_put(dst_vp);
4614 			fp_drop(p, fd2, fp2, 0);
4615 			goto outdrop;
4616 		}
4617 
4618 		/*
4619 		 * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
4620 		 * We'll pass in our special bit indicating that the new behavior is expected
4621 		 */
4622 
4623 		error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
4624 
4625 		vnode_put(src_vp);
4626 		vnode_put(dst_vp);
4627 		fp_drop(p, fd2, fp2, 0);
4628 		break;
4629 	}
4630 
4631 	case F_TRANSFEREXTENTS: {
4632 		struct fileproc *fp2 = NULL;
4633 		struct vnode *src_vp = NULLVP;
4634 		struct vnode *dst_vp = NULLVP;
4635 
4636 		/* Get 2nd FD out of the arguments. */
4637 		int fd2 = CAST_DOWN_EXPLICIT(int, uap->arg);
4638 		if (fd2 < 0) {
4639 			error = EINVAL;
4640 			goto out;
4641 		}
4642 
4643 		if (fp->f_type != DTYPE_VNODE) {
4644 			error = EBADF;
4645 			goto out;
4646 		}
4647 
4648 		/*
4649 		 * Only allow this for APFS
4650 		 */
4651 		src_vp = (struct vnode *)fp_get_data(fp);
4652 		if (src_vp->v_tag != VT_APFS) {
4653 			error = ENOTSUP;
4654 			goto out;
4655 		}
4656 
4657 		/*
4658 		 * Get the references before we start acquiring iocounts on the vnodes,
4659 		 * while we still hold the proc fd lock
4660 		 */
4661 		if ((error = fp_lookup(p, fd2, &fp2, 1))) {
4662 			error = EBADF;
4663 			goto out;
4664 		}
4665 		if (fp2->f_type != DTYPE_VNODE) {
4666 			fp_drop(p, fd2, fp2, 1);
4667 			error = EBADF;
4668 			goto out;
4669 		}
4670 		dst_vp = (struct vnode *)fp_get_data(fp2);
4671 		if (dst_vp->v_tag != VT_APFS) {
4672 			fp_drop(p, fd2, fp2, 1);
4673 			error = ENOTSUP;
4674 			goto out;
4675 		}
4676 
4677 #if CONFIG_MACF
4678 		/* Re-do MAC checks against the new FD, pass in a fake argument */
4679 		error = mac_file_check_fcntl(kauth_cred_get(), fp2->fp_glob, cmd, 0);
4680 		if (error) {
4681 			fp_drop(p, fd2, fp2, 1);
4682 			goto out;
4683 		}
4684 #endif
4685 		/* Audit the 2nd FD */
4686 		AUDIT_ARG(fd, fd2);
4687 
4688 		proc_fdunlock(p);
4689 
4690 		if (vnode_getwithref(src_vp)) {
4691 			fp_drop(p, fd2, fp2, 0);
4692 			error = ENOENT;
4693 			goto outdrop;
4694 		}
4695 		if (vnode_getwithref(dst_vp)) {
4696 			vnode_put(src_vp);
4697 			fp_drop(p, fd2, fp2, 0);
4698 			error = ENOENT;
4699 			goto outdrop;
4700 		}
4701 
4702 		/*
4703 		 * Validate they are not the same and that
4704 		 * both live on the same filesystem.
4705 		 */
4706 		if (dst_vp == src_vp) {
4707 			vnode_put(src_vp);
4708 			vnode_put(dst_vp);
4709 			fp_drop(p, fd2, fp2, 0);
4710 			error = EINVAL;
4711 			goto outdrop;
4712 		}
4713 		if (dst_vp->v_mount != src_vp->v_mount) {
4714 			vnode_put(src_vp);
4715 			vnode_put(dst_vp);
4716 			fp_drop(p, fd2, fp2, 0);
4717 			error = EXDEV;
4718 			goto outdrop;
4719 		}
4720 
4721 		/* Verify that both vps point to files and not directories */
4722 		if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
4723 			error = EINVAL;
4724 			vnode_put(src_vp);
4725 			vnode_put(dst_vp);
4726 			fp_drop(p, fd2, fp2, 0);
4727 			goto outdrop;
4728 		}
4729 
4730 
4731 		/*
4732 		 * Okay, vps are legit. Check  access.  We'll require write access
4733 		 * to both files.
4734 		 */
4735 		if (vnode_authorize(src_vp, NULLVP,
4736 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4737 			vnode_put(src_vp);
4738 			vnode_put(dst_vp);
4739 			fp_drop(p, fd2, fp2, 0);
4740 			error = EBADF;
4741 			goto outdrop;
4742 		}
4743 		if (vnode_authorize(dst_vp, NULLVP,
4744 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4745 			vnode_put(src_vp);
4746 			vnode_put(dst_vp);
4747 			fp_drop(p, fd2, fp2, 0);
4748 			error = EBADF;
4749 			goto outdrop;
4750 		}
4751 
4752 		/* Pass it on through to the fs */
4753 		error = VNOP_IOCTL(src_vp, cmd, (caddr_t)dst_vp, 0, &context);
4754 
4755 		vnode_put(src_vp);
4756 		vnode_put(dst_vp);
4757 		fp_drop(p, fd2, fp2, 0);
4758 		break;
4759 	}
4760 
4761 	/*
4762 	 * SPI for making a file compressed.
4763 	 */
4764 	case F_MAKECOMPRESSED: {
4765 		uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
4766 
4767 		if (fp->f_type != DTYPE_VNODE) {
4768 			error = EBADF;
4769 			goto out;
4770 		}
4771 
4772 		vp = (struct vnode*)fp_get_data(fp);
4773 		proc_fdunlock(p);
4774 
4775 		/* get the vnode */
4776 		if (vnode_getwithref(vp)) {
4777 			error = ENOENT;
4778 			goto outdrop;
4779 		}
4780 
4781 		/* Is it a file? */
4782 		if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
4783 			vnode_put(vp);
4784 			error = EBADF;
4785 			goto outdrop;
4786 		}
4787 
4788 		/* invoke ioctl to pass off to FS */
4789 		/* Only go forward if you have write access */
4790 		vfs_context_t ctx = vfs_context_current();
4791 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4792 			vnode_put(vp);
4793 			error = EBADF;
4794 			goto outdrop;
4795 		}
4796 
4797 		error = VNOP_IOCTL(vp, cmd, (caddr_t)&gcounter, 0, &context);
4798 
4799 		vnode_put(vp);
4800 		break;
4801 	}
4802 
4803 	/*
4804 	 * SPI (private) for indicating to a filesystem that subsequent writes to
4805 	 * the open FD will written to the Fastflow.
4806 	 */
4807 	case F_SET_GREEDY_MODE:
4808 	/* intentionally drop through to the same handler as F_SETSTATIC.
4809 	 * both fcntls should pass the argument and their selector into VNOP_IOCTL.
4810 	 */
4811 
4812 	/*
4813 	 * SPI (private) for indicating to a filesystem that subsequent writes to
4814 	 * the open FD will represent static content.
4815 	 */
4816 	case F_SETSTATICCONTENT: {
4817 		caddr_t ioctl_arg = NULL;
4818 
4819 		if (uap->arg) {
4820 			ioctl_arg = (caddr_t) 1;
4821 		}
4822 
4823 		if (fp->f_type != DTYPE_VNODE) {
4824 			error = EBADF;
4825 			goto out;
4826 		}
4827 		vp = (struct vnode *)fp_get_data(fp);
4828 		proc_fdunlock(p);
4829 
4830 		error = vnode_getwithref(vp);
4831 		if (error) {
4832 			error = ENOENT;
4833 			goto outdrop;
4834 		}
4835 
4836 		/* Only go forward if you have write access */
4837 		vfs_context_t ctx = vfs_context_current();
4838 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4839 			vnode_put(vp);
4840 			error = EBADF;
4841 			goto outdrop;
4842 		}
4843 
4844 		error = VNOP_IOCTL(vp, cmd, ioctl_arg, 0, &context);
4845 		(void)vnode_put(vp);
4846 
4847 		break;
4848 	}
4849 
4850 	/*
4851 	 * SPI (private) for indicating to the lower level storage driver that the
4852 	 * subsequent writes should be of a particular IO type (burst, greedy, static),
4853 	 * or other flavors that may be necessary.
4854 	 */
4855 	case F_SETIOTYPE: {
4856 		caddr_t param_ptr;
4857 		uint32_t param;
4858 
4859 		if (uap->arg) {
4860 			/* extract 32 bits of flags from userland */
4861 			param_ptr = (caddr_t) uap->arg;
4862 			param = (uint32_t) param_ptr;
4863 		} else {
4864 			/* If no argument is specified, error out */
4865 			error = EINVAL;
4866 			goto out;
4867 		}
4868 
4869 		/*
4870 		 * Validate the different types of flags that can be specified:
4871 		 * all of them are mutually exclusive for now.
4872 		 */
4873 		switch (param) {
4874 		case F_IOTYPE_ISOCHRONOUS:
4875 			break;
4876 
4877 		default:
4878 			error = EINVAL;
4879 			goto out;
4880 		}
4881 
4882 
4883 		if (fp->f_type != DTYPE_VNODE) {
4884 			error = EBADF;
4885 			goto out;
4886 		}
4887 		vp = (struct vnode *)fp_get_data(fp);
4888 		proc_fdunlock(p);
4889 
4890 		error = vnode_getwithref(vp);
4891 		if (error) {
4892 			error = ENOENT;
4893 			goto outdrop;
4894 		}
4895 
4896 		/* Only go forward if you have write access */
4897 		vfs_context_t ctx = vfs_context_current();
4898 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4899 			vnode_put(vp);
4900 			error = EBADF;
4901 			goto outdrop;
4902 		}
4903 
4904 		error = VNOP_IOCTL(vp, cmd, param_ptr, 0, &context);
4905 		(void)vnode_put(vp);
4906 
4907 		break;
4908 	}
4909 
4910 	/*
4911 	 * Set the vnode pointed to by 'fd'
4912 	 * and tag it as the (potentially future) backing store
4913 	 * for another filesystem
4914 	 */
4915 	case F_SETBACKINGSTORE: {
4916 		if (fp->f_type != DTYPE_VNODE) {
4917 			error = EBADF;
4918 			goto out;
4919 		}
4920 
4921 		vp = (struct vnode *)fp_get_data(fp);
4922 
4923 		if (vp->v_tag != VT_HFS) {
4924 			error = EINVAL;
4925 			goto out;
4926 		}
4927 		proc_fdunlock(p);
4928 
4929 		if (vnode_getwithref(vp)) {
4930 			error = ENOENT;
4931 			goto outdrop;
4932 		}
4933 
4934 		/* only proceed if you have write access */
4935 		vfs_context_t ctx = vfs_context_current();
4936 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4937 			vnode_put(vp);
4938 			error = EBADF;
4939 			goto outdrop;
4940 		}
4941 
4942 
4943 		/* If arg != 0, set, otherwise unset */
4944 		if (uap->arg) {
4945 			error = VNOP_IOCTL(vp, cmd, (caddr_t)1, 0, &context);
4946 		} else {
4947 			error = VNOP_IOCTL(vp, cmd, (caddr_t)NULL, 0, &context);
4948 		}
4949 
4950 		vnode_put(vp);
4951 		break;
4952 	}
4953 
4954 	/*
4955 	 * like F_GETPATH, but special semantics for
4956 	 * the mobile time machine handler.
4957 	 */
4958 	case F_GETPATH_MTMINFO: {
4959 		char *pathbufp;
4960 		int pathlen;
4961 
4962 		if (fp->f_type != DTYPE_VNODE) {
4963 			error = EBADF;
4964 			goto out;
4965 		}
4966 		vp = (struct vnode *)fp_get_data(fp);
4967 		proc_fdunlock(p);
4968 
4969 		pathlen = MAXPATHLEN;
4970 		pathbufp = zalloc(ZV_NAMEI);
4971 
4972 		if ((error = vnode_getwithref(vp)) == 0) {
4973 			int backingstore = 0;
4974 
4975 			/* Check for error from vn_getpath before moving on */
4976 			if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) {
4977 				if (vp->v_tag == VT_HFS) {
4978 					error = VNOP_IOCTL(vp, cmd, (caddr_t) &backingstore, 0, &context);
4979 				}
4980 				(void)vnode_put(vp);
4981 
4982 				if (error == 0) {
4983 					error = copyout((caddr_t)pathbufp, argp, pathlen);
4984 				}
4985 				if (error == 0) {
4986 					/*
4987 					 * If the copyout was successful, now check to ensure
4988 					 * that this vnode is not a BACKINGSTORE vnode.  mtmd
4989 					 * wants the path regardless.
4990 					 */
4991 					if (backingstore) {
4992 						error = EBUSY;
4993 					}
4994 				}
4995 			} else {
4996 				(void)vnode_put(vp);
4997 			}
4998 		}
4999 
5000 		zfree(ZV_NAMEI, pathbufp);
5001 		goto outdrop;
5002 	}
5003 
5004 	case F_RECYCLE: {
5005 #if !DEBUG && !DEVELOPMENT
5006 		bool allowed = false;
5007 
5008 		//
5009 		// non-debug and non-development kernels have restrictions
5010 		// on who can all this fcntl.  the process has to be marked
5011 		// with the dataless-manipulator entitlement and either the
5012 		// process or thread have to be marked rapid-aging.
5013 		//
5014 		if (!vfs_context_is_dataless_manipulator(&context)) {
5015 			error = EPERM;
5016 			goto out;
5017 		}
5018 
5019 		proc_t proc = vfs_context_proc(&context);
5020 		if (proc && (proc->p_lflag & P_LRAGE_VNODES)) {
5021 			allowed = true;
5022 		} else {
5023 			thread_t thr = vfs_context_thread(&context);
5024 			if (thr) {
5025 				struct uthread *ut = get_bsdthread_info(thr);
5026 
5027 				if (ut && (ut->uu_flag & UT_RAGE_VNODES)) {
5028 					allowed = true;
5029 				}
5030 			}
5031 		}
5032 		if (!allowed) {
5033 			error = EPERM;
5034 			goto out;
5035 		}
5036 #endif
5037 
5038 		if (fp->f_type != DTYPE_VNODE) {
5039 			error = EBADF;
5040 			goto out;
5041 		}
5042 		vp = (struct vnode *)fp_get_data(fp);
5043 		proc_fdunlock(p);
5044 
5045 		vnode_recycle(vp);
5046 		break;
5047 	}
5048 
5049 #if CONFIG_FILE_LEASES
5050 	case F_SETLEASE: {
5051 		struct fileglob *fg;
5052 		int fl_type;
5053 		int expcounts;
5054 
5055 		if (fp->f_type != DTYPE_VNODE) {
5056 			error = EBADF;
5057 			goto out;
5058 		}
5059 		vp = (struct vnode *)fp_get_data(fp);
5060 		fg = fp->fp_glob;;
5061 		proc_fdunlock(p);
5062 
5063 		/*
5064 		 * In order to allow a process to avoid breaking
5065 		 * its own leases, the expected open count needs
5066 		 * to be provided to F_SETLEASE when placing write lease.
5067 		 * Similarly, in order to allow a process to place a read lease
5068 		 * after opening the file multiple times in RW mode, the expected
5069 		 * write count needs to be provided to F_SETLEASE when placing a
5070 		 * read lease.
5071 		 *
5072 		 * We use the upper 30 bits of the integer argument (way more than
5073 		 * enough) as the expected open/write count.
5074 		 *
5075 		 * If the caller passed 0 for the expected open count,
5076 		 * assume 1.
5077 		 */
5078 		fl_type = CAST_DOWN_EXPLICIT(int, uap->arg);
5079 		expcounts = (unsigned int)fl_type >> 2;
5080 		fl_type &= 3;
5081 
5082 		if (fl_type == F_WRLCK && expcounts == 0) {
5083 			expcounts = 1;
5084 		}
5085 
5086 		AUDIT_ARG(value32, fl_type);
5087 
5088 		if ((error = vnode_getwithref(vp))) {
5089 			goto outdrop;
5090 		}
5091 
5092 		/*
5093 		 * Only support for regular file/dir mounted on local-based filesystem.
5094 		 */
5095 		if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
5096 		    !(vfs_flags(vnode_mount(vp)) & MNT_LOCAL)) {
5097 			error = EBADF;
5098 			vnode_put(vp);
5099 			goto outdrop;
5100 		}
5101 
5102 		/* For directory, we only support read lease. */
5103 		if (vnode_vtype(vp) == VDIR && fl_type == F_WRLCK) {
5104 			error = ENOTSUP;
5105 			vnode_put(vp);
5106 			goto outdrop;
5107 		}
5108 
5109 		switch (fl_type) {
5110 		case F_RDLCK:
5111 		case F_WRLCK:
5112 		case F_UNLCK:
5113 			error = vnode_setlease(vp, fg, fl_type, expcounts,
5114 			    vfs_context_current());
5115 			break;
5116 		default:
5117 			error = EINVAL;
5118 			break;
5119 		}
5120 
5121 		vnode_put(vp);
5122 		goto outdrop;
5123 	}
5124 
5125 	case F_GETLEASE: {
5126 		if (fp->f_type != DTYPE_VNODE) {
5127 			error = EBADF;
5128 			goto out;
5129 		}
5130 		vp = (struct vnode *)fp_get_data(fp);
5131 		proc_fdunlock(p);
5132 
5133 		if ((error = vnode_getwithref(vp))) {
5134 			goto outdrop;
5135 		}
5136 
5137 		if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
5138 		    !(vfs_flags(vnode_mount(vp)) & MNT_LOCAL)) {
5139 			error = EBADF;
5140 			vnode_put(vp);
5141 			goto outdrop;
5142 		}
5143 
5144 		error = 0;
5145 		*retval = vnode_getlease(vp);
5146 		vnode_put(vp);
5147 		goto outdrop;
5148 	}
5149 #endif /* CONFIG_FILE_LEASES */
5150 
5151 	/* SPI (private) for asserting background access to a file */
5152 	case F_ASSERT_BG_ACCESS:
5153 	/* SPI (private) for releasing background access to a file */
5154 	case F_RELEASE_BG_ACCESS: {
5155 		/*
5156 		 * Check if the process is platform code, which means
5157 		 * that it is considered part of the Operating System.
5158 		 */
5159 		if (!csproc_get_platform_binary(p)) {
5160 			error = EPERM;
5161 			goto out;
5162 		}
5163 
5164 		if (fp->f_type != DTYPE_VNODE) {
5165 			error = EBADF;
5166 			goto out;
5167 		}
5168 
5169 		vp = (struct vnode *)fp_get_data(fp);
5170 		proc_fdunlock(p);
5171 
5172 		if (vnode_getwithref(vp)) {
5173 			error = ENOENT;
5174 			goto outdrop;
5175 		}
5176 
5177 		/* Verify that vp points to a file and not a directory */
5178 		if (!vnode_isreg(vp)) {
5179 			vnode_put(vp);
5180 			error = EINVAL;
5181 			goto outdrop;
5182 		}
5183 
5184 		/* Only proceed if you have read access */
5185 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA), &context) != 0) {
5186 			vnode_put(vp);
5187 			error = EBADF;
5188 			goto outdrop;
5189 		}
5190 
5191 		if (cmd == F_ASSERT_BG_ACCESS) {
5192 			fassertbgaccess_t args;
5193 
5194 			if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
5195 				vnode_put(vp);
5196 				goto outdrop;
5197 			}
5198 
5199 			error = VNOP_IOCTL(vp, F_ASSERT_BG_ACCESS, (caddr_t)&args, 0, &context);
5200 		} else {
5201 			// cmd == F_RELEASE_BG_ACCESS
5202 			error = VNOP_IOCTL(vp, F_RELEASE_BG_ACCESS, (caddr_t)NULL, 0, &context);
5203 		}
5204 
5205 		vnode_put(vp);
5206 
5207 		goto outdrop;
5208 	}
5209 
5210 	default:
5211 		/*
		 * This is an fcntl() that we do not recognize at this level;
5213 		 * if this is a vnode, we send it down into the VNOP_IOCTL
5214 		 * for this vnode; this can include special devices, and will
5215 		 * effectively overload fcntl() to send ioctl()'s.
5216 		 */
5217 		if ((cmd & IOC_VOID) && (cmd & IOC_INOUT)) {
5218 			error = EINVAL;
5219 			goto out;
5220 		}
5221 
5222 		/*
5223 		 * Catch any now-invalid fcntl() selectors.
5224 		 * (When adding a selector to this list, it may be prudent
5225 		 * to consider adding it to the list in fsctl_internal() as well.)
5226 		 */
5227 		switch (cmd) {
5228 		case (int)APFSIOC_REVERT_TO_SNAPSHOT:
5229 		case (int)FSIOC_FIOSEEKHOLE:
5230 		case (int)FSIOC_FIOSEEKDATA:
5231 		case (int)FSIOC_CAS_BSDFLAGS:
5232 		case (int)FSIOC_KERNEL_ROOTAUTH:
5233 		case (int)FSIOC_GRAFT_FS:
5234 		case (int)FSIOC_UNGRAFT_FS:
5235 		case (int)APFSIOC_IS_GRAFT_SUPPORTED:
5236 		case (int)FSIOC_AUTH_FS:
5237 		case HFS_GET_BOOT_INFO:
5238 		case HFS_SET_BOOT_INFO:
5239 		case FIOPINSWAP:
5240 		case F_MARKDEPENDENCY:
5241 		case TIOCREVOKE:
5242 		case TIOCREVOKECLEAR:
5243 			error = EINVAL;
5244 			goto out;
5245 		default:
5246 			break;
5247 		}
5248 
5249 		if (fp->f_type != DTYPE_VNODE) {
5250 			error = EBADF;
5251 			goto out;
5252 		}
5253 		vp = (struct vnode *)fp_get_data(fp);
5254 		proc_fdunlock(p);
5255 
5256 		if ((error = vnode_getwithref(vp)) == 0) {
5257 #define STK_PARAMS 128
5258 			char stkbuf[STK_PARAMS] = {0};
5259 			unsigned int size;
5260 			caddr_t data, memp;
5261 			/*
5262 			 * For this to work properly, we have to copy in the
5263 			 * ioctl() cmd argument if there is one; we must also
5264 			 * check that a command parameter, if present, does
5265 			 * not exceed the maximum command length dictated by
5266 			 * the number of bits we have available in the command
5267 			 * to represent a structure length.  Finally, we have
5268 			 * to copy the results back out, if it is that type of
5269 			 * ioctl().
5270 			 */
5271 			size = IOCPARM_LEN(cmd);
5272 			if (size > IOCPARM_MAX) {
5273 				(void)vnode_put(vp);
5274 				error = EINVAL;
5275 				break;
5276 			}
5277 
5278 			memp = NULL;
5279 			if (size > sizeof(stkbuf)) {
5280 				memp = (caddr_t)kalloc_data(size, Z_WAITOK);
5281 				if (memp == 0) {
5282 					(void)vnode_put(vp);
5283 					error = ENOMEM;
5284 					goto outdrop;
5285 				}
5286 				data = memp;
5287 			} else {
5288 				data = &stkbuf[0];
5289 			}
5290 
5291 			if (cmd & IOC_IN) {
5292 				if (size) {
5293 					/* structure */
5294 					error = copyin(argp, data, size);
5295 					if (error) {
5296 						(void)vnode_put(vp);
5297 						if (memp) {
5298 							kfree_data(memp, size);
5299 						}
5300 						goto outdrop;
5301 					}
5302 
5303 					/* Bzero the section beyond that which was needed */
5304 					if (size <= sizeof(stkbuf)) {
5305 						bzero((((uint8_t*)data) + size), (sizeof(stkbuf) - size));
5306 					}
5307 				} else {
5308 					/* int */
5309 					if (is64bit) {
5310 						*(user_addr_t *)data = argp;
5311 					} else {
5312 						*(uint32_t *)data = (uint32_t)argp;
5313 					}
5314 				};
5315 			} else if ((cmd & IOC_OUT) && size) {
5316 				/*
5317 				 * Zero the buffer so the user always
5318 				 * gets back something deterministic.
5319 				 */
5320 				bzero(data, size);
5321 			} else if (cmd & IOC_VOID) {
5322 				if (is64bit) {
5323 					*(user_addr_t *)data = argp;
5324 				} else {
5325 					*(uint32_t *)data = (uint32_t)argp;
5326 				}
5327 			}
5328 
5329 			error = VNOP_IOCTL(vp, cmd, CAST_DOWN(caddr_t, data), 0, &context);
5330 
5331 			(void)vnode_put(vp);
5332 
5333 			/* Copy any output data to user */
5334 			if (error == 0 && (cmd & IOC_OUT) && size) {
5335 				error = copyout(data, argp, size);
5336 			}
5337 			if (memp) {
5338 				kfree_data(memp, size);
5339 			}
5340 		}
5341 		break;
5342 	}
5343 
5344 outdrop:
5345 	return sys_fcntl_outdrop(p, fd, fp, vp, error);
5346 
5347 out:
5348 	return sys_fcntl_out(p, fd, fp, error);
5349 }
5350 
5351 
5352 /*
5353  * sys_close
5354  *
5355  * Description:	The implementation of the close(2) system call
5356  *
5357  * Parameters:	p			Process in whose per process file table
5358  *					the close is to occur
5359  *		uap->fd			fd to be closed
5360  *		retval			<unused>
5361  *
5362  * Returns:	0			Success
5363  *	fp_lookup:EBADF			Bad file descriptor
5364  *      fp_guard_exception:???          Guarded file descriptor
5365  *	close_internal:EBADF
5366  *	close_internal:???              Anything returnable by a per-fileops
5367  *					close function
5368  */
5369 int
sys_close(proc_t p,struct close_args * uap,__unused int32_t * retval)5370 sys_close(proc_t p, struct close_args *uap, __unused int32_t *retval)
5371 {
5372 	kauth_cred_t p_cred = current_cached_proc_cred(p);
5373 
5374 	__pthread_testcancel(1);
5375 	return close_nocancel(p, p_cred, uap->fd);
5376 }
5377 
5378 int
sys_close_nocancel(proc_t p,struct close_nocancel_args * uap,__unused int32_t * retval)5379 sys_close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval)
5380 {
5381 	kauth_cred_t p_cred = current_cached_proc_cred(p);
5382 
5383 	return close_nocancel(p, p_cred, uap->fd);
5384 }
5385 
5386 int
close_nocancel(proc_t p,kauth_cred_t p_cred,int fd)5387 close_nocancel(proc_t p, kauth_cred_t p_cred, int fd)
5388 {
5389 	struct fileproc *fp;
5390 
5391 	AUDIT_SYSCLOSE(p, fd);
5392 
5393 	proc_fdlock(p);
5394 	if ((fp = fp_get_noref_locked(p, fd)) == NULL) {
5395 		proc_fdunlock(p);
5396 		return EBADF;
5397 	}
5398 
5399 	if (fp_isguarded(fp, GUARD_CLOSE)) {
5400 		int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
5401 		proc_fdunlock(p);
5402 		return error;
5403 	}
5404 
5405 	return fp_close_and_unlock(p, p_cred, fd, fp, 0);
5406 }
5407 
5408 
5409 /*
5410  * fstat
5411  *
5412  * Description:	Return status information about a file descriptor.
5413  *
5414  * Parameters:	p				The process doing the fstat
5415  *		fd				The fd to stat
5416  *		ub				The user stat buffer
5417  *		xsecurity			The user extended security
5418  *						buffer, or 0 if none
5419  *		xsecurity_size			The size of xsecurity, or 0
5420  *						if no xsecurity
5421  *		isstat64			Flag to indicate 64 bit version
5422  *						for inode size, etc.
5423  *
5424  * Returns:	0				Success
5425  *		EBADF
5426  *		EFAULT
5427  *	fp_lookup:EBADF				Bad file descriptor
5428  *	vnode_getwithref:???
5429  *	copyout:EFAULT
5430  *	vnode_getwithref:???
5431  *	vn_stat:???
5432  *	soo_stat:???
5433  *	pipe_stat:???
5434  *	pshm_stat:???
5435  *	kqueue_stat:???
5436  *
5437  * Notes:	Internal implementation for all other fstat() related
5438  *		functions
5439  *
5440  *		XXX switch on node type is bogus; need a stat in struct
5441  *		XXX fileops instead.
5442  */
static int
fstat(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity,
    user_addr_t xsecurity_size, int isstat64)
{
	struct fileproc *fp;
	/* Kernel-side stat result; filled in as stat or stat64 per isstat64. */
	union {
		struct stat sb;
		struct stat64 sb64;
	} source;
	/* Userland-layout copy, munged for the caller's ABI (32- or 64-bit). */
	union {
		struct user64_stat user64_sb;
		struct user32_stat user32_sb;
		struct user64_stat64 user64_sb64;
		struct user32_stat64 user32_sb64;
	} dest;
	int error, my_size;
	file_type_t type;
	caddr_t data;
	kauth_filesec_t fsec;
	user_size_t xsecurity_bufsize;
	vfs_context_t ctx = vfs_context_current();
	void * sbptr;


	AUDIT_ARG(fd, fd);

	/* Take a reference on the fileproc so it cannot go away under us. */
	if ((error = fp_lookup(p, fd, &fp, 0)) != 0) {
		return error;
	}
	type = fp->f_type;
	data = (caddr_t)fp_get_data(fp);
	fsec = KAUTH_FILESEC_NONE;

	sbptr = (void *)&source;

	switch (type) {
	case DTYPE_VNODE:
		if ((error = vnode_getwithref((vnode_t)data)) == 0) {
			/*
			 * If the caller has the file open, and is not
			 * requesting extended security information, we are
			 * going to let them get the basic stat information.
			 */
			if (xsecurity == USER_ADDR_NULL) {
				error = vn_stat_noauth((vnode_t)data, sbptr, NULL, isstat64, 0, ctx,
				    fp->fp_glob->fg_cred);
			} else {
				/* vn_stat() also fetches the filesec (ACL) into fsec. */
				error = vn_stat((vnode_t)data, sbptr, &fsec, isstat64, 0, ctx);
			}

			AUDIT_ARG(vnpath, (struct vnode *)data, ARG_VNODE1);
			(void)vnode_put((vnode_t)data);
		}
		break;

#if SOCKETS
	case DTYPE_SOCKET:
		error = soo_stat((struct socket *)data, sbptr, isstat64);
		break;
#endif /* SOCKETS */

	case DTYPE_PIPE:
		error = pipe_stat((void *)data, sbptr, isstat64);
		break;

	case DTYPE_PSXSHM:
		error = pshm_stat((void *)data, sbptr, isstat64);
		break;

	case DTYPE_KQUEUE:
		error = kqueue_stat((void *)data, sbptr, isstat64, p);
		break;

	default:
		/* All other descriptor types cannot be stat'd. */
		error = EBADF;
		goto out;
	}
	if (error == 0) {
		caddr_t sbp;

		/*
		 * Scrub the spare fields, then convert the result into the
		 * layout the calling process expects before copying out.
		 */
		if (isstat64 != 0) {
			source.sb64.st_lspare = 0;
			source.sb64.st_qspare[0] = 0LL;
			source.sb64.st_qspare[1] = 0LL;

			if (IS_64BIT_PROCESS(p)) {
				munge_user64_stat64(&source.sb64, &dest.user64_sb64);
				my_size = sizeof(dest.user64_sb64);
				sbp = (caddr_t)&dest.user64_sb64;
			} else {
				munge_user32_stat64(&source.sb64, &dest.user32_sb64);
				my_size = sizeof(dest.user32_sb64);
				sbp = (caddr_t)&dest.user32_sb64;
			}
		} else {
			source.sb.st_lspare = 0;
			source.sb.st_qspare[0] = 0LL;
			source.sb.st_qspare[1] = 0LL;
			if (IS_64BIT_PROCESS(p)) {
				munge_user64_stat(&source.sb, &dest.user64_sb);
				my_size = sizeof(dest.user64_sb);
				sbp = (caddr_t)&dest.user64_sb;
			} else {
				munge_user32_stat(&source.sb, &dest.user32_sb);
				my_size = sizeof(dest.user32_sb);
				sbp = (caddr_t)&dest.user32_sb;
			}
		}

		error = copyout(sbp, ub, my_size);
	}

	/* caller wants extended security information? */
	if (xsecurity != USER_ADDR_NULL) {
		/* did we get any? */
		if (fsec == KAUTH_FILESEC_NONE) {
			if (susize(xsecurity_size, 0) != 0) {
				error = EFAULT;
				goto out;
			}
		} else {
			/* find the user buffer size */
			xsecurity_bufsize = fusize(xsecurity_size);

			/* copy out the actual data size */
			if (susize(xsecurity_size, KAUTH_FILESEC_COPYSIZE(fsec)) != 0) {
				error = EFAULT;
				goto out;
			}

			/* if the caller supplied enough room, copy out to it */
			if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) {
				error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec));
			}
		}
	}
out:
	fp_drop(p, fd, fp, 0);
	/* Release any filesec vn_stat() allocated on our behalf. */
	if (fsec != NULL) {
		kauth_filesec_free(fsec);
	}
	return error;
}
5586 
5587 
5588 /*
5589  * sys_fstat_extended
5590  *
5591  * Description:	Extended version of fstat supporting returning extended
5592  *		security information
5593  *
5594  * Parameters:	p				The process doing the fstat
5595  *		uap->fd				The fd to stat
5596  *		uap->ub				The user stat buffer
5597  *		uap->xsecurity			The user extended security
5598  *						buffer, or 0 if none
5599  *		uap->xsecurity_size		The size of xsecurity, or 0
5600  *
5601  * Returns:	0				Success
5602  *		!0				Errno (see fstat)
5603  */
5604 int
sys_fstat_extended(proc_t p,struct fstat_extended_args * uap,__unused int32_t * retval)5605 sys_fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval)
5606 {
5607 	return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0);
5608 }
5609 
5610 
5611 /*
5612  * sys_fstat
5613  *
5614  * Description:	Get file status for the file associated with fd
5615  *
5616  * Parameters:	p				The process doing the fstat
5617  *		uap->fd				The fd to stat
5618  *		uap->ub				The user stat buffer
5619  *
5620  * Returns:	0				Success
5621  *		!0				Errno (see fstat)
5622  */
5623 int
sys_fstat(proc_t p,struct fstat_args * uap,__unused int32_t * retval)5624 sys_fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval)
5625 {
5626 	return fstat(p, uap->fd, uap->ub, 0, 0, 0);
5627 }
5628 
5629 
5630 /*
5631  * sys_fstat64_extended
5632  *
5633  * Description:	Extended version of fstat64 supporting returning extended
5634  *		security information
5635  *
5636  * Parameters:	p				The process doing the fstat
5637  *		uap->fd				The fd to stat
5638  *		uap->ub				The user stat buffer
5639  *		uap->xsecurity			The user extended security
5640  *						buffer, or 0 if none
5641  *		uap->xsecurity_size		The size of xsecurity, or 0
5642  *
5643  * Returns:	0				Success
5644  *		!0				Errno (see fstat)
5645  */
5646 int
sys_fstat64_extended(proc_t p,struct fstat64_extended_args * uap,__unused int32_t * retval)5647 sys_fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval)
5648 {
5649 	return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1);
5650 }
5651 
5652 
5653 /*
5654  * sys_fstat64
5655  *
5656  * Description:	Get 64 bit version of the file status for the file associated
5657  *		with fd
5658  *
5659  * Parameters:	p				The process doing the fstat
5660  *		uap->fd				The fd to stat
5661  *		uap->ub				The user stat buffer
5662  *
5663  * Returns:	0				Success
5664  *		!0				Errno (see fstat)
5665  */
5666 int
sys_fstat64(proc_t p,struct fstat64_args * uap,__unused int32_t * retval)5667 sys_fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval)
5668 {
5669 	return fstat(p, uap->fd, uap->ub, 0, 0, 1);
5670 }
5671 
5672 
5673 /*
5674  * sys_fpathconf
5675  *
5676  * Description:	Return pathconf information about a file descriptor.
5677  *
5678  * Parameters:	p				Process making the request
5679  *		uap->fd				fd to get information about
5680  *		uap->name			Name of information desired
5681  *		retval				Pointer to the call return area
5682  *
5683  * Returns:	0				Success
5684  *		EINVAL
5685  *	fp_lookup:EBADF				Bad file descriptor
5686  *	vnode_getwithref:???
5687  *	vn_pathconf:???
5688  *
5689  * Implicit returns:
5690  *		*retval (modified)		Returned information (numeric)
5691  */
5692 int
sys_fpathconf(proc_t p,struct fpathconf_args * uap,int32_t * retval)5693 sys_fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval)
5694 {
5695 	int fd = uap->fd;
5696 	struct fileproc *fp;
5697 	struct vnode *vp;
5698 	int error = 0;
5699 	file_type_t type;
5700 
5701 
5702 	AUDIT_ARG(fd, uap->fd);
5703 	if ((error = fp_lookup(p, fd, &fp, 0))) {
5704 		return error;
5705 	}
5706 	type = fp->f_type;
5707 
5708 	switch (type) {
5709 	case DTYPE_SOCKET:
5710 		if (uap->name != _PC_PIPE_BUF) {
5711 			error = EINVAL;
5712 			goto out;
5713 		}
5714 		*retval = PIPE_BUF;
5715 		error = 0;
5716 		goto out;
5717 
5718 	case DTYPE_PIPE:
5719 		if (uap->name != _PC_PIPE_BUF) {
5720 			error = EINVAL;
5721 			goto out;
5722 		}
5723 		*retval = PIPE_BUF;
5724 		error = 0;
5725 		goto out;
5726 
5727 	case DTYPE_VNODE:
5728 		vp = (struct vnode *)fp_get_data(fp);
5729 
5730 		if ((error = vnode_getwithref(vp)) == 0) {
5731 			AUDIT_ARG(vnpath, vp, ARG_VNODE1);
5732 
5733 			error = vn_pathconf(vp, uap->name, retval, vfs_context_current());
5734 
5735 			(void)vnode_put(vp);
5736 		}
5737 		goto out;
5738 
5739 	default:
5740 		error = EINVAL;
5741 		goto out;
5742 	}
5743 	/*NOTREACHED*/
5744 out:
5745 	fp_drop(p, fd, fp, 0);
5746 	return error;
5747 }
5748 
5749 /*
5750  * sys_flock
5751  *
5752  * Description:	Apply an advisory lock on a file descriptor.
5753  *
5754  * Parameters:	p				Process making request
5755  *		uap->fd				fd on which the lock is to be
5756  *						attempted
5757  *		uap->how			(Un)Lock bits, including type
5758  *		retval				Pointer to the call return area
5759  *
5760  * Returns:	0				Success
5761  *	fp_getfvp:EBADF				Bad file descriptor
5762  *	fp_getfvp:ENOTSUP			fd does not refer to a vnode
5763  *	vnode_getwithref:???
5764  *	VNOP_ADVLOCK:???
5765  *
5766  * Implicit returns:
5767  *		*retval (modified)		Size of dtable
5768  *
5769  * Notes:	Just attempt to get a record lock of the requested type on
5770  *		the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
5771  */
5772 int
sys_flock(proc_t p,struct flock_args * uap,__unused int32_t * retval)5773 sys_flock(proc_t p, struct flock_args *uap, __unused int32_t *retval)
5774 {
5775 	int fd = uap->fd;
5776 	int how = uap->how;
5777 	struct fileproc *fp;
5778 	struct vnode *vp;
5779 	struct flock lf;
5780 	vfs_context_t ctx = vfs_context_current();
5781 	int error = 0;
5782 
5783 	AUDIT_ARG(fd, uap->fd);
5784 	if ((error = fp_getfvp(p, fd, &fp, &vp))) {
5785 		return error;
5786 	}
5787 	if ((error = vnode_getwithref(vp))) {
5788 		goto out1;
5789 	}
5790 	AUDIT_ARG(vnpath, vp, ARG_VNODE1);
5791 
5792 	lf.l_whence = SEEK_SET;
5793 	lf.l_start = 0;
5794 	lf.l_len = 0;
5795 	if (how & LOCK_UN) {
5796 		lf.l_type = F_UNLCK;
5797 		error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
5798 		goto out;
5799 	}
5800 	if (how & LOCK_EX) {
5801 		lf.l_type = F_WRLCK;
5802 	} else if (how & LOCK_SH) {
5803 		lf.l_type = F_RDLCK;
5804 	} else {
5805 		error = EBADF;
5806 		goto out;
5807 	}
5808 #if CONFIG_MACF
5809 	error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob, F_SETLK, &lf);
5810 	if (error) {
5811 		goto out;
5812 	}
5813 #endif
5814 	error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &lf,
5815 	    (how & LOCK_NB ? F_FLOCK : F_FLOCK | F_WAIT),
5816 	    ctx, NULL);
5817 	if (!error) {
5818 		os_atomic_or(&fp->fp_glob->fg_flag, FWASLOCKED, relaxed);
5819 	}
5820 out:
5821 	(void)vnode_put(vp);
5822 out1:
5823 	fp_drop(p, fd, fp, 0);
5824 	return error;
5825 }
5826 
5827 /*
5828  * sys_fileport_makeport
5829  *
5830  * Description: Obtain a Mach send right for a given file descriptor.
5831  *
5832  * Parameters:	p		Process calling fileport
5833  *              uap->fd		The fd to reference
5834  *              uap->portnamep  User address at which to place port name.
5835  *
5836  * Returns:	0		Success.
5837  *              EBADF		Bad file descriptor.
5838  *              EINVAL		File descriptor had type that cannot be sent, misc. other errors.
5839  *              EFAULT		Address at which to store port name is not valid.
5840  *              EAGAIN		Resource shortage.
5841  *
5842  * Implicit returns:
5843  *		On success, name of send right is stored at user-specified address.
5844  */
int
sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap,
    __unused int *retval)
{
	int err;
	int fd = uap->fd;
	user_addr_t user_portaddr = uap->portnamep;
	struct fileproc *fp = FILEPROC_NULL;
	struct fileglob *fg = NULL;
	ipc_port_t fileport;
	mach_port_name_t name = MACH_PORT_NULL;

	/* Hold the fd table lock while we validate and reference the file. */
	proc_fdlock(p);
	err = fp_lookup(p, fd, &fp, 1);
	if (err != 0) {
		goto out_unlock;
	}

	fg = fp->fp_glob;
	/* Refuse file types that cannot be transported over IPC. */
	if (!fg_sendable(fg)) {
		err = EINVAL;
		goto out_unlock;
	}

	/* Guarded descriptors may forbid fileport creation entirely. */
	if (fp_isguarded(fp, GUARD_FILEPORT)) {
		err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
		goto out_unlock;
	}

	/* Dropped when port is deallocated */
	fg_ref(p, fg);

	proc_fdunlock(p);

	/* Allocate and initialize a port */
	fileport = fileport_alloc(fg);
	if (fileport == IPC_PORT_NULL) {
		/* Port allocation failed: give back the fileglob reference. */
		fg_drop_live(fg);
		err = EAGAIN;
		goto out;
	}

	/* Add an entry.  Deallocates port on failure. */
	name = ipc_port_copyout_send(fileport, get_task_ipcspace(proc_task(p)));
	if (!MACH_PORT_VALID(name)) {
		err = EINVAL;
		goto out;
	}

	err = copyout(&name, user_portaddr, sizeof(mach_port_name_t));
	if (err != 0) {
		goto out;
	}

	/* Tag the fileglob for debugging purposes */
	lck_mtx_lock_spin(&fg->fg_lock);
	fg->fg_lflags |= FG_PORTMADE;
	lck_mtx_unlock(&fg->fg_lock);

	fp_drop(p, fd, fp, 0);

	return 0;

out_unlock:
	proc_fdunlock(p);
out:
	if (MACH_PORT_VALID(name)) {
		/* Don't care if another thread races us to deallocate the entry */
		(void) mach_port_deallocate(get_task_ipcspace(proc_task(p)), name);
	}

	if (fp != FILEPROC_NULL) {
		fp_drop(p, fd, fp, 0);
	}

	return err;
}
5922 
/*
 * Release the fileglob reference held on behalf of a fileport
 * (taken in sys_fileport_makeport(); see "Dropped when port is
 * deallocated" there).
 */
void
fileport_releasefg(struct fileglob *fg)
{
	/* FG_NOPROC: this reference is not owned by any process' fd table. */
	(void)fg_drop(FG_NOPROC, fg);
}
5928 
5929 /*
5930  * fileport_makefd
5931  *
5932  * Description: Obtain the file descriptor for a given Mach send right.
5933  *
5934  * Returns:	0		Success
5935  *		EINVAL		Invalid Mach port name, or port is not for a file.
5936  *	fdalloc:EMFILE
5937  *	fdalloc:ENOMEM		Unable to allocate fileproc or extend file table.
5938  *
5939  * Implicit returns:
5940  *		*retval (modified)		The new descriptor
5941  */
int
fileport_makefd(proc_t p, ipc_port_t port, fileproc_flags_t fp_flags, int *retval)
{
	struct fileglob *fg;
	struct fileproc *fp = FILEPROC_NULL;
	int fd;
	int err;

	/* The port must actually be a fileport wrapping a fileglob. */
	fg = fileport_port_to_fileglob(port);
	if (fg == NULL) {
		err = EINVAL;
		goto out;
	}

	/* Allocate the fileproc before taking the fd table lock. */
	fp = fileproc_alloc_init();

	proc_fdlock(p);
	err = fdalloc(p, 0, &fd);
	if (err != 0) {
		proc_fdunlock(p);
		goto out;
	}
	if (fp_flags) {
		fp->fp_flags |= fp_flags;
	}

	/* Bind the new descriptor to the fileglob, taking a reference. */
	fp->fp_glob = fg;
	fg_ref(p, fg);

	procfdtbl_releasefd(p, fd, fp);
	proc_fdunlock(p);

	*retval = fd;
	err = 0;
out:
	/* On failure, release the fileproc we allocated but never installed. */
	if ((fp != NULL) && (0 != err)) {
		fileproc_free(fp);
	}

	return err;
}
5983 
5984 /*
5985  * sys_fileport_makefd
5986  *
5987  * Description: Obtain the file descriptor for a given Mach send right.
5988  *
5989  * Parameters:	p		Process calling fileport
5990  *              uap->port	Name of send right to file port.
5991  *
5992  * Returns:	0		Success
5993  *		EINVAL		Invalid Mach port name, or port is not for a file.
5994  *	fdalloc:EMFILE
5995  *	fdalloc:ENOMEM		Unable to allocate fileproc or extend file table.
5996  *
5997  * Implicit returns:
5998  *		*retval (modified)		The new descriptor
5999  */
6000 int
sys_fileport_makefd(proc_t p,struct fileport_makefd_args * uap,int32_t * retval)6001 sys_fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval)
6002 {
6003 	ipc_port_t port = IPC_PORT_NULL;
6004 	mach_port_name_t send = uap->port;
6005 	kern_return_t res;
6006 	int err;
6007 
6008 	res = ipc_typed_port_copyin_send(get_task_ipcspace(proc_task(p)),
6009 	    send, IKOT_FILEPORT, &port);
6010 
6011 	if (res == KERN_SUCCESS) {
6012 		err = fileport_makefd(p, port, FP_CLOEXEC, retval);
6013 	} else {
6014 		err = EINVAL;
6015 	}
6016 
6017 	if (IPC_PORT_NULL != port) {
6018 		ipc_typed_port_release_send(port, IKOT_FILEPORT);
6019 	}
6020 
6021 	return err;
6022 }
6023 
6024 
6025 #pragma mark fileops wrappers
6026 
6027 /*
6028  * fo_read
6029  *
6030  * Description:	Generic fileops read indirected through the fileops pointer
6031  *		in the fileproc structure
6032  *
6033  * Parameters:	fp				fileproc structure pointer
6034  *		uio				user I/O structure pointer
6035  *		flags				FOF_ flags
6036  *		ctx				VFS context for operation
6037  *
6038  * Returns:	0				Success
6039  *		!0				Errno from read
6040  */
6041 int
fo_read(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)6042 fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6043 {
6044 	return (*fp->f_ops->fo_read)(fp, uio, flags, ctx);
6045 }
6046 
6047 int
fo_no_read(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)6048 fo_no_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6049 {
6050 #pragma unused(fp, uio, flags, ctx)
6051 	return ENXIO;
6052 }
6053 
6054 
6055 /*
6056  * fo_write
6057  *
6058  * Description:	Generic fileops write indirected through the fileops pointer
6059  *		in the fileproc structure
6060  *
6061  * Parameters:	fp				fileproc structure pointer
6062  *		uio				user I/O structure pointer
6063  *		flags				FOF_ flags
6064  *		ctx				VFS context for operation
6065  *
6066  * Returns:	0				Success
6067  *		!0				Errno from write
6068  */
6069 int
fo_write(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)6070 fo_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6071 {
6072 	return (*fp->f_ops->fo_write)(fp, uio, flags, ctx);
6073 }
6074 
6075 int
fo_no_write(struct fileproc * fp,struct uio * uio,int flags,vfs_context_t ctx)6076 fo_no_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6077 {
6078 #pragma unused(fp, uio, flags, ctx)
6079 	return ENXIO;
6080 }
6081 
6082 
6083 /*
6084  * fo_ioctl
6085  *
6086  * Description:	Generic fileops ioctl indirected through the fileops pointer
6087  *		in the fileproc structure
6088  *
6089  * Parameters:	fp				fileproc structure pointer
6090  *		com				ioctl command
6091  *		data				pointer to internalized copy
6092  *						of user space ioctl command
6093  *						parameter data in kernel space
6094  *		ctx				VFS context for operation
6095  *
6096  * Returns:	0				Success
6097  *		!0				Errno from ioctl
6098  *
6099  * Locks:	The caller is assumed to have held the proc_fdlock; this
6100  *		function releases and reacquires this lock.  If the caller
6101  *		accesses data protected by this lock prior to calling this
6102  *		function, it will need to revalidate/reacquire any cached
6103  *		protected data obtained prior to the call.
6104  */
int
fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
{
	int error;

	/*
	 * Caller holds the proc_fdlock (see header comment above).  Drop it
	 * across the indirect call — presumably because fo_ioctl
	 * implementations can block — and reacquire before returning.
	 */
	proc_fdunlock(vfs_context_proc(ctx));
	error = (*fp->f_ops->fo_ioctl)(fp, com, data, ctx);
	proc_fdlock(vfs_context_proc(ctx));
	return error;
}
6115 
6116 int
fo_no_ioctl(struct fileproc * fp,u_long com,caddr_t data,vfs_context_t ctx)6117 fo_no_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
6118 {
6119 #pragma unused(fp, com, data, ctx)
6120 	return ENOTTY;
6121 }
6122 
6123 
6124 /*
6125  * fo_select
6126  *
6127  * Description:	Generic fileops select indirected through the fileops pointer
6128  *		in the fileproc structure
6129  *
6130  * Parameters:	fp				fileproc structure pointer
6131  *		which				select which
6132  *		wql				pointer to wait queue list
6133  *		ctx				VFS context for operation
6134  *
6135  * Returns:	0				Success
6136  *		!0				Errno from select
6137  */
6138 int
fo_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)6139 fo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
6140 {
6141 	return (*fp->f_ops->fo_select)(fp, which, wql, ctx);
6142 }
6143 
6144 int
fo_no_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)6145 fo_no_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
6146 {
6147 #pragma unused(fp, which, wql, ctx)
6148 	return ENOTSUP;
6149 }
6150 
6151 
6152 /*
6153  * fo_close
6154  *
6155  * Description:	Generic fileops close indirected through the fileops pointer
6156  *		in the fileproc structure
6157  *
6158  * Parameters:	fp				fileproc structure pointer for
6159  *						file to close
6160  *		ctx				VFS context for operation
6161  *
6162  * Returns:	0				Success
6163  *		!0				Errno from close
6164  */
6165 int
fo_close(struct fileglob * fg,vfs_context_t ctx)6166 fo_close(struct fileglob *fg, vfs_context_t ctx)
6167 {
6168 	return (*fg->fg_ops->fo_close)(fg, ctx);
6169 }
6170 
6171 
6172 /*
6173  * fo_drain
6174  *
6175  * Description:	Generic fileops kqueue filter indirected through the fileops
6176  *		pointer in the fileproc structure
6177  *
6178  * Parameters:	fp				fileproc structure pointer
6179  *		ctx				VFS context for operation
6180  *
6181  * Returns:	0				Success
6182  *		!0				errno from drain
6183  */
6184 int
fo_drain(struct fileproc * fp,vfs_context_t ctx)6185 fo_drain(struct fileproc *fp, vfs_context_t ctx)
6186 {
6187 	return (*fp->f_ops->fo_drain)(fp, ctx);
6188 }
6189 
6190 int
fo_no_drain(struct fileproc * fp,vfs_context_t ctx)6191 fo_no_drain(struct fileproc *fp, vfs_context_t ctx)
6192 {
6193 #pragma unused(fp, ctx)
6194 	return ENOTSUP;
6195 }
6196 
6197 
6198 /*
6199  * fo_kqfilter
6200  *
6201  * Description:	Generic fileops kqueue filter indirected through the fileops
6202  *		pointer in the fileproc structure
6203  *
6204  * Parameters:	fp				fileproc structure pointer
6205  *		kn				pointer to knote to filter on
6206  *
6207  * Returns:	(kn->kn_flags & EV_ERROR)	error in kn->kn_data
6208  *		0				Filter is not active
6209  *		!0				Filter is active
6210  */
6211 int
fo_kqfilter(struct fileproc * fp,struct knote * kn,struct kevent_qos_s * kev)6212 fo_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6213 {
6214 	return (*fp->f_ops->fo_kqfilter)(fp, kn, kev);
6215 }
6216 
6217 int
fo_no_kqfilter(struct fileproc * fp,struct knote * kn,struct kevent_qos_s * kev)6218 fo_no_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6219 {
6220 #pragma unused(fp, kev)
6221 	knote_set_error(kn, ENOTSUP);
6222 	return 0;
6223 }
6224