xref: /xnu-8019.80.24/bsd/kern/kern_fork.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_fork.c	8.8 (Berkeley) 2/14/95
67  */
68 /*
69  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 /*
75  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76  * support for mandatory and extensible security protections.  This notice
77  * is included in support of clause 2.2 (b) of the Apple Public License,
78  * Version 2.0.
79  */
80 
81 #include <kern/assert.h>
82 #include <kern/bits.h>
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/filedesc.h>
86 #include <sys/kernel.h>
87 #include <sys/malloc.h>
88 #include <sys/proc_internal.h>
89 #include <sys/kauth.h>
90 #include <sys/user.h>
91 #include <sys/reason.h>
92 #include <sys/resourcevar.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/file_internal.h>
95 #include <sys/acct.h>
96 #include <sys/codesign.h>
97 #include <sys/sysent.h>
98 #include <sys/sysproto.h>
99 #if CONFIG_PERSONAS
100 #include <sys/persona.h>
101 #endif
102 #include <sys/doc_tombstone.h>
103 #if CONFIG_DTRACE
104 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
105 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
106 extern void dtrace_proc_fork(proc_t, proc_t, int);
107 
108 /*
109  * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
110  * we will store its value before actually calling it.
111  */
112 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
113 
114 #include <sys/dtrace_ptss.h>
115 #endif
116 
117 #include <security/audit/audit.h>
118 
119 #include <mach/mach_types.h>
120 #include <kern/coalition.h>
121 #include <kern/kern_types.h>
122 #include <kern/kalloc.h>
123 #include <kern/mach_param.h>
124 #include <kern/task.h>
125 #include <kern/thread.h>
126 #include <kern/thread_call.h>
127 #include <kern/zalloc.h>
128 
129 #include <os/log.h>
130 
131 #if CONFIG_MACF
132 #include <security/mac_framework.h>
133 #include <security/mac_mach_internal.h>
134 #endif
135 
136 #include <vm/vm_map.h>
137 #include <vm/vm_protos.h>
138 #include <vm/vm_shared_region.h>
139 
140 #include <sys/shm_internal.h>   /* for shmfork() */
141 #include <mach/task.h>          /* for thread_create() */
142 #include <mach/thread_act.h>    /* for thread_resume() */
143 
144 #include <sys/sdt.h>
145 
146 #if CONFIG_MEMORYSTATUS
147 #include <sys/kern_memorystatus.h>
148 #endif
149 
/* XXX routines which should have Mach prototypes, but don't */
void thread_set_parent(thread_t parent, int pid);
extern void act_thread_catt(void *ctx);
void thread_set_child(thread_t child, int pid);
void *act_thread_csave(void);
extern boolean_t task_is_exec_copy(task_t);
/* Next PID "version" (reuse generation) to hand out; NOTE(review): presumably
 * serialized by the proc list lock where it is consumed — confirm at use site. */
int nextpidversion = 0;


/* Forward declarations for the process-creation helpers defined below. */
thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
proc_t forkproc(proc_t);
void forkproc_free(proc_t);
thread_t fork_create_child(task_t parent_task,
    coalition_t *parent_coalitions,
    proc_t child,
    int inherit_memory,
    int is_64bit_addr,
    int is_64bit_data,
    int in_exec);

/* Size of a uthread, exported for allocators outside this file. */
__private_extern__ const size_t uthread_size = sizeof(struct uthread);
static LCK_GRP_DECLARE(rethrottle_lock_grp, "rethrottle");

/* Reference group and backing zone for struct proc allocations. */
os_refgrp_decl(, p_refgrp, "proc", NULL);
SECURITY_READ_ONLY_LATE(zone_t) proc_zone;
ZONE_INIT(&proc_zone, "proc", sizeof(struct proc),
    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, /* sequester is needed for proc_rele() */
    ZONE_ID_PROC, NULL);

KALLOC_TYPE_DEFINE(proc_stats_zone, struct pstats, KT_DEFAULT);

/* Read-only zone backing the hardened per-process signal action table. */
static SECURITY_READ_ONLY_LATE(zone_t) proc_sigacts_ro_zone;
ZONE_INIT(&proc_sigacts_ro_zone, "sigacts_ro", sizeof(struct sigacts_ro),
    ZC_READONLY | ZC_ZFREE_CLEARMEM, ZONE_ID_PROC_SIGACTS_RO, NULL);
184 
185 /*
186  * fork1
187  *
188  * Description:	common code used by all new process creation other than the
189  *		bootstrap of the initial process on the system
190  *
191  * Parameters: parent_proc		parent process of the process being
192  *		child_threadp		pointer to location to receive the
193  *					Mach thread_t of the child process
194  *					created
195  *		kind			kind of creation being requested
196  *		coalitions		if spawn, the set of coalitions the
197  *					child process should join, or NULL to
198  *					inherit the parent's. On non-spawns,
199  *					this param is ignored and the child
200  *					always inherits the parent's
201  *					coalitions.
202  *
203  * Notes:	Permissable values for 'kind':
204  *
205  *		PROC_CREATE_FORK	Create a complete process which will
206  *					return actively running in both the
207  *					parent and the child; the child copies
208  *					the parent address space.
209  *		PROC_CREATE_SPAWN	Create a complete process which will
210  *					return actively running in the parent
211  *					only after returning actively running
212  *					in the child; the child address space
213  *					is newly created by an image activator,
214  *					after which the child is run.
215  *
216  *		At first it may seem strange that we return the child thread
217  *		address rather than process structure, since the process is
218  *		the only part guaranteed to be "new"; however, since we do
219  *		not actualy adjust other references between Mach and BSD, this
220  *		is the only method which guarantees us the ability to get
221  *		back to the other information.
222  */
int
fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
{
	proc_t child_proc = NULL;       /* set in switch, but compiler... */
	thread_t child_thread = NULL;
	uid_t uid;
	size_t count;
	int err = 0;
	int spawn = 0;
	rlim_t rlimit_nproc_cur;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit. The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_getruid();
	proc_list_lock();
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
		/*
		 * On the development kernel, panic so that the fact that we hit
		 * the process limit is obvious, as this may very well wedge the
		 * system.
		 */
		panic("The process table is full; parent pid=%d", proc_getpid(parent_proc));
#endif
		proc_list_unlock();
		tablefull("proc");
		return EAGAIN;
	}
	proc_list_unlock();

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit, which is
	 * always less than what an rlim_t can hold.
	 * (locking protection is provided by list lock held in chgproccnt)
	 *
	 * Note: the count is bumped unconditionally here; every error path
	 * below must go through 'bad' so the decrement is applied.
	 */
	count = chgproccnt(uid, 1);
	rlimit_nproc_cur = proc_limitgetcur(parent_proc, RLIMIT_NPROC);
	if (uid != 0 &&
	    (rlim_t)count > rlimit_nproc_cur) {
#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
		/*
		 * On the development kernel, panic so that the fact that we hit
		 * the per user process limit is obvious.  This may be less dire
		 * than hitting the global process limit, but we cannot rely on
		 * that.
		 */
		panic("The per-user process limit has been hit; parent pid=%d, uid=%d", proc_getpid(parent_proc), uid);
#endif
		err = EAGAIN;
		goto bad;
	}

#if CONFIG_MACF
	/*
	 * Determine if MAC policies applied to the process will allow
	 * it to fork.  This is an advisory-only check.
	 */
	err = mac_proc_check_fork(parent_proc);
	if (err != 0) {
		goto bad;
	}
#endif

	switch (kind) {
	case PROC_CREATE_SPAWN:
		/*
		 * A spawned process differs from a forked process in that
		 * the spawned process does not carry around the parents
		 * baggage with regard to address space copying, dtrace,
		 * and so on.
		 */
		spawn = 1;

		OS_FALLTHROUGH;

	case PROC_CREATE_FORK:
		/*
		 * When we clone the parent process, we are going to inherit
		 * its task attributes and memory, since when we fork, we
		 * will, in effect, create a duplicate of it, with only minor
		 * differences.  Contrarily, spawned processes do not inherit.
		 *
		 * For spawn: caller-supplied coalitions, no memory inherit.
		 * For fork: parent's coalitions, inherit the address space.
		 */
		if ((child_thread = cloneproc(parent_proc->task,
		    spawn ? coalitions : NULL,
		    parent_proc,
		    spawn ? FALSE : TRUE,
		    FALSE)) == NULL) {
			/* Failed to create thread */
			err = EAGAIN;
			goto bad;
		}

		/* copy current thread state into the child thread (only for fork) */
		if (!spawn) {
			thread_dup(child_thread);
		}

		/* child_proc = child_thread->task->proc; */
		child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 *       association without informing the policy in other
		 *       situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(proc_ucred(child_proc), child_proc);
#endif

		/*
		 * Propogate change of PID - may get new cred if auditing.
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, proc_getpid(child_proc));

// XXX END: wants to move to be common code (and safe)

		/*
		 * Blow thread state information; this is what gives the child
		 * process its "return" value from a fork() call.
		 *
		 * Note: this should probably move to fork() proper, since it
		 * is not relevent to spawn, and the value won't matter
		 * until we resume the child there.  If you are in here
		 * refactoring code, consider doing this at the same time.
		 */
		thread_set_child(child_thread, proc_getpid(child_proc));

		child_proc->p_acflag = AFORK;   /* forked but not exec'ed */

#if CONFIG_DTRACE
		dtrace_proc_fork(parent_proc, child_proc, spawn);
#endif  /* CONFIG_DTRACE */
		if (!spawn) {
			/*
			 * Of note, we need to initialize the bank context behind
			 * the protection of the proc_trans lock to prevent a race with exit.
			 */
			task_bank_init(get_threadtask(child_thread));
		}

		break;

	default:
		panic("fork1 called with unknown kind %d", kind);
		break;
	}


	/* return the thread pointer to the caller */
	*child_threadp = child_thread;

bad:
	/*
	 * In the error case, we return a 0 value for the returned pid (but
	 * it is ignored in the trampoline due to the error return); this
	 * is probably not necessary.
	 *
	 * Undo the per-uid process count bump taken above on failure.
	 */
	if (err) {
		(void)chgproccnt(uid, -1);
	}

	return err;
}
397 
398 
399 
400 
401 /*
402  * fork_create_child
403  *
404  * Description:	Common operations associated with the creation of a child
405  *		process. Return with new task and first thread's control port movable
406  *      and not pinned.
407  *
408  * Parameters:	parent_task		parent task
409  *		parent_coalitions	parent's set of coalitions
410  *		child_proc			child process
411  *		inherit_memory		TRUE, if the parents address space is
412  *							to be inherited by the child
413  *		is_64bit_addr		TRUE, if the child being created will
414  *							be associated with a 64 bit address space
415  *		is_64bit_data		TRUE if the child being created will use a
416  *                                                       64-bit register state
417  *		in_exec				TRUE, if called from execve or posix spawn set exec
418  *							FALSE, if called from fork or vfexec
419  *
420  * Note:	This code is called in the fork() case, from the execve() call
421  *		graph, from the posix_spawn() call graph (which implicitly
422  *		includes a vfork() equivalent call, and in the system
423  *		bootstrap case.
424  *
425  *		It creates a new task and thread (and as a side effect of the
426  *		thread creation, a uthread) in the parent coalition set, which is
427  *		then associated with the process 'child'.  If the parent
428  *		process address space is to be inherited, then a flag
429  *		indicates that the newly created task should inherit this from
430  *		the child task.
431  *
432  *		As a special concession to bootstrapping the initial process
433  *		in the system, it's possible for 'parent_task' to be TASK_NULL;
434  *		in this case, 'inherit_memory' MUST be FALSE.
435  */
436 thread_t
fork_create_child(task_t parent_task,coalition_t * parent_coalitions,proc_t child_proc,int inherit_memory,int is_64bit_addr,int is_64bit_data,int in_exec)437 fork_create_child(task_t parent_task,
438     coalition_t *parent_coalitions,
439     proc_t child_proc,
440     int inherit_memory,
441     int is_64bit_addr,
442     int is_64bit_data,
443     int in_exec)
444 {
445 	thread_t        child_thread = NULL;
446 	task_t          child_task;
447 	kern_return_t   result;
448 	proc_ro_t       proc_ro;
449 
450 	proc_ro = proc_get_ro(child_proc);
451 	if (proc_ro_task(proc_ro) != NULL) {
452 		/* task will need to allocate its own proc_ro: */
453 		proc_ro = NULL;
454 	}
455 
456 	/* Create a new task for the child process */
457 	result = task_create_internal(parent_task,
458 	    proc_ro,
459 	    parent_coalitions,
460 	    inherit_memory,
461 	    is_64bit_addr,
462 	    is_64bit_data,
463 	    TF_NONE,
464 	    in_exec ? TPF_EXEC_COPY : TPF_NONE,                        /* Mark the task exec copy if in execve */
465 	    (TRW_LRETURNWAIT | TRW_LRETURNWAITER),                     /* All created threads will wait in task_wait_to_return */
466 	    &child_task);
467 	if (result != KERN_SUCCESS) {
468 		printf("%s: task_create_internal failed.  Code: %d\n",
469 		    __func__, result);
470 		goto bad;
471 	}
472 
473 	if (!in_exec) {
474 		/*
475 		 * Set the child process task to the new task if not in exec,
476 		 * will set the task for exec case in proc_exec_switch_task after image activation.
477 		 */
478 		proc_set_task(child_proc, child_task);
479 		if (proc_ro == NULL) {
480 			proc_switch_ro(child_proc, task_get_ro(child_task));
481 		}
482 	}
483 
484 	/* Set child task process to child proc */
485 	set_bsdtask_info(child_task, child_proc);
486 
487 	/* Propagate CPU limit timer from parent */
488 	if (timerisset(&child_proc->p_rlim_cpu)) {
489 		task_vtimer_set(child_task, TASK_VTIMER_RLIM);
490 	}
491 
492 	/*
493 	 * Set child process BSD visible scheduler priority if nice value
494 	 * inherited from parent
495 	 */
496 	if (child_proc->p_nice != 0) {
497 		resetpriority(child_proc);
498 	}
499 
500 	/*
501 	 * Create main thread for the child process. Its control port is not immovable/pinned
502 	 * until main_thread_set_immovable_pinned().
503 	 *
504 	 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
505 	 */
506 	result = thread_create_waiting(child_task,
507 	    (thread_continue_t)task_wait_to_return,
508 	    task_get_return_wait_event(child_task),
509 	    TH_CREATE_WAITING_OPTION_NONE,
510 	    &child_thread);
511 
512 	if (result != KERN_SUCCESS) {
513 		printf("%s: thread_create failed. Code: %d\n",
514 		    __func__, result);
515 		task_deallocate(child_task);
516 		child_task = NULL;
517 	}
518 
519 	/*
520 	 * Tag thread as being the first thread in its task.
521 	 */
522 	thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
523 
524 bad:
525 	thread_yield_internal(1);
526 
527 	return child_thread;
528 }
529 
530 
531 /*
532  * fork
533  *
534  * Description:	fork system call.
535  *
536  * Parameters:	parent			Parent process to fork
537  *		uap (void)		[unused]
538  *		retval			Return value
539  *
540  * Returns:	0			Success
541  *		EAGAIN			Resource unavailable, try again
542  *
543  * Notes:	Attempts to create a new child process which inherits state
544  *		from the parent process.  If successful, the call returns
545  *		having created an initially suspended child process with an
546  *		extra Mach task and thread reference, for which the thread
547  *		is initially suspended.  Until we resume the child process,
548  *		it is not yet running.
549  *
550  *		The return information to the child is contained in the
551  *		thread state structure of the new child, and does not
552  *		become visible to the child through a normal return process,
553  *		since it never made the call into the kernel itself in the
554  *		first place.
555  *
556  *		After resuming the thread, this function returns directly to
557  *		the parent process which invoked the fork() system call.
558  *
559  * Important:	The child thread_resume occurs before the parent returns;
560  *		depending on scheduling latency, this means that it is not
561  *		deterministic as to whether the parent or child is scheduled
562  *		to run first.  It is entirely possible that the child could
563  *		run to completion prior to the parent running.
564  */
int
fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
{
	thread_t child_thread;
	int err;

	retval[1] = 0;          /* flag parent return for user space */

	if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
		task_t child_task;
		proc_t child_proc;

		/* Return to the parent */
		child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
		retval[0] = proc_getpid(child_proc);

		child_task = (task_t)get_threadtask(child_thread);
		assert(child_task != TASK_NULL);

		/* task_control_port_options has been inherited from parent, apply it */
		task_set_immovable_pinned(child_task);
		main_thread_set_immovable_pinned(child_thread);

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(child_proc, 0);
		proc_transend(child_proc, 0);

		/* flag the fork has occurred */
		proc_knote(parent_proc, NOTE_FORK | proc_getpid(child_proc));
		DTRACE_PROC1(create, proc_t, child_proc);

#if CONFIG_DTRACE
		/* Snapshot the hook pointer first; it can be unloaded concurrently. */
		if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
			(*dtrace_proc_waitfor_hook)(child_proc);
		}
#endif

		/* "Return" to the child: release it from task_wait_to_return */
		task_clear_return_wait(get_threadtask(child_thread), TCRW_CLEAR_ALL_WAIT);

		/*
		 * Drop the extra references we got during the creation.
		 * Note: this must come after task_clear_return_wait(); the
		 * child may already be running by the time these are dropped.
		 */
		task_deallocate(child_task);
		thread_deallocate(child_thread);
	}

	return err;
}
616 
617 
618 /*
619  * cloneproc
620  *
621  * Description: Create a new process from a specified process.
622  *
623  * Parameters:	parent_task		The parent task to be cloned, or
624  *					TASK_NULL is task characteristics
625  *					are not to be inherited
626  *					be cloned, or TASK_NULL if the new
627  *					task is not to inherit the VM
628  *					characteristics of the parent
629  *		parent_proc		The parent process to be cloned
630  *		inherit_memory		True if the child is to inherit
631  *					memory from the parent; if this is
632  *					non-NULL, then the parent_task must
633  *					also be non-NULL
634  *		memstat_internal	Whether to track the process in the
635  *					jetsam priority list (if configured)
636  *
637  * Returns:	!NULL			pointer to new child thread
638  *		NULL			Failure (unspecified)
639  *
640  * Note:	On return newly created child process has signal lock held
641  *		to block delivery of signal to it if called with lock set.
642  *		fork() code needs to explicity remove this lock before
643  *		signals can be delivered
644  *
645  *		In the case of bootstrap, this function can be called from
646  *		bsd_utaskbootstrap() in order to bootstrap the first process;
647  *		the net effect is to provide a uthread structure for the
648  *		kernel process associated with the kernel task.
649  *
650  * XXX:		Tristating using the value parent_task as the major key
651  *		and inherit_memory as the minor key is something we should
652  *		refactor later; we owe the current semantics, ultimately,
653  *		to the semantics of task_create_internal.  For now, we will
654  *		live with this being somewhat awkward.
655  */
thread_t
cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
{
#if !CONFIG_MEMORYSTATUS
#pragma unused(memstat_internal)
#endif
	task_t child_task;
	proc_t child_proc;
	thread_t child_thread = NULL;

	if ((child_proc = forkproc(parent_proc)) == NULL) {
		/* Failed to allocate new process */
		goto bad;
	}

	/*
	 * In the case where the parent_task is TASK_NULL (during the init path)
	 * we make the assumption that the register size will be the same as the
	 * address space size since there's no way to determine the possible
	 * register size until an image is exec'd.
	 *
	 * The only architecture that has different address space and register sizes
	 * (arm64_32) isn't being used within kernel-space, so the above assumption
	 * always holds true for the init path.
	 */
	const int parent_64bit_addr = parent_proc->p_flag & P_LP64;
	const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task);

	child_thread = fork_create_child(parent_task,
	    parent_coalitions,
	    child_proc,
	    inherit_memory,
	    parent_64bit_addr,
	    parent_64bit_data,
	    FALSE);

	if (child_thread == NULL) {
		/*
		 * Failed to create thread; now we must deconstruct the new
		 * process previously obtained from forkproc().
		 */
		forkproc_free(child_proc);
		goto bad;
	}

	child_task = get_threadtask(child_thread);
	/* Mirror the parent's address-space width into the child proc and uthread */
	if (parent_64bit_addr) {
		OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
		get_bsdthread_info(child_thread)->uu_flag |= UT_LP64;
	} else {
		OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
		get_bsdthread_info(child_thread)->uu_flag &= ~UT_LP64;
	}

#if CONFIG_MEMORYSTATUS
	if (memstat_internal) {
		proc_list_lock();
		child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
		proc_list_unlock();
	}
#endif

	/* make child visible (only after it is fully constructed above) */
	pinsertchild(parent_proc, child_proc);

	/*
	 * Make child runnable, set start time.
	 */
	child_proc->p_stat = SRUN;
bad:
	return child_thread;
}
728 
729 __abortlike
730 static void
panic_sigacts_backref_mismatch(struct sigacts * sa)731 panic_sigacts_backref_mismatch(struct sigacts *sa)
732 {
733 	panic("sigacts_ro backref mismatch: sigacts=%p, ro=%p, backref=%p",
734 	    sa, sa->ps_ro, sa->ps_ro->ps_rw);
735 }
736 
737 static struct sigacts_ro *
sigacts_ro(struct sigacts * sa)738 sigacts_ro(struct sigacts *sa)
739 {
740 	struct sigacts_ro *ro = sa->ps_ro;
741 
742 	zone_require_ro(ZONE_ID_PROC_SIGACTS_RO, sizeof(struct sigacts_ro),
743 	    ro);
744 
745 	if (__improbable(ro->ps_rw != sa)) {
746 		panic_sigacts_backref_mismatch(sa);
747 	}
748 
749 	return ro;
750 }
751 
752 void
proc_set_sigact(proc_t p,int sig,user_addr_t sigact)753 proc_set_sigact(proc_t p, int sig, user_addr_t sigact)
754 {
755 	assert((sig > 0) && (sig < NSIG));
756 
757 	zalloc_ro_update_field(ZONE_ID_PROC_SIGACTS_RO, sigacts_ro(&p->p_sigacts),
758 	    ps_sigact[sig], &sigact);
759 }
760 
761 void
proc_set_trampact(proc_t p,int sig,user_addr_t trampact)762 proc_set_trampact(proc_t p, int sig, user_addr_t trampact)
763 {
764 	assert((sig > 0) && (sig < NSIG));
765 
766 	zalloc_ro_update_field(ZONE_ID_PROC_SIGACTS_RO, sigacts_ro(&p->p_sigacts),
767 	    ps_trampact[sig], &trampact);
768 }
769 
770 void
proc_set_sigact_trampact(proc_t p,int sig,user_addr_t sigact,user_addr_t trampact)771 proc_set_sigact_trampact(proc_t p, int sig, user_addr_t sigact, user_addr_t trampact)
772 {
773 	struct sigacts_ro *ps_ro = sigacts_ro(&p->p_sigacts);
774 	struct sigacts_ro psro_local = *ps_ro;
775 
776 	assert((sig > 0) && (sig < NSIG));
777 
778 	psro_local.ps_sigact[sig] = sigact;
779 	psro_local.ps_trampact[sig] = trampact;
780 
781 	zalloc_ro_update_elem(ZONE_ID_PROC_SIGACTS_RO, ps_ro, &psro_local);
782 }
783 
784 void
proc_reset_sigact(proc_t p,sigset_t sigs)785 proc_reset_sigact(proc_t p, sigset_t sigs)
786 {
787 	int nc;
788 	user_addr_t sigacts[NSIG];
789 	bool changed = false;
790 	struct sigacts_ro *ro = sigacts_ro(&p->p_sigacts);
791 
792 	memcpy(sigacts, ro->ps_sigact, sizeof(sigacts));
793 
794 	while (sigs) {
795 		nc = ffs((unsigned int)sigs);
796 		if (sigacts[nc] != SIG_DFL) {
797 			sigacts[nc] = SIG_DFL;
798 			changed = true;
799 		}
800 		sigs &= ~sigmask(nc);
801 	}
802 
803 	if (changed) {
804 		zalloc_ro_update_field(ZONE_ID_PROC_SIGACTS_RO, ro, ps_sigact,
805 		    (user_addr_t const (*)[NSIG])sigacts);
806 	}
807 }
808 
809 void
proc_sigacts_copy(proc_t dst,proc_t src)810 proc_sigacts_copy(proc_t dst, proc_t src)
811 {
812 	struct sigacts_ro ro_local;
813 	struct sigacts_ro *ro;
814 
815 	if (src == NULL) {
816 		assert(dst == kernproc);
817 		bzero(&dst->p_sigacts, sizeof(struct sigacts));
818 		bzero(&ro_local, sizeof(struct sigacts_ro));
819 	} else {
820 		dst->p_sigacts = src->p_sigacts;
821 		ro_local = *sigacts_ro(&src->p_sigacts);
822 	}
823 
824 	ro_local.ps_rw = &dst->p_sigacts;
825 
826 	ro = zalloc_ro(ZONE_ID_PROC_SIGACTS_RO, Z_WAITOK | Z_NOFAIL | Z_ZERO);
827 	zalloc_ro_update_elem(ZONE_ID_PROC_SIGACTS_RO, ro, &ro_local);
828 
829 	dst->p_sigacts.ps_ro = ro;
830 }
831 
832 /*
833  * Destroy a process structure that resulted from a call to forkproc(), but
834  * which must be returned to the system because of a subsequent failure
835  * preventing it from becoming active.
836  *
837  * Parameters:	p			The incomplete process from forkproc()
838  *
839  * Returns:	(void)
840  *
841  * Note:	This function should only be used in an error handler following
842  *		a call to forkproc().
843  *
844  *		Operations occur in reverse order of those in forkproc().
845  */
void
forkproc_free(proc_t p)
{
	struct pgrp *pg;

#if CONFIG_PERSONAS
	persona_proc_drop(p);
#endif /* CONFIG_PERSONAS */

#if PSYNCH
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	/* We held signal and a transition locks; drop them */
	proc_signalend(p, 0);
	proc_transend(p, 0);

	/*
	 * If we have our own copy of the resource limits structure, we
	 * need to free it.  If it's a shared copy, we need to drop our
	 * reference on it.
	 */
	proc_limitdrop(p);

#if SYSV_SHM
	/* Need to drop references to the shared memory segment(s), if any */
	if (p->vm_shm) {
		/*
		 * Use shmexec(): we have no address space, so no mappings
		 *
		 * XXX Yes, the routine is badly named.
		 */
		shmexec(p);
	}
#endif

	/* Need to undo the effects of the fdt_fork(), if any */
	fdt_invalidate(p);
	fdt_destroy(p);

	/*
	 * Drop the reference on a text vnode pointer, if any
	 * XXX This code is broken in forkproc(); see <rdar://4256419>;
	 * XXX if anyone ever uses this field, we will be extremely unhappy.
	 */
	if (p->p_textvp) {
		vnode_rele(p->p_textvp);
		p->p_textvp = NULL;
	}

	/* Update the audit session proc count */
	AUDIT_SESSION_PROCEXIT(p);

	/* Tear down the per-proc locks created by forkproc() */
	lck_mtx_destroy(&p->p_mlock, &proc_mlock_grp);
	lck_mtx_destroy(&p->p_ucred_mlock, &proc_ucred_mlock_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&p->p_dtrace_sprlock, &proc_lck_grp);
#endif
	lck_spin_destroy(&p->p_slock, &proc_slock_grp);

	/* Release the credential reference */
	proc_set_ucred(p, NOCRED);

	proc_list_lock();
	/* Decrement the count of processes in the system */
	nprocs--;

	/* quit the group */
	pg = pgrp_leave_locked(p);

	/*
	 * Take it out of process hash.  The refcount must still be exactly
	 * the initial reference plus the P_REF_NEW marker; the xor below
	 * simultaneously clears P_REF_NEW and sets P_REF_DEAD.
	 */
	assert(os_ref_get_raw_mask(&p->p_refcount) ==
	    ((1U << P_REF_BITS) | P_REF_NEW));
	os_atomic_xor(&p->p_refcount, P_REF_NEW | P_REF_DEAD, relaxed);
	phash_remove_locked(proc_getpid(p), p);

	proc_list_unlock();

	/* pgrp reference can only be dropped after the list lock is released */
	pgrp_rele(pg);

	thread_call_free(p->p_rcall);

	/* Free allocated memory */
	zfree_ro(ZONE_ID_PROC_SIGACTS_RO, p->p_sigacts.ps_ro);
	zfree(proc_stats_zone, p->p_stats);
	p->p_stats = NULL;
	if (p->p_subsystem_root_path) {
		zfree(ZV_NAMEI, p->p_subsystem_root_path);
	}

	/* Detach (and possibly free) the read-only proc structure */
	p->p_proc_ro = proc_ro_release_proc(p->p_proc_ro);
	if (p->p_proc_ro != NULL) {
		proc_ro_free(p->p_proc_ro);
		p->p_proc_ro = NULL;
	}

	/* Sanity-check that no references leaked, then drop the last one */
	proc_checkdeadrefs(p);
	proc_wait_release(p);
}
945 
946 
947 /*
948  * forkproc
949  *
950  * Description:	Create a new process structure, given a parent process
951  *		structure.
952  *
953  * Parameters:	parent_proc		The parent process
954  *
955  * Returns:	!NULL			The new process structure
956  *		NULL			Error (insufficient free memory)
957  *
958  * Note:	When successful, the newly created process structure is
959  *		partially initialized; if a caller needs to deconstruct the
960  *		returned structure, they must call forkproc_free() to do so.
961  */
proc_t
forkproc(proc_t parent_proc)
{
	/* Monotonic id state; protected by proc_list_lock below */
	static uint64_t nextuniqueid = 0;
	static pid_t lastpid = 0;

	proc_t child_proc;      /* Our new process */
	int error = 0;
	struct pgrp *pg;
	uthread_t parent_uthread = current_uthread();
	rlim_t rlimit_cpu_cur;
	pid_t pid;
	struct proc_ro_data proc_ro_data = {};

	/* Z_WAITOK | Z_ZERO: allocations may block but cannot fail */
	child_proc = zalloc_flags(proc_zone, Z_WAITOK | Z_ZERO);
	child_proc->p_stats = zalloc_flags(proc_stats_zone, Z_WAITOK | Z_ZERO);
	proc_sigacts_copy(child_proc, parent_proc);
	/* Initial refcount carries P_REF_NEW until fork completes */
	os_ref_init_mask(&child_proc->p_refcount, P_REF_BITS, &p_refgrp, P_REF_NEW);
	os_ref_init_raw(&child_proc->p_waitref, &p_refgrp);

	/* allocate a callout for use by interval timers */
	child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);


	/*
	 * Find an unused PID.
	 */

	fdt_init(child_proc);

	proc_list_lock();

	pid = lastpid;
	do {
		/*
		 * If the process ID prototype has wrapped around,
		 * restart somewhat above 0, as the low-numbered procs
		 * tend to include daemons that don't exit.
		 */
		if (++pid >= PID_MAX) {
			pid = 100;
		}
		if (pid == lastpid) {
			panic("Unable to allocate a new pid");
		}

		/* the pid stays in the hash for both zombie and running state */
	} while (phash_find_locked(pid) != PROC_NULL ||
	    pghash_find_locked(pid) != PGRP_NULL ||
	    session_find_locked(pid) != SESSION_NULL);

	lastpid = pid;
	nprocs++;

	child_proc->p_pid = pid;
	proc_ro_data.p_idversion = OSIncrementAtomic(&nextpidversion);
	/* kernel process is handcrafted and not from fork, so start from 1 */
	proc_ro_data.p_uniqueid = ++nextuniqueid;

	/* Insert in the hash, and inherit our group (and session) */
	phash_insert_locked(pid, child_proc);
	pg = pgrp_enter_locked(parent_proc, child_proc);
	proc_list_unlock();

	if (proc_ro_data.p_uniqueid == startup_serial_num_procs) {
		/*
		 * Turn off startup serial logging now that we have reached
		 * the defined number of startup processes.
		 */
		startup_serial_logging_active = false;
	}

	/*
	 * We've identified the PID we are going to use;
	 * initialize the new process structure.
	 */
	child_proc->p_stat = SIDL;

	/*
	 * The zero'ing of the proc was at the allocation time due to need
	 * for insertion to hash.  Copy the section that is to be copied
	 * directly from the parent.
	 */
	child_proc->p_forkcopy = parent_proc->p_forkcopy;

	proc_ro_data.syscall_filter_mask = proc_syscall_filter_mask(parent_proc);
	proc_ro_data.p_platform_data = proc_get_ro(parent_proc)->p_platform_data;

	/*
	 * Some flags are inherited from the parent.
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats substruct is set in vm_fork.
	 */
#if CONFIG_DELAY_IDLE_SLEEP
	child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID | P_AFFINITY));
#else /* CONFIG_DELAY_IDLE_SLEEP */
	child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_SUGID));
#endif /* CONFIG_DELAY_IDLE_SLEEP */

	/* Inherit only the valid VFS I/O policy bits */
	child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));

	child_proc->p_responsible_pid = parent_proc->p_responsible_pid;

	/*
	 * Note that if the current thread has an assumed identity, this
	 * credential will be granted to the new process.
	 */
	kauth_cred_set(&proc_ro_data.p_ucred, kauth_cred_get());

	lck_mtx_init(&child_proc->p_mlock, &proc_mlock_grp, &proc_lck_attr);
	lck_mtx_init(&child_proc->p_ucred_mlock, &proc_ucred_mlock_grp, &proc_lck_attr);
#if CONFIG_DTRACE
	lck_mtx_init(&child_proc->p_dtrace_sprlock, &proc_lck_grp, &proc_lck_attr);
#endif
	lck_spin_init(&child_proc->p_slock, &proc_slock_grp, &proc_lck_attr);

	klist_init(&child_proc->p_klist);

	/*
	 * p_textvp was copied from the parent via p_forkcopy above;
	 * on failure to take a reference, drop it rather than keep a
	 * pointer without a ref.
	 */
	if (child_proc->p_textvp != NULLVP) {
		/* bump references to the text vnode */
		/* Need to hold iocount across the ref call */
		if ((error = vnode_getwithref(child_proc->p_textvp)) == 0) {
			error = vnode_ref(child_proc->p_textvp);
			vnode_put(child_proc->p_textvp);
		}

		if (error != 0) {
			child_proc->p_textvp = NULLVP;
		}
	}

	/* Inherit the parent flags for code sign */
	proc_ro_data.p_csflags = ((uint32_t)proc_getcsflags(parent_proc) & ~CS_KILLED);

	child_proc->p_proc_ro = proc_ro_alloc(child_proc, &proc_ro_data, NULL, NULL);

	/* update cred on proc */
	proc_update_creds_onproc(child_proc);

	/* update audit session proc count */
	AUDIT_SESSION_PROCNEW(child_proc);

	/*
	 * Copy the parents per process open file table to the child; if
	 * there is a per-thread current working directory, set the childs
	 * per-process current working directory to that instead of the
	 * parents.
	 */
	if (fdt_fork(&child_proc->p_fd, parent_proc, parent_uthread->uu_cdir) != 0) {
		forkproc_free(child_proc);
		child_proc = NULL;
		goto bad;
	}

#if SYSV_SHM
	if (parent_proc->vm_shm) {
		/* XXX may fail to attach shm to child */
		(void)shmfork(parent_proc, child_proc);
	}
#endif

	/*
	 * Child inherits the parent's plimit
	 */
	proc_limitfork(parent_proc, child_proc);

	/* clamp the CPU limit to what fits in p_rlim_cpu.tv_sec */
	rlimit_cpu_cur = proc_limitgetcur(child_proc, RLIMIT_CPU);
	if (rlimit_cpu_cur != RLIM_INFINITY) {
		child_proc->p_rlim_cpu.tv_sec = (rlimit_cpu_cur > __INT_MAX__) ? __INT_MAX__ : rlimit_cpu_cur;
	}

	/* Initialize new process stats, including start time */
	/* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
	microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);

	/* Child shares the parent's controlling terminal, if any */
	if (pg->pg_session->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) {
		os_atomic_or(&child_proc->p_flag, P_CONTROLT, relaxed);
	}

	/*
	 * block all signals to reach the process.
	 * no transition race should be occurring with the child yet,
	 * but indicate that the process is in (the creation) transition.
	 * These are dropped by forkproc_free() or once fork completes.
	 */
	proc_signalstart(child_proc, 0);
	proc_transstart(child_proc, 0, 0);

	child_proc->p_pcaction = 0;

	TAILQ_INIT(&child_proc->p_uthlist);
	TAILQ_INIT(&child_proc->p_aio_activeq);
	TAILQ_INIT(&child_proc->p_aio_doneq);

	/*
	 * Copy work queue information
	 *
	 * Note: This should probably only happen in the case where we are
	 *	creating a child that is a copy of the parent; since this
	 *	routine is called in the non-duplication case of vfork()
	 *	or posix_spawn(), then this information should likely not
	 *	be duplicated.
	 *
	 * <rdar://6640553> Work queue pointers that no longer point to code
	 */
	child_proc->p_wqthread = parent_proc->p_wqthread;
	child_proc->p_threadstart = parent_proc->p_threadstart;
	child_proc->p_pthsize = parent_proc->p_pthsize;
	if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
		child_proc->p_lflag |= P_LREGISTER;
	}
	child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
	child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
	child_proc->p_dispatchqueue_label_offset = parent_proc->p_dispatchqueue_label_offset;
	child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
	child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
	child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
	child_proc->p_pthread_wq_quantum_offset = parent_proc->p_pthread_wq_quantum_offset;
#if PSYNCH
	pth_proc_hashinit(child_proc);
#endif /* PSYNCH */

#if CONFIG_PERSONAS
	child_proc->p_persona = NULL;
	error = persona_proc_inherit(child_proc, parent_proc);
	if (error != 0) {
		printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
		forkproc_free(child_proc);
		child_proc = NULL;
		goto bad;
	}
#endif

#if CONFIG_MEMORYSTATUS
	/* Memorystatus init */
	child_proc->p_memstat_state = 0;
	child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
	child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
	child_proc->p_memstat_assertionpriority = 0;
	child_proc->p_memstat_userdata          = 0;
	child_proc->p_memstat_idle_start        = 0;
	child_proc->p_memstat_idle_delta        = 0;
	child_proc->p_memstat_memlimit          = 0;
	child_proc->p_memstat_memlimit_active   = 0;
	child_proc->p_memstat_memlimit_inactive = 0;
	child_proc->p_memstat_relaunch_flags    = P_MEMSTAT_RELAUNCH_UNKNOWN;
#if CONFIG_FREEZE
	child_proc->p_memstat_freeze_sharedanon_pages = 0;
#endif
	child_proc->p_memstat_dirty = 0;
	child_proc->p_memstat_idledeadline = 0;
#endif /* CONFIG_MEMORYSTATUS */

	/* Duplicate the parent's subsystem root path, NUL terminator included */
	if (parent_proc->p_subsystem_root_path) {
		size_t parent_length = strlen(parent_proc->p_subsystem_root_path) + 1;
		assert(parent_length <= MAXPATHLEN);
		child_proc->p_subsystem_root_path = zalloc_flags(ZV_NAMEI,
		    Z_WAITOK | Z_ZERO);
		memcpy(child_proc->p_subsystem_root_path, parent_proc->p_subsystem_root_path, parent_length);
	}

bad:
	return child_proc;
}
1226 
/* Acquire the per-proc mutex; caller must not hold the proc list lock. */
void
proc_lock(proc_t p)
{
	/* lock-ordering check: proc list lock must not be held here */
	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&p->p_mlock);
}
1233 
/* Release the per-proc mutex taken by proc_lock(). */
void
proc_unlock(proc_t p)
{
	lck_mtx_unlock(&p->p_mlock);
}
1239 
/* Acquire the per-proc spin lock. */
void
proc_spinlock(proc_t p)
{
	lck_spin_lock_grp(&p->p_slock, &proc_slock_grp);
}
1245 
/* Release the per-proc spin lock taken by proc_spinlock(). */
void
proc_spinunlock(proc_t p)
{
	lck_spin_unlock(&p->p_slock);
}
1251 
/* Acquire the global proc list mutex. */
void
proc_list_lock(void)
{
	lck_mtx_lock(&proc_list_mlock);
}
1257 
/* Release the global proc list mutex. */
void
proc_list_unlock(void)
{
	lck_mtx_unlock(&proc_list_mlock);
}
1263 
/* Acquire the per-proc credential mutex. */
void
proc_ucred_lock(proc_t p)
{
	lck_mtx_lock(&p->p_ucred_mlock);
}
1269 
/* Release the per-proc credential mutex taken by proc_ucred_lock(). */
void
proc_ucred_unlock(proc_t p)
{
	lck_mtx_unlock(&p->p_ucred_mlock);
}
1275 
/*
 * Refresh the identity fields cached on the proc (p_uid, p_gid, and the
 * real/saved variants) from the proc's current credential.
 */
void
proc_update_creds_onproc(proc_t p)
{
	kauth_cred_t cred = proc_ucred(p);

	p->p_uid = kauth_cred_getuid(cred);
	p->p_gid = kauth_cred_getgid(cred);
	p->p_ruid = kauth_cred_getruid(cred);
	p->p_rgid = kauth_cred_getrgid(cred);
	p->p_svuid = kauth_cred_getsvuid(cred);
	p->p_svgid = kauth_cred_getsvgid(cred);
}
1288 
1289 
1290 bool
uthread_is64bit(struct uthread * uth)1291 uthread_is64bit(struct uthread *uth)
1292 {
1293 	return uth->uu_flag & UT_LP64;
1294 }
1295 
/*
 * Initialize the BSD-side state of a new uthread: the rethrottle lock,
 * 64-bit flag, credential/proc fields of its thread_ro template, signal
 * mask, and membership on the owning proc's uthread list.
 */
void
uthread_init(task_t task, uthread_t uth, thread_ro_t tro_tpl, int workq_thread)
{
	uthread_t uth_parent = current_uthread();

	lck_spin_init(&uth->uu_rethrottle_lock, &rethrottle_lock_grp,
	    LCK_ATTR_NULL);

	/*
	 * Lazily set the thread on the kernel VFS context
	 * to the first thread made which will be vm_pageout_scan_thread.
	 */
	if (__improbable(vfs_context0.vc_thread == NULL)) {
		extern thread_t vm_pageout_scan_thread;

		assert(task == kernel_task);
		assert(get_machthread(uth) == vm_pageout_scan_thread);
		vfs_context0.vc_thread = get_machthread(uth);
	}

	if (task_get_64bit_addr(task)) {
		uth->uu_flag |= UT_LP64;
	}

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL.  If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	if (task == kernel_task) {
		/* kernel threads always run with the kernel VFS credential */
		kauth_cred_set(&tro_tpl->tro_cred, vfs_context0.vc_ucred);
		tro_tpl->tro_proc = kernproc;
		tro_tpl->tro_proc_ro = kernproc->p_proc_ro;
	} else if (!is_corpsetask(task)) {
		thread_ro_t curtro = current_thread_ro();
		proc_t p = get_bsdtask_info(task);

		/*
		 * Same task: inherit the creating thread's cred directly,
		 * unless this is a workq thread created by a thread with an
		 * assumed (setuid) identity, in which case fall back to the
		 * proc's credential.
		 */
		if (task == curtro->tro_task &&
		    ((curtro->tro_flags & TRO_SETUID) == 0 || !workq_thread)) {
			kauth_cred_set(&tro_tpl->tro_cred, curtro->tro_cred);
			tro_tpl->tro_flags = (curtro->tro_flags & TRO_SETUID);
			tro_tpl->tro_proc_ro = curtro->tro_proc_ro;
		} else {
			kauth_cred_t cred = kauth_cred_proc_ref(p);
			kauth_cred_set_and_unref(&tro_tpl->tro_cred, &cred);
			tro_tpl->tro_proc_ro = task_get_ro(task);
		}
		tro_tpl->tro_proc = p;

		proc_lock(p);
		if (workq_thread) {
			/* workq_thread threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent->uu_flag & UT_SAS_OLDMASK) {
			/* parent is inside sigsuspend(); use its saved mask */
			uth->uu_sigmask = uth_parent->uu_oldmask;
		} else {
			uth->uu_sigmask = uth_parent->uu_sigmask;
		}


		/*
		 * Do not add the uthread to proc uthlist for exec copy task,
		 * since they do not hold a ref on proc.
		 */
		if (!task_is_exec_copy(task)) {
			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		}
		proc_unlock(p);

#if CONFIG_DTRACE
		if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
	} else {
		/* corpse task: no credential; only the read-only pointer */
		tro_tpl->tro_proc_ro = task_get_ro(task);
	}

	uthread_init_proc_refcount(uth);
}
1379 
1380 /*
1381  * This routine frees the thread name field of the uthread_t structure. Split out of
1382  * uthread_cleanup() so thread name does not get deallocated while generating a corpse fork.
1383  */
1384 void
uthread_cleanup_name(uthread_t uth)1385 uthread_cleanup_name(uthread_t uth)
1386 {
1387 	/*
1388 	 * <rdar://17834538>
1389 	 * Set pth_name to NULL before calling free().
1390 	 * Previously there was a race condition in the
1391 	 * case this code was executing during a stackshot
1392 	 * where the stackshot could try and copy pth_name
1393 	 * after it had been freed and before if was marked
1394 	 * as null.
1395 	 */
1396 	if (uth->pth_name != NULL) {
1397 		void *pth_name = uth->pth_name;
1398 		uth->pth_name = NULL;
1399 		kfree_data(pth_name, MAXTHREADNAMESIZE);
1400 	}
1401 	return;
1402 }
1403 
1404 /*
1405  * This routine frees all the BSD context in uthread except the credential.
 * It does not free the uthread structure itself.
1407  */
void
uthread_cleanup(uthread_t uth, thread_ro_t tro)
{
	task_t task = tro->tro_task;
	proc_t p    = tro->tro_proc;

	/* caller must have already dropped all proc refs held by this uthread */
	uthread_assert_zero_proc_refcount(uth);

	if (uth->uu_lowpri_window || uth->uu_throttle_info) {
		/*
		 * task is marked as a low priority I/O type
		 * and we've somehow managed to not dismiss the throttle
		 * through the normal exit paths back to user space...
		 * no need to throttle this thread since its going away
		 * but we do need to update our bookeeping w/r to throttled threads
		 *
		 * Calling this routine will clean up any throttle info reference
		 * still inuse by the thread.
		 */
		throttle_lowpri_io(0);
	}

#if CONFIG_AUDIT
	/*
	 * Per-thread audit state should never last beyond system
	 * call return.  Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);
#endif

	/* release select() scratch buffers, if allocated */
	if (uth->uu_select.nbytes) {
		select_cleanup_uthread(&uth->uu_select);
	}

	/* drop the per-thread current working directory, if set */
	if (uth->uu_cdir) {
		vnode_rele(uth->uu_cdir);
		uth->uu_cdir = NULLVP;
	}

	/* tear down and free the thread's waitq set, if allocated */
	if (uth->uu_wqset) {
		if (waitq_set_is_valid(uth->uu_wqset)) {
			waitq_set_deinit(uth->uu_wqset);
		}
		kheap_free(KHEAP_DEFAULT, uth->uu_wqset, uth->uu_wqstate_sz);
		uth->uu_wqset = NULL;
		uth->uu_wqstate_sz = 0;
	}

	os_reason_free(uth->uu_exit_reason);

	if ((task != kernel_task) && p) {
		/*
		 * Remove the thread from the process list and
		 * transfer [appropriate] pending signals to the process.
		 * Do not remove the uthread from proc uthlist for exec
		 * copy task, since it does not hold a ref on proc and
		 * would not have been added to the list.
		 */
		if (uth->uu_kqr_bound) {
			kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
		}

		if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
			proc_lock(p);
			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
			/* forward pending signals the process should still see */
			p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
			proc_unlock(p);
		}

#if CONFIG_DTRACE
		struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
		uth->t_dtrace_scratch = NULL;
		if (tmpptr != NULL && !task_is_exec_copy(task)) {
			dtrace_ptss_release_entry(p, tmpptr);
		}
#endif
	} else {
		/* kernel threads never bind kqueue thread requests */
		assert(!uth->uu_kqr_bound);
	}
}
1490 
/* These routines take and release the credential reference stored in a uthread */
/* Take an additional reference on a uthread's credential. */
void
uthread_cred_ref(struct ucred *ucred)
{
	kauth_cred_ref(ucred);
}
1497 
/* Drop a reference on a uthread's credential (set-to-NOCRED releases it). */
void
uthread_cred_free(struct ucred *ucred)
{
	kauth_cred_set(&ucred, NOCRED);
}
1503 
1504 /* This routine frees the uthread structure held in thread structure */
1505 void
uthread_destroy(uthread_t uth)1506 uthread_destroy(uthread_t uth)
1507 {
1508 	uthread_destroy_proc_refcount(uth);
1509 
1510 	if (uth->t_tombstone) {
1511 		kfree_type(struct doc_tombstone, uth->t_tombstone);
1512 		uth->t_tombstone = NULL;
1513 	}
1514 
1515 #if CONFIG_DEBUG_SYSCALL_REJECTION
1516 	size_t const bitstr_len = BITMAP_SIZE(mach_trap_count + nsysent);
1517 
1518 	if (uth->syscall_rejection_mask) {
1519 		kfree_data(uth->syscall_rejection_mask, bitstr_len);
1520 		uth->syscall_rejection_mask = NULL;
1521 	}
1522 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
1523 
1524 	lck_spin_destroy(&uth->uu_rethrottle_lock, &rethrottle_lock_grp);
1525 
1526 	uthread_cleanup_name(uth);
1527 }
1528