1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74 /*
75 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76 * support for mandatory and extensible security protections. This notice
77 * is included in support of clause 2.2 (b) of the Apple Public License,
78 * Version 2.0.
79 */
80
81 #include <kern/assert.h>
82 #include <kern/bits.h>
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/filedesc.h>
86 #include <sys/kernel.h>
87 #include <sys/malloc.h>
88 #include <sys/proc_internal.h>
89 #include <sys/kauth.h>
90 #include <sys/user.h>
91 #include <sys/reason.h>
92 #include <sys/resourcevar.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/file_internal.h>
95 #include <sys/acct.h>
96 #include <sys/codesign.h>
97 #include <sys/sysent.h>
98 #include <sys/sysproto.h>
99 #include <sys/ulock.h>
100 #if CONFIG_PERSONAS
101 #include <sys/persona.h>
102 #endif
103 #include <sys/doc_tombstone.h>
104 #if CONFIG_DTRACE
105 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
106 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
107 extern void dtrace_proc_fork(proc_t, proc_t, int);
108
109 /*
110 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
111 * we will store its value before actually calling it.
112 */
113 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
114
115 #include <sys/dtrace_ptss.h>
116 #endif
117
118 #include <security/audit/audit.h>
119
120 #include <mach/mach_types.h>
121 #include <kern/coalition.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/mach_param.h>
125 #include <kern/task.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/zalloc.h>
129
130 #include <os/log.h>
131
132 #if CONFIG_MACF
133 #include <security/mac_framework.h>
134 #include <security/mac_mach_internal.h>
135 #endif
136
137 #include <vm/vm_map.h>
138 #include <vm/vm_protos.h>
139 #include <vm/vm_shared_region.h>
140
141 #include <sys/shm_internal.h> /* for shmfork() */
142 #include <mach/task.h> /* for thread_create() */
143 #include <mach/thread_act.h> /* for thread_resume() */
144
145 #include <sys/sdt.h>
146
147 #if CONFIG_MEMORYSTATUS
148 #include <sys/kern_memorystatus.h>
149 #endif
150
/* XXX routines which should have Mach prototypes, but don't */
void thread_set_parent(thread_t parent, int pid);
extern void act_thread_catt(void *ctx);
void thread_set_child(thread_t child, int pid);
void *act_thread_csave(void);
extern boolean_t task_is_exec_copy(task_t);
/*
 * Monotonically increasing generation counter; forkproc() mixes it into each
 * new proc's identity as p_idversion (via OSIncrementAtomic).
 */
int nextpidversion = 0;


/* Forward declarations for the process-creation helpers defined in this file */
thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
proc_t forkproc(proc_t);
void forkproc_free(proc_t);
thread_t fork_create_child(task_t parent_task,
    coalition_t *parent_coalitions,
    proc_t child,
    int inherit_memory,
    int is_64bit_addr,
    int is_64bit_data,
    int in_exec);

__private_extern__ const size_t uthread_size = sizeof(struct uthread);
/* Lock group named "rethrottle"; its consumers are not visible in this chunk. */
static LCK_GRP_DECLARE(rethrottle_lock_grp, "rethrottle");

/* Refcount group for proc reference accounting (p_refcount / p_waitref). */
os_refgrp_decl(, p_refgrp, "proc", NULL);
ZONE_DEFINE_ID(ZONE_ID_PROC, "proc", struct proc,
    ZC_ZFREE_CLEARMEM | ZC_SEQUESTER); /* sequester is needed for proc_rele() */

/* Read-only zone backing struct sigacts_ro; writable only via zalloc_ro_update_*() */
ZONE_DEFINE_ID(ZONE_ID_PROC_SIGACTS_RO, "sigacts_ro", struct sigacts_ro,
    ZC_READONLY | ZC_ZFREE_CLEARMEM);

KALLOC_TYPE_DEFINE(proc_stats_zone, struct pstats, KT_DEFAULT);
182
183 /*
184 * fork1
185 *
186 * Description: common code used by all new process creation other than the
187 * bootstrap of the initial process on the system
188 *
189 * Parameters: parent_proc parent process of the process being
190 * child_threadp pointer to location to receive the
191 * Mach thread_t of the child process
192 * created
193 * kind kind of creation being requested
194 * coalitions if spawn, the set of coalitions the
195 * child process should join, or NULL to
196 * inherit the parent's. On non-spawns,
197 * this param is ignored and the child
198 * always inherits the parent's
199 * coalitions.
200 *
 * Notes:	Permissible values for 'kind':
202 *
203 * PROC_CREATE_FORK Create a complete process which will
204 * return actively running in both the
205 * parent and the child; the child copies
206 * the parent address space.
207 * PROC_CREATE_SPAWN Create a complete process which will
208 * return actively running in the parent
209 * only after returning actively running
210 * in the child; the child address space
211 * is newly created by an image activator,
212 * after which the child is run.
213 *
214 * At first it may seem strange that we return the child thread
215 * address rather than process structure, since the process is
216 * the only part guaranteed to be "new"; however, since we do
 *		not actually adjust other references between Mach and BSD, this
218 * is the only method which guarantees us the ability to get
219 * back to the other information.
220 */
int
fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
{
	proc_t child_proc = NULL;       /* set in switch, but compiler... */
	thread_t child_thread = NULL;
	uid_t uid;
	size_t count;
	int err = 0;
	int spawn = 0;
	rlim_t rlimit_nproc_cur;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_getruid();
	proc_list_lock();
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
		/*
		 * On the development kernel, panic so that the fact that we hit
		 * the process limit is obvious, as this may very well wedge the
		 * system.
		 */
		panic("The process table is full; parent pid=%d", proc_getpid(parent_proc));
#endif
		proc_list_unlock();
		tablefull("proc");
		return EAGAIN;
	}
	proc_list_unlock();

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit, which is
	 * always less than what an rlim_t can hold.
	 * (locking protection is provided by list lock held in chgproccnt)
	 *
	 * Note: from this point on, every error path must go through 'bad'
	 * so the increment below is rolled back.
	 */
	count = chgproccnt(uid, 1);
	rlimit_nproc_cur = proc_limitgetcur(parent_proc, RLIMIT_NPROC);
	if (uid != 0 &&
	    (rlim_t)count > rlimit_nproc_cur) {
#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
		/*
		 * On the development kernel, panic so that the fact that we hit
		 * the per user process limit is obvious.  This may be less dire
		 * than hitting the global process limit, but we cannot rely on
		 * that.
		 */
		panic("The per-user process limit has been hit; parent pid=%d, uid=%d", proc_getpid(parent_proc), uid);
#endif
		err = EAGAIN;
		goto bad;
	}

#if CONFIG_MACF
	/*
	 * Determine if MAC policies applied to the process will allow
	 * it to fork.  This is an advisory-only check.
	 */
	err = mac_proc_check_fork(parent_proc);
	if (err != 0) {
		goto bad;
	}
#endif

	switch (kind) {
	case PROC_CREATE_SPAWN:
		/*
		 * A spawned process differs from a forked process in that
		 * the spawned process does not carry around the parents
		 * baggage with regard to address space copying, dtrace,
		 * and so on.
		 */
		spawn = 1;

		OS_FALLTHROUGH;

	case PROC_CREATE_FORK:
		/*
		 * When we clone the parent process, we are going to inherit
		 * its task attributes and memory, since when we fork, we
		 * will, in effect, create a duplicate of it, with only minor
		 * differences.  Contrarily, spawned processes do not inherit.
		 *
		 * Note: for fork, inherit_memory is TRUE and the child joins
		 * the parent's coalitions (coalitions arg is ignored); for
		 * spawn, the caller-provided coalition set is used instead.
		 */
		if ((child_thread = cloneproc(parent_proc->task,
		    spawn ? coalitions : NULL,
		    parent_proc,
		    spawn ? FALSE : TRUE,
		    FALSE)) == NULL) {
			/* Failed to create thread */
			err = EAGAIN;
			goto bad;
		}

		/* copy current thread state into the child thread (only for fork) */
		if (!spawn) {
			thread_dup(child_thread);
		}

		/* child_proc = child_thread->task->proc; */
		child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));

		// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 *       association without informing the policy in other
		 *       situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(proc_ucred(child_proc), child_proc);
#endif

		/*
		 * Propagate change of PID - may get new cred if auditing.
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, proc_getpid(child_proc));

		// XXX END: wants to move to be common code (and safe)

		/*
		 * Blow thread state information; this is what gives the child
		 * process its "return" value from a fork() call.
		 *
		 * Note: this should probably move to fork() proper, since it
		 * is not relevant to spawn, and the value won't matter
		 * until we resume the child there.  If you are in here
		 * refactoring code, consider doing this at the same time.
		 */
		thread_set_child(child_thread, proc_getpid(child_proc));

		child_proc->p_acflag = AFORK;   /* forked but not exec'ed */

#if CONFIG_DTRACE
		dtrace_proc_fork(parent_proc, child_proc, spawn);
#endif  /* CONFIG_DTRACE */
		if (!spawn) {
			/*
			 * Of note, we need to initialize the bank context behind
			 * the protection of the proc_trans lock to prevent a race with exit.
			 */
			task_bank_init(get_threadtask(child_thread));
		}

		break;

	default:
		panic("fork1 called with unknown kind %d", kind);
		break;
	}


	/* return the thread pointer to the caller */
	*child_threadp = child_thread;

bad:
	/*
	 * In the error case, we return a 0 value for the returned pid (but
	 * it is ignored in the trampoline due to the error return); this
	 * is probably not necessary.
	 *
	 * Roll back the per-uid process count taken by chgproccnt() above.
	 */
	if (err) {
		(void)chgproccnt(uid, -1);
	}

	return err;
}
395
396
397
398
399 /*
400 * fork_create_child
401 *
402 * Description: Common operations associated with the creation of a child
403 * process. Return with new task and first thread's control port movable
404 * and not pinned.
405 *
406 * Parameters: parent_task parent task
407 * parent_coalitions parent's set of coalitions
408 * child_proc child process
409 * inherit_memory TRUE, if the parents address space is
410 * to be inherited by the child
411 * is_64bit_addr TRUE, if the child being created will
412 * be associated with a 64 bit address space
413 * is_64bit_data TRUE if the child being created will use a
414 * 64-bit register state
415 * in_exec TRUE, if called from execve or posix spawn set exec
416 * FALSE, if called from fork or vfexec
417 *
418 * Note: This code is called in the fork() case, from the execve() call
419 * graph, from the posix_spawn() call graph (which implicitly
420 * includes a vfork() equivalent call, and in the system
421 * bootstrap case.
422 *
423 * It creates a new task and thread (and as a side effect of the
424 * thread creation, a uthread) in the parent coalition set, which is
425 * then associated with the process 'child'. If the parent
426 * process address space is to be inherited, then a flag
427 * indicates that the newly created task should inherit this from
428 * the child task.
429 *
430 * As a special concession to bootstrapping the initial process
431 * in the system, it's possible for 'parent_task' to be TASK_NULL;
432 * in this case, 'inherit_memory' MUST be FALSE.
433 */
434 thread_t
fork_create_child(task_t parent_task,coalition_t * parent_coalitions,proc_t child_proc,int inherit_memory,int is_64bit_addr,int is_64bit_data,int in_exec)435 fork_create_child(task_t parent_task,
436 coalition_t *parent_coalitions,
437 proc_t child_proc,
438 int inherit_memory,
439 int is_64bit_addr,
440 int is_64bit_data,
441 int in_exec)
442 {
443 thread_t child_thread = NULL;
444 task_t child_task;
445 kern_return_t result;
446 proc_ro_t proc_ro;
447
448 proc_ro = proc_get_ro(child_proc);
449 if (proc_ro_task(proc_ro) != NULL) {
450 /* task will need to allocate its own proc_ro: */
451 proc_ro = NULL;
452 }
453
454 /* Create a new task for the child process */
455 result = task_create_internal(parent_task,
456 proc_ro,
457 parent_coalitions,
458 inherit_memory,
459 is_64bit_addr,
460 is_64bit_data,
461 TF_NONE,
462 in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */
463 (TRW_LRETURNWAIT | TRW_LRETURNWAITER), /* All created threads will wait in task_wait_to_return */
464 &child_task);
465 if (result != KERN_SUCCESS) {
466 printf("%s: task_create_internal failed. Code: %d\n",
467 __func__, result);
468 goto bad;
469 }
470
471 if (!in_exec) {
472 /*
473 * Set the child process task to the new task if not in exec,
474 * will set the task for exec case in proc_exec_switch_task after image activation.
475 */
476 proc_set_task(child_proc, child_task);
477 if (proc_ro == NULL) {
478 proc_switch_ro(child_proc, task_get_ro(child_task));
479 }
480 }
481
482 /* Set child task process to child proc */
483 set_bsdtask_info(child_task, child_proc);
484
485 /* Propagate CPU limit timer from parent */
486 if (timerisset(&child_proc->p_rlim_cpu)) {
487 task_vtimer_set(child_task, TASK_VTIMER_RLIM);
488 }
489
490 /*
491 * Set child process BSD visible scheduler priority if nice value
492 * inherited from parent
493 */
494 if (child_proc->p_nice != 0) {
495 resetpriority(child_proc);
496 }
497
498 /*
499 * Create main thread for the child process. Its control port is not immovable/pinned
500 * until main_thread_set_immovable_pinned().
501 *
502 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
503 */
504 result = main_thread_create_waiting(child_task,
505 (thread_continue_t)task_wait_to_return,
506 task_get_return_wait_event(child_task),
507 &child_thread);
508
509 if (result != KERN_SUCCESS) {
510 printf("%s: thread_create failed. Code: %d\n",
511 __func__, result);
512 task_deallocate(child_task);
513 child_task = NULL;
514 }
515
516 /*
517 * Tag thread as being the first thread in its task.
518 */
519 thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
520
521 bad:
522 thread_yield_internal(1);
523
524 return child_thread;
525 }
526
527
528 /*
529 * fork
530 *
531 * Description: fork system call.
532 *
533 * Parameters: parent Parent process to fork
534 * uap (void) [unused]
535 * retval Return value
536 *
537 * Returns: 0 Success
538 * EAGAIN Resource unavailable, try again
539 *
540 * Notes: Attempts to create a new child process which inherits state
541 * from the parent process. If successful, the call returns
542 * having created an initially suspended child process with an
543 * extra Mach task and thread reference, for which the thread
544 * is initially suspended. Until we resume the child process,
545 * it is not yet running.
546 *
547 * The return information to the child is contained in the
548 * thread state structure of the new child, and does not
549 * become visible to the child through a normal return process,
550 * since it never made the call into the kernel itself in the
551 * first place.
552 *
553 * After resuming the thread, this function returns directly to
554 * the parent process which invoked the fork() system call.
555 *
556 * Important: The child thread_resume occurs before the parent returns;
557 * depending on scheduling latency, this means that it is not
558 * deterministic as to whether the parent or child is scheduled
559 * to run first. It is entirely possible that the child could
560 * run to completion prior to the parent running.
561 */
int
fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
{
	thread_t child_thread;
	int err;

	retval[1] = 0;          /* flag parent return for user space */

	if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
		task_t child_task;
		proc_t child_proc;

		/* Return to the parent: report the child's pid */
		child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
		retval[0] = proc_getpid(child_proc);

		child_task = (task_t)get_threadtask(child_thread);
		assert(child_task != TASK_NULL);

		/* task_control_port_options has been inherited from parent, apply it */
		task_set_immovable_pinned(child_task);
		main_thread_set_immovable_pinned(child_thread);

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(child_proc, 0);
		proc_transend(child_proc, 0);

		/* flag the fork has occurred (child pid is OR'ed into the fflags) */
		proc_knote(parent_proc, NOTE_FORK | proc_getpid(child_proc));
		DTRACE_PROC1(create, proc_t, child_proc);

#if CONFIG_DTRACE
		/* snapshot the hook pointer before calling; it can be unloaded */
		if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
			(*dtrace_proc_waitfor_hook)(child_proc);
		}
#endif

		/* "Return" to the child: release it from task_wait_to_return */
		task_clear_return_wait(get_threadtask(child_thread), TCRW_CLEAR_ALL_WAIT);

		/* drop the extra references we got during the creation */
		task_deallocate(child_task);
		thread_deallocate(child_thread);
	}

	return err;
}
613
614
615 /*
616 * cloneproc
617 *
618 * Description: Create a new process from a specified process.
619 *
 * Parameters:	parent_task		The parent task to be cloned, or
 *					TASK_NULL if the new task is not
 *					to inherit the task characteristics
 *					(including the VM characteristics)
 *					of the parent
626 * parent_proc The parent process to be cloned
627 * inherit_memory True if the child is to inherit
628 * memory from the parent; if this is
629 * non-NULL, then the parent_task must
630 * also be non-NULL
631 * memstat_internal Whether to track the process in the
632 * jetsam priority list (if configured)
633 *
634 * Returns: !NULL pointer to new child thread
635 * NULL Failure (unspecified)
636 *
637 * Note: On return newly created child process has signal lock held
638 * to block delivery of signal to it if called with lock set.
 *		fork() code needs to explicitly remove this lock before
640 * signals can be delivered
641 *
642 * In the case of bootstrap, this function can be called from
643 * bsd_utaskbootstrap() in order to bootstrap the first process;
644 * the net effect is to provide a uthread structure for the
645 * kernel process associated with the kernel task.
646 *
647 * XXX: Tristating using the value parent_task as the major key
648 * and inherit_memory as the minor key is something we should
649 * refactor later; we owe the current semantics, ultimately,
650 * to the semantics of task_create_internal. For now, we will
651 * live with this being somewhat awkward.
652 */
thread_t
cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
{
#if !CONFIG_MEMORYSTATUS
#pragma unused(memstat_internal)
#endif
	task_t child_task;
	proc_t child_proc;
	thread_t child_thread = NULL;

	/* Allocate and partially initialize the BSD proc structure first */
	if ((child_proc = forkproc(parent_proc)) == NULL) {
		/* Failed to allocate new process */
		goto bad;
	}

	/*
	 * In the case where the parent_task is TASK_NULL (during the init path)
	 * we make the assumption that the register size will be the same as the
	 * address space size since there's no way to determine the possible
	 * register size until an image is exec'd.
	 *
	 * The only architecture that has different address space and register sizes
	 * (arm64_32) isn't being used within kernel-space, so the above assumption
	 * always holds true for the init path.
	 */
	const int parent_64bit_addr = parent_proc->p_flag & P_LP64;
	const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task);

	/* Create the Mach task and main thread for the new proc */
	child_thread = fork_create_child(parent_task,
	    parent_coalitions,
	    child_proc,
	    inherit_memory,
	    parent_64bit_addr,
	    parent_64bit_data,
	    FALSE);

	if (child_thread == NULL) {
		/*
		 * Failed to create thread; now we must deconstruct the new
		 * process previously obtained from forkproc().
		 */
		forkproc_free(child_proc);
		goto bad;
	}

	/* Mirror the parent's 64-bitness onto the proc flags and uthread */
	child_task = get_threadtask(child_thread);
	if (parent_64bit_addr) {
		OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
		get_bsdthread_info(child_thread)->uu_flag |= UT_LP64;
	} else {
		OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
		get_bsdthread_info(child_thread)->uu_flag &= ~UT_LP64;
	}

#if CONFIG_MEMORYSTATUS
	if (memstat_internal) {
		proc_list_lock();
		child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
		proc_list_unlock();
	}
#endif

	/* make child visible */
	pinsertchild(parent_proc, child_proc);

	/*
	 * Make child runnable, set start time.
	 */
	child_proc->p_stat = SRUN;
bad:
	return child_thread;
}
725
/*
 * Panic helper for sigacts_ro(): the read-only sigacts page's back-pointer
 * (ps_rw) did not point at the struct sigacts we reached it from, which
 * indicates memory corruption or a forged pointer.
 */
__abortlike
static void
panic_sigacts_backref_mismatch(struct sigacts *sa)
{
	panic("sigacts_ro backref mismatch: sigacts=%p, ro=%p, backref=%p",
	    sa, sa->ps_ro, sa->ps_ro->ps_rw);
}
733
/*
 * Resolve and validate the read-only companion of a struct sigacts.
 *
 * Verifies that the pointer really lives in the sigacts_ro read-only zone
 * and that its back-reference points at 'sa' before handing it out, so a
 * corrupted ps_ro pointer cannot be used to redirect signal dispositions.
 */
static struct sigacts_ro *
sigacts_ro(struct sigacts *sa)
{
	struct sigacts_ro *ro = sa->ps_ro;

	/* panics unless 'ro' is a valid element of the read-only zone */
	zone_require_ro(ZONE_ID_PROC_SIGACTS_RO, sizeof(struct sigacts_ro),
	    ro);

	if (__improbable(ro->ps_rw != sa)) {
		panic_sigacts_backref_mismatch(sa);
	}

	return ro;
}
748
/*
 * Set the signal handler (disposition) for signal 'sig' on process 'p',
 * writing through the read-only zone update path.
 */
void
proc_set_sigact(proc_t p, int sig, user_addr_t sigact)
{
	assert((sig > 0) && (sig < NSIG));

	zalloc_ro_update_field(ZONE_ID_PROC_SIGACTS_RO, sigacts_ro(&p->p_sigacts),
	    ps_sigact[sig], &sigact);
}
757
/*
 * Set the user-space signal trampoline address for signal 'sig' on process
 * 'p', writing through the read-only zone update path.
 */
void
proc_set_trampact(proc_t p, int sig, user_addr_t trampact)
{
	assert((sig > 0) && (sig < NSIG));

	zalloc_ro_update_field(ZONE_ID_PROC_SIGACTS_RO, sigacts_ro(&p->p_sigacts),
	    ps_trampact[sig], &trampact);
}
766
767 void
proc_set_sigact_trampact(proc_t p,int sig,user_addr_t sigact,user_addr_t trampact)768 proc_set_sigact_trampact(proc_t p, int sig, user_addr_t sigact, user_addr_t trampact)
769 {
770 struct sigacts_ro *ps_ro = sigacts_ro(&p->p_sigacts);
771 struct sigacts_ro psro_local = *ps_ro;
772
773 assert((sig > 0) && (sig < NSIG));
774
775 psro_local.ps_sigact[sig] = sigact;
776 psro_local.ps_trampact[sig] = trampact;
777
778 zalloc_ro_update_elem(ZONE_ID_PROC_SIGACTS_RO, ps_ro, &psro_local);
779 }
780
781 void
proc_reset_sigact(proc_t p,sigset_t sigs)782 proc_reset_sigact(proc_t p, sigset_t sigs)
783 {
784 int nc;
785 user_addr_t sigacts[NSIG];
786 bool changed = false;
787 struct sigacts_ro *ro = sigacts_ro(&p->p_sigacts);
788
789 memcpy(sigacts, ro->ps_sigact, sizeof(sigacts));
790
791 while (sigs) {
792 nc = ffs((unsigned int)sigs);
793 if (sigacts[nc] != SIG_DFL) {
794 sigacts[nc] = SIG_DFL;
795 changed = true;
796 }
797 sigs &= ~sigmask(nc);
798 }
799
800 if (changed) {
801 zalloc_ro_update_field(ZONE_ID_PROC_SIGACTS_RO, ro, ps_sigact,
802 (user_addr_t const (*)[NSIG])sigacts);
803 }
804 }
805
/*
 * Initialize 'dst's sigacts from 'src', allocating a fresh read-only
 * companion element for the child.
 *
 * src == NULL is the bootstrap case (dst must be kernproc): both the
 * writable sigacts and the RO image start out zeroed.  Otherwise the
 * parent's writable sigacts and RO element contents are copied.
 */
void
proc_sigacts_copy(proc_t dst, proc_t src)
{
	struct sigacts_ro ro_local;
	struct sigacts_ro *ro;

	if (src == NULL) {
		assert(dst == kernproc);
		bzero(&dst->p_sigacts, sizeof(struct sigacts));
		bzero(&ro_local, sizeof(struct sigacts_ro));
	} else {
		dst->p_sigacts = src->p_sigacts;
		ro_local = *sigacts_ro(&src->p_sigacts);
	}

	/* point the back-reference at the child before publishing */
	ro_local.ps_rw = &dst->p_sigacts;

	/* stage the contents locally, then copy into the read-only element */
	ro = zalloc_ro(ZONE_ID_PROC_SIGACTS_RO, Z_WAITOK | Z_NOFAIL | Z_ZERO);
	zalloc_ro_update_elem(ZONE_ID_PROC_SIGACTS_RO, ro, &ro_local);

	dst->p_sigacts.ps_ro = ro;
}
828
829 /*
830 * Destroy a process structure that resulted from a call to forkproc(), but
831 * which must be returned to the system because of a subsequent failure
832 * preventing it from becoming active.
833 *
834 * Parameters: p The incomplete process from forkproc()
835 *
836 * Returns: (void)
837 *
838 * Note: This function should only be used in an error handler following
839 * a call to forkproc().
840 *
841 * Operations occur in reverse order of those in forkproc().
842 */
void
forkproc_free(proc_t p)
{
	struct pgrp *pg;

#if CONFIG_PERSONAS
	persona_proc_drop(p);
#endif /* CONFIG_PERSONAS */

#if PSYNCH
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	/* We held signal and a transition locks; drop them */
	proc_signalend(p, 0);
	proc_transend(p, 0);

	/*
	 * If we have our own copy of the resource limits structure, we
	 * need to free it.  If it's a shared copy, we need to drop our
	 * reference on it.
	 */
	proc_limitdrop(p);

#if SYSV_SHM
	/* Need to drop references to the shared memory segment(s), if any */
	if (p->vm_shm) {
		/*
		 * Use shmexec(): we have no address space, so no mappings
		 *
		 * XXX Yes, the routine is badly named.
		 */
		shmexec(p);
	}
#endif

	/* Need to undo the effects of the fdt_fork(), if any */
	fdt_invalidate(p);
	fdt_destroy(p);

	/*
	 * Drop the reference on a text vnode pointer, if any
	 * XXX This code is broken in forkproc(); see <rdar://4256419>;
	 * XXX if anyone ever uses this field, we will be extremely unhappy.
	 */
	if (p->p_textvp) {
		vnode_rele(p->p_textvp);
		p->p_textvp = NULL;
	}

	/* Update the audit session proc count */
	AUDIT_SESSION_PROCEXIT(p);

	/* Tear down the proc's locks (reverse of forkproc() initialization) */
	lck_mtx_destroy(&p->p_mlock, &proc_mlock_grp);
	lck_mtx_destroy(&p->p_ucred_mlock, &proc_ucred_mlock_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&p->p_dtrace_sprlock, &proc_lck_grp);
#endif
	lck_spin_destroy(&p->p_slock, &proc_slock_grp);

	/* Release the credential reference */
	proc_set_ucred(p, NOCRED);

	proc_list_lock();
	/* Decrement the count of processes in the system */
	nprocs--;

	/* quit the group */
	pg = pgrp_leave_locked(p);

	/*
	 * Take it out of process hash.  The refcount must still be exactly
	 * the initial reference plus the NEW bit; flip NEW -> DEAD in one
	 * atomic operation.
	 */
	assert(os_ref_get_raw_mask(&p->p_refcount) ==
	    ((1U << P_REF_BITS) | P_REF_NEW));
	os_atomic_xor(&p->p_refcount, P_REF_NEW | P_REF_DEAD, relaxed);
	phash_remove_locked(proc_getpid(p), p);

	proc_list_unlock();

	/* drop the pgrp reference outside the list lock */
	pgrp_rele(pg);

	thread_call_free(p->p_rcall);

	/* Free allocated memory */
	zfree_ro(ZONE_ID_PROC_SIGACTS_RO, p->p_sigacts.ps_ro);
	zfree(proc_stats_zone, p->p_stats);
	p->p_stats = NULL;
	if (p->p_subsystem_root_path) {
		zfree(ZV_NAMEI, p->p_subsystem_root_path);
	}

	p->p_proc_ro = proc_ro_release_proc(p->p_proc_ro);
	if (p->p_proc_ro != NULL) {
		proc_ro_free(p->p_proc_ro);
		p->p_proc_ro = NULL;
	}

	proc_checkdeadrefs(p);
	/* final wait-reference drop — presumably frees 'p'; see proc_wait_release() */
	proc_wait_release(p);
}
942
943
944 /*
945 * forkproc
946 *
947 * Description: Create a new process structure, given a parent process
948 * structure.
949 *
950 * Parameters: parent_proc The parent process
951 *
952 * Returns: !NULL The new process structure
953 * NULL Error (insufficient free memory)
954 *
955 * Note: When successful, the newly created process structure is
956 * partially initialized; if a caller needs to deconstruct the
957 * returned structure, they must call forkproc_free() to do so.
958 */
proc_t
forkproc(proc_t parent_proc)
{
	static uint64_t nextuniqueid = 0;       /* unique-id generator; 0 is the handcrafted kernel proc */
	static pid_t lastpid = 0;               /* last PID handed out; free-PID scan resumes here */

	proc_t child_proc;      /* Our new process */
	int error = 0;
	struct pgrp *pg;
	uthread_t parent_uthread = current_uthread();
	rlim_t rlimit_cpu_cur;
	pid_t pid;
	struct proc_ro_data proc_ro_data = {};

	/*
	 * Allocate the new proc zero-filled (the hash insertion below relies
	 * on the zero fill), plus its statistics area, and copy the parent's
	 * signal actions.
	 */
	child_proc = zalloc_id(ZONE_ID_PROC, Z_WAITOK | Z_ZERO);
	child_proc->p_stats = zalloc_flags(proc_stats_zone, Z_WAITOK | Z_ZERO);
	proc_sigacts_copy(child_proc, parent_proc);
	/* Birth references; P_REF_NEW marks the proc as not fully constructed yet */
	os_ref_init_mask(&child_proc->p_refcount, P_REF_BITS, &p_refgrp, P_REF_NEW);
	os_ref_init_raw(&child_proc->p_waitref, &p_refgrp);

	/* allocate a callout for use by interval timers */
	child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);


	/*
	 * Find an unused PID.
	 */

	fdt_init(child_proc);

	proc_list_lock();

	pid = lastpid;
	do {
		/*
		 * If the process ID prototype has wrapped around,
		 * restart somewhat above 0, as the low-numbered procs
		 * tend to include daemons that don't exit.
		 */
		if (++pid >= PID_MAX) {
			pid = 100;
		}
		if (pid == lastpid) {
			/* scanned the entire PID space without finding a free id */
			panic("Unable to allocate a new pid");
		}

		/* if the pid stays in hash both for zombie and running state */
	} while (phash_find_locked(pid) != PROC_NULL ||
	    pghash_find_locked(pid) != PGRP_NULL ||
	    session_find_locked(pid) != SESSION_NULL);

	lastpid = pid;
	nprocs++;

	child_proc->p_pid = pid;
	proc_ro_data.p_idversion = OSIncrementAtomic(&nextpidversion);
	/* kernel process is handcrafted and not from fork, so start from 1 */
	proc_ro_data.p_uniqueid = ++nextuniqueid;

	/* Insert in the hash, and inherit our group (and session) */
	phash_insert_locked(pid, child_proc);
	pg = pgrp_enter_locked(parent_proc, child_proc);
	proc_list_unlock();

	if (proc_ro_data.p_uniqueid == startup_serial_num_procs) {
		/*
		 * Turn off startup serial logging now that we have reached
		 * the defined number of startup processes.
		 */
		startup_serial_logging_active = false;
	}

	/*
	 * We've identified the PID we are going to use;
	 * initialize the new process structure.
	 */
	child_proc->p_stat = SIDL;

	/*
	 * The zero'ing of the proc was at the allocation time due to need
	 * for insertion to hash. Copy the section that is to be copied
	 * directly from the parent.
	 */
	child_proc->p_forkcopy = parent_proc->p_forkcopy;

	/* inherit the parent's immutable (read-only after spawn) attributes */
	proc_ro_data.syscall_filter_mask = proc_syscall_filter_mask(parent_proc);
	proc_ro_data.p_platform_data = proc_get_ro(parent_proc)->p_platform_data;

	/*
	 * Some flags are inherited from the parent.
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats substruct is set in vm_fork.
	 */
#if CONFIG_DELAY_IDLE_SLEEP
	child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID | P_AFFINITY));
#else /* CONFIG_DELAY_IDLE_SLEEP */
	child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_SUGID));
#endif /* CONFIG_DELAY_IDLE_SLEEP */

	child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));

	child_proc->p_responsible_pid = parent_proc->p_responsible_pid;

	/*
	 * Note that if the current thread has an assumed identity, this
	 * credential will be granted to the new process.
	 */
	kauth_cred_set(&proc_ro_data.p_ucred, kauth_cred_get());

	lck_mtx_init(&child_proc->p_mlock, &proc_mlock_grp, &proc_lck_attr);
	lck_mtx_init(&child_proc->p_ucred_mlock, &proc_ucred_mlock_grp, &proc_lck_attr);
#if CONFIG_DTRACE
	lck_mtx_init(&child_proc->p_dtrace_sprlock, &proc_lck_grp, &proc_lck_attr);
#endif
	lck_spin_init(&child_proc->p_slock, &proc_slock_grp, &proc_lck_attr);

	klist_init(&child_proc->p_klist);

	/* p_textvp was copied from the parent via p_forkcopy above */
	if (child_proc->p_textvp != NULLVP) {
		/* bump references to the text vnode */
		/* Need to hold iocount across the ref call */
		if ((error = vnode_getwithref(child_proc->p_textvp)) == 0) {
			error = vnode_ref(child_proc->p_textvp);
			vnode_put(child_proc->p_textvp);
		}

		if (error != 0) {
			/* couldn't take a usecount; drop the inherited pointer */
			child_proc->p_textvp = NULLVP;
		}
	}

	/* Inherit the parent flags for code sign */
	proc_ro_data.p_csflags = ((uint32_t)proc_getcsflags(parent_proc) & ~CS_KILLED);

	child_proc->p_proc_ro = proc_ro_alloc(child_proc, &proc_ro_data, NULL, NULL);

	/* update cred on proc */
	proc_update_creds_onproc(child_proc);

	/* update audit session proc count */
	AUDIT_SESSION_PROCNEW(child_proc);

	/*
	 * Copy the parent's per process open file table to the child; if
	 * there is a per-thread current working directory, set the child's
	 * per-process current working directory to that instead of the
	 * parent's.
	 */
	if (fdt_fork(&child_proc->p_fd, parent_proc, parent_uthread->uu_cdir) != 0) {
		/* forkproc_free() unwinds everything constructed so far */
		forkproc_free(child_proc);
		child_proc = NULL;
		goto bad;
	}

#if SYSV_SHM
	if (parent_proc->vm_shm) {
		/* XXX may fail to attach shm to child */
		(void)shmfork(parent_proc, child_proc);
	}
#endif

	/*
	 * Child inherits the parent's plimit
	 */
	proc_limitfork(parent_proc, child_proc);

	/* clamp the CPU limit so it fits in p_rlim_cpu.tv_sec */
	rlimit_cpu_cur = proc_limitgetcur(child_proc, RLIMIT_CPU);
	if (rlimit_cpu_cur != RLIM_INFINITY) {
		child_proc->p_rlim_cpu.tv_sec = (rlimit_cpu_cur > __INT_MAX__) ? __INT_MAX__ : rlimit_cpu_cur;
	}

	/* Initialize new process stats, including start time */
	/* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
	microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);

	/* inherit the controlling-terminal flag when the session has a tty */
	if (pg->pg_session->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) {
		os_atomic_or(&child_proc->p_flag, P_CONTROLT, relaxed);
	}

	/*
	 * block all signals to reach the process.
	 * no transition race should be occurring with the child yet,
	 * but indicate that the process is in (the creation) transition.
	 */
	proc_signalstart(child_proc, 0);
	proc_transstart(child_proc, 0, 0);

	child_proc->p_pcaction = 0;

	TAILQ_INIT(&child_proc->p_uthlist);
	TAILQ_INIT(&child_proc->p_aio_activeq);
	TAILQ_INIT(&child_proc->p_aio_doneq);

	/*
	 * Copy work queue information
	 *
	 * Note: This should probably only happen in the case where we are
	 *	creating a child that is a copy of the parent; since this
	 *	routine is called in the non-duplication case of vfork()
	 *	or posix_spawn(), then this information should likely not
	 *	be duplicated.
	 *
	 * <rdar://6640553> Work queue pointers that no longer point to code
	 */
	child_proc->p_wqthread = parent_proc->p_wqthread;
	child_proc->p_threadstart = parent_proc->p_threadstart;
	child_proc->p_pthsize = parent_proc->p_pthsize;
	if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
		child_proc->p_lflag |= P_LREGISTER;
	}
	child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
	child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
	child_proc->p_dispatchqueue_label_offset = parent_proc->p_dispatchqueue_label_offset;
	child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
	child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
	child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
	child_proc->p_pthread_wq_quantum_offset = parent_proc->p_pthread_wq_quantum_offset;
#if PSYNCH
	pth_proc_hashinit(child_proc);
#endif /* PSYNCH */

#if CONFIG_PERSONAS
	child_proc->p_persona = NULL;
	error = persona_proc_inherit(child_proc, parent_proc);
	if (error != 0) {
		printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
		forkproc_free(child_proc);
		child_proc = NULL;
		goto bad;
	}
#endif

#if CONFIG_MEMORYSTATUS
	/* Memorystatus init */
	child_proc->p_memstat_state = 0;
	child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
	child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
	child_proc->p_memstat_assertionpriority = 0;
	child_proc->p_memstat_userdata = 0;
	child_proc->p_memstat_idle_start = 0;
	child_proc->p_memstat_idle_delta = 0;
	child_proc->p_memstat_memlimit = 0;
	child_proc->p_memstat_memlimit_active = 0;
	child_proc->p_memstat_memlimit_inactive = 0;
	child_proc->p_memstat_relaunch_flags = P_MEMSTAT_RELAUNCH_UNKNOWN;
#if CONFIG_FREEZE
	child_proc->p_memstat_freeze_sharedanon_pages = 0;
#endif
	child_proc->p_memstat_dirty = 0;
	child_proc->p_memstat_idledeadline = 0;
#endif /* CONFIG_MEMORYSTATUS */

	/* duplicate the parent's subsystem root path, if set */
	if (parent_proc->p_subsystem_root_path) {
		size_t parent_length = strlen(parent_proc->p_subsystem_root_path) + 1;
		assert(parent_length <= MAXPATHLEN);
		child_proc->p_subsystem_root_path = zalloc_flags(ZV_NAMEI,
		    Z_WAITOK | Z_ZERO);
		memcpy(child_proc->p_subsystem_root_path, parent_proc->p_subsystem_root_path, parent_length);
	}

bad:
	return child_proc;
}
1223
void
proc_lock(proc_t p)
{
	/*
	 * Acquire the per-process mutex.  Lock ordering forbids taking
	 * p_mlock while this thread holds the global proc list lock,
	 * hence the assert.
	 */
	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&p->p_mlock);
}
1230
void
proc_unlock(proc_t p)
{
	/* Release the per-process mutex taken by proc_lock() */
	lck_mtx_unlock(&p->p_mlock);
}
1236
void
proc_spinlock(proc_t p)
{
	/* Acquire the per-process spinlock (p_slock) */
	lck_spin_lock_grp(&p->p_slock, &proc_slock_grp);
}
1242
void
proc_spinunlock(proc_t p)
{
	/* Release the per-process spinlock taken by proc_spinlock() */
	lck_spin_unlock(&p->p_slock);
}
1248
void
proc_list_lock(void)
{
	/* Acquire the global process list mutex */
	lck_mtx_lock(&proc_list_mlock);
}
1254
void
proc_list_unlock(void)
{
	/* Release the global process list mutex taken by proc_list_lock() */
	lck_mtx_unlock(&proc_list_mlock);
}
1260
void
proc_ucred_lock(proc_t p)
{
	/* Acquire the per-process credential mutex (p_ucred_mlock) */
	lck_mtx_lock(&p->p_ucred_mlock);
}
1266
void
proc_ucred_unlock(proc_t p)
{
	/* Release the credential mutex taken by proc_ucred_lock() */
	lck_mtx_unlock(&p->p_ucred_mlock);
}
1272
1273 void
proc_update_creds_onproc(proc_t p)1274 proc_update_creds_onproc(proc_t p)
1275 {
1276 kauth_cred_t cred = proc_ucred(p);
1277
1278 p->p_uid = kauth_cred_getuid(cred);
1279 p->p_gid = kauth_cred_getgid(cred);
1280 p->p_ruid = kauth_cred_getruid(cred);
1281 p->p_rgid = kauth_cred_getrgid(cred);
1282 p->p_svuid = kauth_cred_getsvuid(cred);
1283 p->p_svgid = kauth_cred_getsvgid(cred);
1284 }
1285
1286
1287 bool
uthread_is64bit(struct uthread * uth)1288 uthread_is64bit(struct uthread *uth)
1289 {
1290 return uth->uu_flag & UT_LP64;
1291 }
1292
/*
 * Initialize the BSD uthread state for a newly created thread and fill
 * in its thread_ro template (credential, owning proc, proc_ro pointer).
 */
void
uthread_init(task_t task, uthread_t uth, thread_ro_t tro_tpl, int workq_thread)
{
	uthread_t uth_parent = current_uthread();

	lck_spin_init(&uth->uu_rethrottle_lock, &rethrottle_lock_grp,
	    LCK_ATTR_NULL);

	/*
	 * Lazily set the thread on the kernel VFS context
	 * to the first thread made which will be vm_pageout_scan_thread.
	 */
	if (__improbable(vfs_context0.vc_thread == NULL)) {
		extern thread_t vm_pageout_scan_thread;

		assert(task == kernel_task);
		assert(get_machthread(uth) == vm_pageout_scan_thread);
		vfs_context0.vc_thread = get_machthread(uth);
	}

	/* mark the thread LP64 if its task uses a 64-bit address space */
	if (task_get_64bit_addr(task)) {
		uth->uu_flag |= UT_LP64;
	}

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL. If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	if (task == kernel_task) {
		/* kernel threads run with the kernel VFS context credential */
		kauth_cred_set(&tro_tpl->tro_cred, vfs_context0.vc_ucred);
		tro_tpl->tro_proc = kernproc;
		tro_tpl->tro_proc_ro = kernproc->p_proc_ro;
	} else if (!is_corpsetask(task)) {
		thread_ro_t curtro = current_thread_ro();
		proc_t p = get_bsdtask_info(task);

		if (task == curtro->tro_task &&
		    ((curtro->tro_flags & TRO_SETUID) == 0 || !workq_thread)) {
			/* same task: inherit the creator's credential directly */
			kauth_cred_set(&tro_tpl->tro_cred, curtro->tro_cred);
			tro_tpl->tro_flags = (curtro->tro_flags & TRO_SETUID);
			tro_tpl->tro_proc_ro = curtro->tro_proc_ro;
		} else {
			/* cross-task (or setuid workq) creation: take the proc's credential */
			kauth_cred_t cred = kauth_cred_proc_ref(p);
			kauth_cred_set_and_unref(&tro_tpl->tro_cred, &cred);
			tro_tpl->tro_proc_ro = task_get_ro(task);
		}
		tro_tpl->tro_proc = p;

		proc_lock(p);
		if (workq_thread) {
			/* workq_thread threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent->uu_flag & UT_SAS_OLDMASK) {
			/* creator is inside sigsuspend(); inherit its saved mask */
			uth->uu_sigmask = uth_parent->uu_oldmask;
		} else {
			uth->uu_sigmask = uth_parent->uu_sigmask;
		}


		/*
		 * Do not add the uthread to proc uthlist for exec copy task,
		 * since they do not hold a ref on proc.
		 */
		if (!task_is_exec_copy(task)) {
			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		}
		proc_unlock(p);

#if CONFIG_DTRACE
		/* claim per-thread DTrace scratch space if the proc has any */
		if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
	} else {
		/* corpse task: no credential or proc; just record proc_ro */
		tro_tpl->tro_proc_ro = task_get_ro(task);
	}

	uth->uu_pending_sigreturn = 0;
	uthread_init_proc_refcount(uth);
}
1377
1378 mach_port_name_t
uthread_joiner_port(struct uthread * uth)1379 uthread_joiner_port(struct uthread *uth)
1380 {
1381 return uth->uu_save.uus_bsdthread_terminate.kport;
1382 }
1383
1384 user_addr_t
uthread_joiner_address(uthread_t uth)1385 uthread_joiner_address(uthread_t uth)
1386 {
1387 return uth->uu_save.uus_bsdthread_terminate.ulock_addr;
1388 }
1389
/*
 * Wake the thread joining on this uthread and drop the joiner port
 * reference.  Consumes (and clears) the bsdthread-terminate save area.
 */
void
uthread_joiner_wake(task_t task, uthread_t uth)
{
	/* snapshot the terminate info, then scrub it so it cannot be reused */
	struct _bsdthread_terminate bts = uth->uu_save.uus_bsdthread_terminate;

	assert(bts.ulock_addr);
	bzero(&uth->uu_save.uus_bsdthread_terminate, sizeof(bts));

	/* wake all waiters on the ulock even though this thread is not the owner */
	int flags = UL_UNFAIR_LOCK | ULF_WAKE_ALL | ULF_WAKE_ALLOW_NON_OWNER;
	(void)ulock_wake(task, flags, bts.ulock_addr, 0);
	/* release the joiner's port reference in the task's IPC space */
	mach_port_deallocate(get_task_ipcspace(task), bts.kport);
}
1402
1403 /*
1404 * This routine frees the thread name field of the uthread_t structure. Split out of
1405 * uthread_cleanup() so thread name does not get deallocated while generating a corpse fork.
1406 */
1407 void
uthread_cleanup_name(uthread_t uth)1408 uthread_cleanup_name(uthread_t uth)
1409 {
1410 /*
1411 * <rdar://17834538>
1412 * Set pth_name to NULL before calling free().
1413 * Previously there was a race condition in the
1414 * case this code was executing during a stackshot
1415 * where the stackshot could try and copy pth_name
1416 * after it had been freed and before if was marked
1417 * as null.
1418 */
1419 if (uth->pth_name != NULL) {
1420 void *pth_name = uth->pth_name;
1421 uth->pth_name = NULL;
1422 kfree_data(pth_name, MAXTHREADNAMESIZE);
1423 }
1424 return;
1425 }
1426
1427 /*
1428 * This routine frees all the BSD context in uthread except the credential.
1429 * It does not free the uthread structure as well
1430 */
1431 void
uthread_cleanup(uthread_t uth,thread_ro_t tro)1432 uthread_cleanup(uthread_t uth, thread_ro_t tro)
1433 {
1434 task_t task = tro->tro_task;
1435 proc_t p = tro->tro_proc;
1436
1437 uthread_assert_zero_proc_refcount(uth);
1438
1439 if (uth->uu_lowpri_window || uth->uu_throttle_info) {
1440 /*
1441 * task is marked as a low priority I/O type
1442 * and we've somehow managed to not dismiss the throttle
1443 * through the normal exit paths back to user space...
1444 * no need to throttle this thread since its going away
1445 * but we do need to update our bookeeping w/r to throttled threads
1446 *
1447 * Calling this routine will clean up any throttle info reference
1448 * still inuse by the thread.
1449 */
1450 throttle_lowpri_io(0);
1451 }
1452
1453 #if CONFIG_AUDIT
1454 /*
1455 * Per-thread audit state should never last beyond system
1456 * call return. Since we don't audit the thread creation/
1457 * removal, the thread state pointer should never be
1458 * non-NULL when we get here.
1459 */
1460 assert(uth->uu_ar == NULL);
1461 #endif
1462
1463 if (uth->uu_select.nbytes) {
1464 select_cleanup_uthread(&uth->uu_select);
1465 }
1466
1467 if (uth->uu_cdir) {
1468 vnode_rele(uth->uu_cdir);
1469 uth->uu_cdir = NULLVP;
1470 }
1471
1472 if (uth->uu_selset) {
1473 select_set_free(uth->uu_selset);
1474 uth->uu_selset = NULL;
1475 }
1476
1477 os_reason_free(uth->uu_exit_reason);
1478
1479 if ((task != kernel_task) && p) {
1480 /*
1481 * Remove the thread from the process list and
1482 * transfer [appropriate] pending signals to the process.
1483 * Do not remove the uthread from proc uthlist for exec
1484 * copy task, since they does not have a ref on proc and
1485 * would not have been added to the list.
1486 */
1487 if (uth->uu_kqr_bound) {
1488 kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
1489 }
1490
1491 if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
1492 proc_lock(p);
1493 TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
1494 p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
1495 proc_unlock(p);
1496 }
1497
1498 #if CONFIG_DTRACE
1499 struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
1500 uth->t_dtrace_scratch = NULL;
1501 if (tmpptr != NULL && !task_is_exec_copy(task)) {
1502 dtrace_ptss_release_entry(p, tmpptr);
1503 }
1504 #endif
1505 } else {
1506 assert(!uth->uu_kqr_bound);
1507 }
1508 }
1509
1510 /* This routine releases the credential stored in uthread */
1511 void
uthread_cred_ref(struct ucred * ucred)1512 uthread_cred_ref(struct ucred *ucred)
1513 {
1514 kauth_cred_ref(ucred);
1515 }
1516
/* This routine releases the credential reference held by a uthread */
void
uthread_cred_free(struct ucred *ucred)
{
	/* setting the (local) pointer to NOCRED drops the reference */
	kauth_cred_set(&ucred, NOCRED);
}
1522
1523 /* This routine frees the uthread structure held in thread structure */
1524 void
uthread_destroy(uthread_t uth)1525 uthread_destroy(uthread_t uth)
1526 {
1527 uthread_destroy_proc_refcount(uth);
1528
1529 if (uth->t_tombstone) {
1530 kfree_type(struct doc_tombstone, uth->t_tombstone);
1531 uth->t_tombstone = NULL;
1532 }
1533
1534 #if CONFIG_DEBUG_SYSCALL_REJECTION
1535 size_t const bitstr_len = BITMAP_SIZE(mach_trap_count + nsysent);
1536
1537 if (uth->syscall_rejection_mask) {
1538 kfree_data(uth->syscall_rejection_mask, bitstr_len);
1539 uth->syscall_rejection_mask = NULL;
1540 }
1541 #endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
1542
1543 lck_spin_destroy(&uth->uu_rethrottle_lock, &rethrottle_lock_grp);
1544
1545 uthread_cleanup_name(uth);
1546 }
1547
1548 user_addr_t
thread_get_sigreturn_token(thread_t thread)1549 thread_get_sigreturn_token(thread_t thread)
1550 {
1551 uthread_t ut = (struct uthread *) get_bsdthread_info(thread);
1552 return ut->uu_sigreturn_token;
1553 }
1554
1555 uint32_t
thread_get_sigreturn_diversifier(thread_t thread)1556 thread_get_sigreturn_diversifier(thread_t thread)
1557 {
1558 uthread_t ut = (struct uthread *) get_bsdthread_info(thread);
1559 return ut->uu_sigreturn_diversifier;
1560 }
1561