xref: /xnu-12377.1.9/bsd/kern/mach_loader.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	Copyright (C) 1988, 1989,  NeXT, Inc.
30  *
31  *	File:	kern/mach_loader.c
32  *	Author:	Avadis Tevanian, Jr.
33  *
34  *	Mach object file loader (kernel version, for now).
35  *
36  * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
37  *	Started.
38  */
39 
40 #include <sys/param.h>
41 #include <sys/vnode_internal.h>
42 #include <sys/uio.h>
43 #include <sys/namei.h>
44 #include <sys/proc_internal.h>
45 #include <sys/kauth.h>
46 #include <sys/stat.h>
47 #include <sys/malloc.h>
48 #include <sys/mount_internal.h>
49 #include <sys/fcntl.h>
50 #include <sys/file_internal.h>
51 #include <sys/ubc_internal.h>
52 #include <sys/imgact.h>
53 #include <sys/codesign.h>
54 #include <sys/proc_uuid_policy.h>
55 #include <sys/reason.h>
56 #include <sys/kdebug.h>
57 #include <sys/spawn_internal.h>
58 
59 #include <mach/mach_types.h>
60 #include <mach/vm_map.h>        /* vm_allocate() */
61 #include <mach/mach_vm.h>       /* mach_vm_allocate() */
62 #include <mach/vm_statistics.h>
63 #include <mach/task.h>
64 #include <mach/thread_act.h>
65 
66 #include <machine/vmparam.h>
67 #include <machine/exec.h>
68 #include <machine/pal_routines.h>
69 
70 #include <kern/ast.h>
71 #include <kern/kern_types.h>
72 #include <kern/cpu_number.h>
73 #include <kern/mach_loader.h>
74 #include <kern/mach_fat.h>
75 #include <kern/kalloc.h>
76 #include <kern/task.h>
77 #include <kern/thread.h>
78 #include <kern/page_decrypt.h>
79 
80 #include <mach-o/fat.h>
81 #include <mach-o/loader.h>
82 
83 #include <vm/pmap.h>
84 #include <vm/vm_map_xnu.h>
85 #include <vm/vm_kern_xnu.h>
86 #include <vm/vm_pager_xnu.h>
87 #include <vm/vnode_pager.h>
88 #include <vm/vm_protos.h>
89 #include <vm/vm_shared_region.h>
90 #include <IOKit/IOReturn.h>     /* for kIOReturnNotPrivileged */
91 #include <IOKit/IOBSD.h>        /* for IOVnodeHasEntitlement */
92 
93 #include <os/log.h>
94 #include <os/overflow.h>
95 
96 #include <pexpert/pexpert.h>
97 #include <libkern/libkern.h>
98 
99 #include "kern_exec_internal.h"
100 
#if APPLEVIRTUALPLATFORM
/* Forcing arm64_32 execution is only supported on Apple virtual platforms. */
#define ALLOW_FORCING_ARM64_32 1
#endif /* APPLEVIRTUALPLATFORM */

#if ALLOW_FORCING_ARM64_32
#if DEVELOPMENT || DEBUG
/* DEVELOPMENT/DEBUG: tunable may come from the device tree or a boot-arg. */
TUNABLE_DT(uint32_t, force_arm64_32, "/defaults", "force-arm64-32", "force-arm64-32", 0, TUNABLE_DT_NONE);
#else
/* RELEASE: TUNABLE_DT_NO_BOOTARG — presumably device-tree only, boot-arg override disabled. */
TUNABLE_DT(uint32_t, force_arm64_32, "/defaults", "force-arm64-32", "force-arm64-32", 0, TUNABLE_DT_NO_BOOTARG);
#endif
#endif /* ALLOW_FORCING_ARM64_32 */

#if ALLOW_FORCING_ARM64_32 || DEVELOPMENT || DEBUG
/*
 * The binary grading priority for the highest priority override.  Each progressive override
 * receives a priority 1 less than its neighbor.
 */
#define    BINGRADE_OVERRIDE_MAX 200
#endif /* ALLOW_FORCING_ARM64_32 || DEVELOPMENT || DEBUG */
120 
#if DEVELOPMENT || DEBUG
/*
 * Maximum number of overrides that can be passed via the bingrade boot-arg property.
 */
#define MAX_BINGRADE_OVERRIDES 4
/*
 * Max size of one bingrade override + 1 comma
 * (technically, sizeof will also include the terminating NUL here, but an overestimation of
 * buffer space is fine).
 */
#define BINGRADE_MAXSTRINGLEN sizeof("0x12345678:0x12345678:0x12345678,")

/*
 * Each binary grading override has a cpu type and cpu subtype to match against the values in
 * the Mach-o header, plus an execfeatures word (or the wildcard below) to match against the
 * slice's execution features.
 */
typedef struct bingrade {
	uint32_t cputype;      /* CPU type to match */
	uint32_t cpusubtype;   /* CPU subtype to match */
	uint32_t execfeatures; /* exec features to match, or the wildcard below */
#define EXECFEATURES_OVERRIDE_WILDCARD (~(uint32_t)0)
} bingrade_t;

/*
 * The number of binary grading overrides that are active.
 * -1 means the `bingrade' boot-arg has not been parsed yet (it is parsed lazily
 * on first use in grade_binary_override()).
 */
static int num_bingrade_overrides = -1;

/*
 * The bingrade_overrides array is an ordered list of binary grading overrides.  The first element in the array
 * has the highest priority.  When parsing the `bingrade' boot-arg, elements are added to this array in order.
 */
static bingrade_t bingrade_overrides[MAX_BINGRADE_OVERRIDES] = { 0 };
#endif /* DEVELOPMENT || DEBUG */
153 
154 
/*
 * An empty load_result_t.  load_machfile() copies this over the caller's
 * result before parsing so every field starts from a known-clean state.
 */
static const load_result_t load_result_null = {
	.mach_header = MACH_VM_MIN_ADDRESS,
	.entry_point = MACH_VM_MIN_ADDRESS,
	.user_stack = MACH_VM_MIN_ADDRESS,
	.user_stack_size = 0,
	.user_stack_alloc = MACH_VM_MIN_ADDRESS,
	.user_stack_alloc_size = 0,
	.all_image_info_addr = MACH_VM_MIN_ADDRESS,
	.all_image_info_size = 0,
	.thread_count = 0,
	.unixproc = 0,
	.dynlinker = 0,
	.needs_dynlinker = 0,
	.validentry = 0,
	.using_lcmain = 0,
	.is_64bit_addr = 0,
	.is_64bit_data = 0,
	.custom_stack = 0,
	.csflags = 0,
	.has_pagezero = 0,
	.uuid = { 0 },
	/*
	 * NOTE(review): min/max seeded at opposite extremes, presumably so the
	 * loader can shrink-wrap them with min()/max() as segments are mapped.
	 */
	.min_vm_addr = MACH_VM_MAX_ADDRESS,
	.max_vm_addr = MACH_VM_MIN_ADDRESS,
	.ro_vm_start = MACH_VM_MIN_ADDRESS,
	.ro_vm_end = MACH_VM_MIN_ADDRESS,
	.cs_end_offset = 0,
	.threadstate = NULL,
	.threadstate_sz = 0,
	.is_rosetta = 0,
	.dynlinker_ro_vm_start = 0,
	.dynlinker_ro_vm_end = 0,
	.dynlinker_mach_header = MACH_VM_MIN_ADDRESS,
	.dynlinker_fd = -1,     /* -1 == no dyld file descriptor */
};
190 
191 /*
192  * Prototypes of static functions.
193  */
194 static load_return_t
195 parse_machfile(
196 	struct vnode            *vp,
197 	vm_map_t                map,
198 	thread_t                thread,
199 	struct mach_header      *header,
200 	off_t                   file_offset,
201 	off_t                   macho_size,
202 	int                     depth,
203 	int64_t                 slide,
204 	int64_t                 dyld_slide,
205 	load_result_t           *result,
206 	load_result_t           *binresult,
207 	struct image_params     *imgp
208 	);
209 
210 static load_return_t
211 load_segment(
212 	struct load_command             *lcp,
213 	uint32_t                        filetype,
214 	void                            *control,
215 	off_t                           pager_offset,
216 	off_t                           macho_size,
217 	struct vnode                    *vp,
218 	vm_map_t                        map,
219 	int64_t                         slide,
220 	load_result_t                   *result,
221 	struct image_params             *imgp
222 	);
223 
224 static load_return_t
225 load_uuid(
226 	struct uuid_command             *uulp,
227 	char                            *command_end,
228 	load_result_t                   *result
229 	);
230 
231 static load_return_t
232 load_version(
233 	struct version_min_command     *vmc,
234 	boolean_t               *found_version_cmd,
235 	struct image_params             *imgp,
236 	load_result_t           *result
237 	);
238 
239 static load_return_t
240 load_code_signature(
241 	struct linkedit_data_command    *lcp,
242 	struct vnode                    *vp,
243 	off_t                           macho_offset,
244 	off_t                           macho_size,
245 	cpu_type_t                      cputype,
246 	cpu_subtype_t                   cpusubtype,
247 	load_result_t                   *result,
248 	struct image_params             *imgp);
249 
250 #if CONFIG_CODE_DECRYPTION
251 static load_return_t
252 set_code_unprotect(
253 	struct encryption_info_command  *lcp,
254 	caddr_t                         addr,
255 	vm_map_t                        map,
256 	int64_t                         slide,
257 	struct vnode                    *vp,
258 	off_t                           macho_offset,
259 	cpu_type_t                      cputype,
260 	cpu_subtype_t                   cpusubtype);
261 #endif
262 
263 static
264 load_return_t
265 load_main(
266 	struct entry_point_command      *epc,
267 	thread_t                thread,
268 	int64_t                         slide,
269 	load_result_t           *result
270 	);
271 
272 static
273 load_return_t
274 setup_driver_main(
275 	thread_t                thread,
276 	int64_t                         slide,
277 	load_result_t           *result
278 	);
279 
280 static load_return_t
281 load_unixthread(
282 	struct thread_command   *tcp,
283 	thread_t                        thread,
284 	int64_t                         slide,
285 	boolean_t                       is_x86_64_compat_binary,
286 	load_result_t                   *result
287 	);
288 
289 static load_return_t
290 load_threadstate(
291 	thread_t                thread,
292 	uint32_t        *ts,
293 	uint32_t        total_size,
294 	load_result_t *
295 	);
296 
297 static load_return_t
298 load_threadstack(
299 	thread_t                thread,
300 	uint32_t                *ts,
301 	uint32_t                total_size,
302 	mach_vm_offset_t        *user_stack,
303 	int                     *customstack,
304 	boolean_t               is_x86_64_compat_binary,
305 	load_result_t           *result
306 	);
307 
308 static load_return_t
309 load_threadentry(
310 	thread_t                thread,
311 	uint32_t        *ts,
312 	uint32_t        total_size,
313 	mach_vm_offset_t        *entry_point
314 	);
315 
316 static load_return_t
317 load_dylinker(
318 	struct dylinker_command *lcp,
319 	integer_t               archbits,
320 	vm_map_t                map,
321 	thread_t                thread,
322 	int                     depth,
323 	int64_t                 slide,
324 	load_result_t           *result,
325 	struct image_params     *imgp
326 	);
327 
328 
329 #if CONFIG_ROSETTA
330 static load_return_t
331 load_rosetta(
332 	vm_map_t                        map,
333 	thread_t                        thread,
334 	load_result_t           *result,
335 	struct image_params     *imgp
336 	);
337 #endif
338 
339 #if __x86_64__
340 extern int bootarg_no32exec;
341 static boolean_t
342 check_if_simulator_binary(
343 	struct image_params     *imgp,
344 	off_t                   file_offset,
345 	off_t                   macho_size);
346 #endif
347 
348 struct macho_data;
349 
350 static load_return_t
351 get_macho_vnode(
352 	const char                      *path,
353 	integer_t               archbits,
354 	struct mach_header      *mach_header,
355 	off_t                   *file_offset,
356 	off_t                   *macho_size,
357 	struct macho_data       *macho_data,
358 	struct vnode            **vpp,
359 	struct image_params     *imgp
360 	);
361 
362 #if DEVELOPMENT || DEBUG
363 /*
364  * Parse the bingrade boot-arg, adding cputype/cpusubtype/execfeatures tuples to the global binary grading
365  * override array.  The bingrade boot-arg must be of the form:
366  *
367  * NUM := '0x' <HEXDIGITS> | '0' <OCTALDIGITS> | <DECIMALDIGITS>
368  * OVERRIDESPEC := <NUM> | <NUM> ':' <NUM> | <NUM> ':' <NUM> ':' <NUM>
369  * BINSPEC_BOOTARG := <OVERRIDESPEC> ',' <BINSPEC_BOOTARG> | <OVERRIDESPEC>
370  *
371  * Returns the number of overrides specified in the boot-arg, or 0 if there were no overrides or the
372  * syntax of the overrides was found to be invalid.
373  */
374 static int
parse_bingrade_override_bootarg(bingrade_t * overrides,int max_overrides,char * overrides_arg_string)375 parse_bingrade_override_bootarg(bingrade_t *overrides, int max_overrides, char *overrides_arg_string)
376 {
377 	char bingrade_arg[BINGRADE_MAXSTRINGLEN * MAX_BINGRADE_OVERRIDES + 1];
378 	int cputypespec_count = 0;
379 
380 	/* Look for the bingrade boot-arg */
381 	if (overrides_arg_string != NULL || PE_parse_boot_arg_str("bingrade", bingrade_arg, sizeof(bingrade_arg))) {
382 		char *bingrade_str = (overrides_arg_string != NULL) ? overrides_arg_string : &bingrade_arg[0];
383 		char *cputypespec;
384 
385 		/* Skip leading whitespace */
386 		while (*bingrade_str == ' ' || *bingrade_str == '\t') {
387 			bingrade_str++;
388 		}
389 
390 		if (*bingrade_str == 0) {
391 			/* empty string, so just return 0 */
392 			return 0;
393 		}
394 
395 		/* If we found the boot-arg, iterate on each OVERRIDESPEC in the BOOTSPEC_BOOTARG */
396 		while ((cputypespec_count < max_overrides) && ((cputypespec = strsep(&bingrade_str, ",")) != NULL)) {
397 			char *colon = strchr(cputypespec, ':');
398 			char *end;
399 			char *cputypeptr;
400 			char cputypestr[16] = { 0 };
401 			unsigned long cputype, cpusubtype, execfeatures;
402 
403 			/* If there's a colon present, process the cpu subtype and possibly the execfeatures */
404 			if (colon != NULL) {
405 				colon++;        /* Move past the colon before parsing */
406 
407 				char execfeat_buf[16] = { 0 }; /* This *MUST* be preinitialized to zeroes */
408 				char *second_colon = strchr(colon, ':');
409 				ptrdiff_t amt_to_copy = 0;
410 
411 				if (second_colon != NULL) {
412 					strlcpy(execfeat_buf, second_colon + 1, MIN(strlen(second_colon + 1) + 1, sizeof(execfeat_buf)));
413 
414 					execfeatures = strtoul(execfeat_buf, &end, 0);
415 					if (execfeat_buf == end || execfeatures > UINT_MAX) {
416 						printf("Invalid bingrade boot-arg (`%s').\n", cputypespec);
417 						return 0;
418 					}
419 
420 					overrides[cputypespec_count].execfeatures = (uint32_t)execfeatures;
421 
422 					/*
423 					 * Note there is no "+ 1" here because we are only copying up to but not
424 					 * including the second colon.  Since cputypestr was initialized to all 0s
425 					 * above, the terminating NUL will already be there.
426 					 */
427 					amt_to_copy = second_colon - colon;
428 				} else {
429 					/* No second colon, so use the wildcard for execfeatures */
430 					overrides[cputypespec_count].execfeatures = EXECFEATURES_OVERRIDE_WILDCARD;
431 					/*
432 					 * There is no "+ 1" here because colon was already moved forward by 1 (above).
433 					 * which allows this computation to include the terminating NUL in the length
434 					 * computed.
435 					 */
436 					amt_to_copy = colon - cputypespec;
437 				}
438 
439 				/* Now determine the cpu subtype */
440 				cpusubtype = strtoul(colon, &end, 0);
441 				if (colon == end || cpusubtype > UINT_MAX) {
442 					printf("Invalid bingrade boot-arg (`%s').\n", cputypespec);
443 					return 0;
444 				}
445 				overrides[cputypespec_count].cpusubtype = (uint32_t)cpusubtype;
446 
447 				/* Copy the cputype string into a temp buffer */
448 				strlcpy(cputypestr, cputypespec, MIN(sizeof(cputypestr), amt_to_copy));
449 
450 				cputypeptr = &cputypestr[0];
451 			} else {
452 				/*
453 				 * No colon present, set the cpu subtype to 0, the execfeatures to EXECFEATURES_OVERRIDE_WILDCARD
454 				 * and use the whole string as the cpu type
455 				 */
456 				overrides[cputypespec_count].cpusubtype = 0;
457 				overrides[cputypespec_count].execfeatures = EXECFEATURES_OVERRIDE_WILDCARD;
458 				cputypeptr = cputypespec;
459 			}
460 
461 			cputype = strtoul(cputypeptr, &end, 0);
462 			if (cputypeptr == end || cputype > UINT_MAX) {
463 				printf("Invalid bingrade boot-arg (`%s').\n", cputypespec);
464 				return 0;
465 			}
466 			overrides[cputypespec_count].cputype = (uint32_t)cputype;
467 
468 			cputypespec_count++;
469 		}
470 	} else {
471 		/* No bingrade boot-arg; return 0 overrides */
472 		return 0;
473 	}
474 
475 	return cputypespec_count;
476 }
477 
478 size_t
bingrade_get_override_string(char * existing_overrides,size_t existing_overrides_bufsize)479 bingrade_get_override_string(char *existing_overrides, size_t existing_overrides_bufsize)
480 {
481 	if (num_bingrade_overrides <= 0) {
482 		return 0;       /* No overrides set */
483 	}
484 
485 	/* Init the empty string for strlcat */
486 	existing_overrides[0] = 0;
487 
488 	for (int i = 0; i < num_bingrade_overrides; i++) {
489 		char next_override[33]; /* 10char + ':' + 10char + ([future] ':' + 10char) */
490 		snprintf(next_override, sizeof(next_override), "0x%x:0x%x", bingrade_overrides[i].cputype, bingrade_overrides[i].cpusubtype);
491 		if (i > 0) {
492 			strlcat(existing_overrides, ",", existing_overrides_bufsize);
493 		}
494 		strlcat(existing_overrides, next_override, existing_overrides_bufsize);
495 	}
496 
497 	return strlen(existing_overrides);
498 }
499 
500 int
binary_grade_overrides_update(char * overrides_arg)501 binary_grade_overrides_update(char *overrides_arg)
502 {
503 #if ALLOW_FORCING_ARM64_32
504 	if (force_arm64_32) {
505 		/* If forcing arm64_32, don't allow bingrade override. */
506 		return 0;
507 	}
508 #endif /* ALLOW_FORCING_ARM64_32 */
509 	num_bingrade_overrides = parse_bingrade_override_bootarg(bingrade_overrides, MAX_BINGRADE_OVERRIDES, overrides_arg);
510 	return num_bingrade_overrides;
511 }
512 #endif /* DEVELOPMENT || DEBUG */
513 
514 static inline void
widen_segment_command(const struct segment_command * scp32,struct segment_command_64 * scp)515 widen_segment_command(const struct segment_command *scp32,
516     struct segment_command_64 *scp)
517 {
518 	scp->cmd = scp32->cmd;
519 	scp->cmdsize = scp32->cmdsize;
520 	bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
521 	scp->vmaddr = scp32->vmaddr;
522 	scp->vmsize = scp32->vmsize;
523 	scp->fileoff = scp32->fileoff;
524 	scp->filesize = scp32->filesize;
525 	scp->maxprot = scp32->maxprot;
526 	scp->initprot = scp32->initprot;
527 	scp->nsects = scp32->nsects;
528 	scp->flags = scp32->flags;
529 }
530 
531 static void
note_all_image_info_section(const struct segment_command_64 * scp,boolean_t is64,size_t section_size,const void * sections,int64_t slide,load_result_t * result)532 note_all_image_info_section(const struct segment_command_64 *scp,
533     boolean_t is64, size_t section_size, const void *sections,
534     int64_t slide, load_result_t *result)
535 {
536 	const union {
537 		struct section s32;
538 		struct section_64 s64;
539 	} *sectionp;
540 	unsigned int i;
541 
542 
543 	if (strncmp(scp->segname, "__DATA_DIRTY", sizeof(scp->segname)) != 0 &&
544 	    strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) {
545 		return;
546 	}
547 	for (i = 0; i < scp->nsects; ++i) {
548 		sectionp = (const void *)
549 		    ((const char *)sections + section_size * i);
550 		if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
551 		    sizeof(sectionp->s64.sectname))) {
552 			result->all_image_info_addr =
553 			    is64 ? sectionp->s64.addr : sectionp->s32.addr;
554 			result->all_image_info_addr += slide;
555 			result->all_image_info_size =
556 			    is64 ? sectionp->s64.size : sectionp->s32.size;
557 			return;
558 		}
559 	}
560 }
561 
#if __arm64__
/*
 * Allow bypassing some security rules (hard pagezero, no write+execute)
 * in exchange for better binary compatibility for legacy apps built
 * before 16KB-alignment was enforced.
 *
 * Currently always TRUE; kept as a named constant so the policy decision
 * is easy to locate and audit.
 */
const int fourk_binary_compatibility_unsafe = TRUE;
#endif /* __arm64__ */
570 
571 #if XNU_TARGET_OS_OSX
572 
/*
 * Determines whether this process may host/run third party plugins.
 *
 * Plugin hosts run in pointer-auth "keys-off" mode (A keys disabled) so they
 * can execute code signed by other teams.  Eligibility requires a platform
 * binary for which JOP has not already been disabled; beyond that, the binary
 * qualifies via an explicit entitlement, a library-validation-disabling
 * entitlement, or a hardcoded signing-identity exception list.
 */
static inline bool
process_is_plugin_host(struct image_params *imgp, load_result_t *result)
{
	/* Images already marked no-JOP don't get plugin-host treatment. */
	if (imgp->ip_flags & IMGPF_NOJOP) {
		return false;
	}

	/* Only platform binaries are eligible. */
	if (!result->platform_binary) {
		return false;
	}

	/*
	 * Look up the code-signing identity of this slice.
	 * NOTE(review): csblob may be NULL here; this assumes
	 * csblob_get_identity(NULL) safely returns NULL — verify.
	 */
	struct cs_blob *csblob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset);
	const char *identity = csblob_get_identity(csblob);
	if (!identity) {
		return false;
	}

	/* Check if override host plugin entitlement is present and posix spawn attribute to disable A keys is passed */
	if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, OVERRIDE_PLUGIN_HOST_ENTITLEMENT)) {
		/* Entitled binaries opt in only if the spawn attribute requested keys-off. */
		bool ret = imgp->ip_flags & IMGPF_PLUGIN_HOST_DISABLE_A_KEYS;
		if (ret) {
			/* Log the keys-off decision with the process name for triage. */
			proc_t p = vfs_context_proc(imgp->ip_vfs_context);
			set_proc_name(imgp, p);
			os_log(OS_LOG_DEFAULT, "%s: running binary \"%s\" in keys-off mode due to posix_spawnattr_disable_ptr_auth_a_keys_np", __func__, p->p_name);
		}
		return ret;
	}

	/* Disabling library validation is a good signal that this process plans to host plugins */
	const char *const disable_lv_entitlements[] = {
		"com.apple.security.cs.disable-library-validation",
		"com.apple.private.cs.automator-plugins",
		CLEAR_LV_ENTITLEMENT,
	};
	for (size_t i = 0; i < ARRAY_COUNT(disable_lv_entitlements); i++) {
		const char *entitlement = disable_lv_entitlements[i];
		if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, entitlement)) {
			proc_t p = vfs_context_proc(imgp->ip_vfs_context);
			set_proc_name(imgp, p);
			os_log(OS_LOG_DEFAULT, "%s: running binary \"%s\" in keys-off mode due to entitlement: %s", __func__, p->p_name, entitlement);
			return true;
		}
	}

	/* From /System/Library/Security/HardeningExceptions.plist */
	const char *const hardening_exceptions[] = {
		"com.apple.perl5", /* Scripting engines may load third party code and jit*/
		"com.apple.perl", /* Scripting engines may load third party code and jit*/
		"org.python.python", /* Scripting engines may load third party code and jit*/
		"com.apple.expect", /* Scripting engines may load third party code and jit*/
		"com.tcltk.wish", /* Scripting engines may load third party code and jit*/
		"com.tcltk.tclsh", /* Scripting engines may load third party code and jit*/
		"com.apple.ruby", /* Scripting engines may load third party code and jit*/
		"com.apple.bash", /* Required for the 'enable' command */
		"com.apple.zsh", /* Required for the 'zmodload' command */
		"com.apple.ksh", /* Required for 'builtin' command */
		"com.apple.sh", /* rdar://138353488: sh re-execs into zsh or bash, which are exempted */
	};
	/* Prefix match: the signing identity need only start with the exception string. */
	for (size_t i = 0; i < ARRAY_COUNT(hardening_exceptions); i++) {
		if (strncmp(hardening_exceptions[i], identity, strlen(hardening_exceptions[i])) == 0) {
			proc_t p = vfs_context_proc(imgp->ip_vfs_context);
			set_proc_name(imgp, p);
			os_log(OS_LOG_DEFAULT, "%s: running binary \"%s\" in keys-off mode due to identity: %s", __func__, p->p_name, identity);
			return true;
		}
	}

	return false;
}
643 #endif /* XNU_TARGET_OS_OSX */
644 
645 static int
grade_binary_override(cpu_type_t __unused exectype,cpu_subtype_t __unused execsubtype,cpu_subtype_t execfeatures __unused,bool allow_simulator_binary __unused)646 grade_binary_override(cpu_type_t __unused exectype, cpu_subtype_t __unused execsubtype, cpu_subtype_t execfeatures __unused,
647     bool allow_simulator_binary __unused)
648 {
649 #if ALLOW_FORCING_ARM64_32
650 	if (force_arm64_32) {
651 		/* Forcing ARM64_32 takes precedence over 'bingrade' boot-arg. */
652 		if (exectype == CPU_TYPE_ARM64_32 && execsubtype == CPU_SUBTYPE_ARM64_32_V8) {
653 			return BINGRADE_OVERRIDE_MAX;
654 		} else {
655 			/* Stop trying to match. */
656 			return 0;
657 		}
658 	}
659 #endif /* ALLOW_FORCING_ARM64_32 */
660 
661 #if DEVELOPMENT || DEBUG
662 	if (num_bingrade_overrides == -1) {
663 		num_bingrade_overrides = parse_bingrade_override_bootarg(bingrade_overrides, MAX_BINGRADE_OVERRIDES, NULL);
664 	}
665 
666 	if (num_bingrade_overrides == 0) {
667 		return -1;
668 	}
669 
670 	for (int i = 0; i < num_bingrade_overrides; i++) {
671 		if (bingrade_overrides[i].cputype == exectype && bingrade_overrides[i].cpusubtype == execsubtype &&
672 		    (bingrade_overrides[i].execfeatures == EXECFEATURES_OVERRIDE_WILDCARD ||
673 		    bingrade_overrides[i].execfeatures == execfeatures)) {
674 			return BINGRADE_OVERRIDE_MAX - i;
675 		}
676 	}
677 #endif /* DEVELOPMENT || DEBUG */
678 	/* exectype/execsubtype Not found in override list */
679 	return -1;
680 }
681 
682 load_return_t
load_machfile(struct image_params * imgp,struct mach_header * header,thread_t thread,vm_map_t * mapp,load_result_t * result)683 load_machfile(
684 	struct image_params     *imgp,
685 	struct mach_header      *header,
686 	thread_t                thread,
687 	vm_map_t                *mapp,
688 	load_result_t           *result
689 	)
690 {
691 	struct vnode            *vp = imgp->ip_vp;
692 	off_t                   file_offset = imgp->ip_arch_offset;
693 	off_t                   macho_size = imgp->ip_arch_size;
694 	off_t                   total_size = 0;
695 	off_t                   file_size = imgp->ip_vattr->va_data_size;
696 	pmap_t                  pmap = 0;       /* protected by create_map */
697 	vm_map_t                map;
698 	load_result_t           myresult;
699 	load_return_t           lret;
700 	boolean_t enforce_hard_pagezero = TRUE;
701 	int in_exec = (imgp->ip_flags & IMGPF_EXEC);
702 	task_t task = current_task();
703 	int64_t                 aslr_page_offset = 0;
704 	int64_t                 dyld_aslr_page_offset = 0;
705 	int64_t                 aslr_section_size = 0;
706 	int64_t                 aslr_section_offset = 0;
707 	kern_return_t           kret;
708 	unsigned int            pmap_flags = 0;
709 
710 	if (os_add_overflow(file_offset, macho_size, &total_size) ||
711 	    total_size > file_size) {
712 		return LOAD_BADMACHO;
713 	}
714 
715 	result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
716 	result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
717 #if defined(HAS_APPLE_PAC)
718 	pmap_flags |= (imgp->ip_flags & IMGPF_NOJOP) ? PMAP_CREATE_DISABLE_JOP : 0;
719 #endif /* defined(HAS_APPLE_PAC) */
720 #if CONFIG_ROSETTA
721 	pmap_flags |= (imgp->ip_flags & IMGPF_ROSETTA) ? PMAP_CREATE_ROSETTA : 0;
722 #endif
723 	pmap_flags |= result->is_64bit_addr ? PMAP_CREATE_64BIT : 0;
724 
725 	task_t ledger_task;
726 	if (imgp->ip_new_thread) {
727 		ledger_task = get_threadtask(imgp->ip_new_thread);
728 	} else {
729 		ledger_task = task;
730 	}
731 
732 #if XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES
733 	if (imgp->ip_px_sa != NULL) {
734 		struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
735 		if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) {
736 			pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
737 		}
738 	}
739 #endif /* XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGE */
740 
741 	pmap = pmap_create_options(get_task_ledger(ledger_task),
742 	    (vm_map_size_t) 0,
743 	    pmap_flags);
744 	if (pmap == NULL) {
745 		return LOAD_RESOURCE;
746 	}
747 	map = vm_map_create_options(pmap, 0,
748 	    vm_compute_max_offset(result->is_64bit_addr),
749 	    VM_MAP_CREATE_PAGEABLE);
750 
751 #if defined(__arm64__)
752 	if (result->is_64bit_addr) {
753 		/* enforce 16KB alignment of VM map entries */
754 		vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
755 	} else {
756 		vm_map_set_page_shift(map, page_shift_user32);
757 	}
758 #endif /* __arm64__ */
759 
760 #if PMAP_CREATE_FORCE_4K_PAGES
761 	if (pmap_flags & PMAP_CREATE_FORCE_4K_PAGES) {
762 		DEBUG4K_LIFE("***** launching '%s' as 4k *****\n", vp->v_name);
763 		vm_map_set_page_shift(map, FOURK_PAGE_SHIFT);
764 	}
765 #endif /* PMAP_CREATE_FORCE_4K_PAGES */
766 
767 #ifndef CONFIG_ENFORCE_SIGNED_CODE
768 	/* This turns off faulting for executable pages, which allows
769 	 * to circumvent Code Signing Enforcement. The per process
770 	 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
771 	 * global flag.
772 	 */
773 	if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
774 		vm_map_disable_NX(map);
775 		// TODO: Message Trace or log that this is happening
776 	}
777 #endif
778 
779 	/* Forcibly disallow execution from data pages on even if the arch
780 	 * normally permits it. */
781 	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
782 		vm_map_disallow_data_exec(map);
783 	}
784 
785 	/*
786 	 * Compute a random offset for ASLR, and an independent random offset for dyld.
787 	 */
788 	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
789 		vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
790 		aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;
791 
792 		aslr_page_offset = random();
793 		aslr_page_offset = (aslr_page_offset % (vm_map_get_max_aslr_slide_pages(map) - 1)) + 1;
794 		aslr_page_offset <<= vm_map_page_shift(map);
795 
796 		dyld_aslr_page_offset = random();
797 		dyld_aslr_page_offset = (dyld_aslr_page_offset %
798 		    (vm_map_get_max_loader_aslr_slide_pages(map) - 1)) + 1;
799 		dyld_aslr_page_offset <<= vm_map_page_shift(map);
800 
801 		aslr_page_offset += aslr_section_offset;
802 	}
803 	if (vm_map_page_shift(map) < (int)PAGE_SHIFT) {
804 		DEBUG4K_LOAD("slide=0x%llx dyld_slide=0x%llx\n", aslr_page_offset, dyld_aslr_page_offset);
805 	}
806 
807 	if (!result) {
808 		result = &myresult;
809 	}
810 
811 	*result = load_result_null;
812 
813 	/*
814 	 * re-set the bitness on the load result since we cleared the load result above.
815 	 */
816 	result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
817 	result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
818 
819 	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
820 	    0, aslr_page_offset, dyld_aslr_page_offset, result,
821 	    NULL, imgp);
822 
823 	if (lret != LOAD_SUCCESS) {
824 		imgp->ip_free_map = map;
825 		return lret;
826 	}
827 
828 	/*
829 	 * From now on it's safe to query entitlements via the vnode interface. Let's get figuring
830 	 * out whether we're a security relevant binary out of the way immediately.
831 	 */
832 	switch (exec_check_security_entitlement(imgp, HARDENED_PROCESS)) {
833 	case EXEC_SECURITY_INVALID_CONFIG:
834 		imgp->ip_free_map = map;
835 		return LOAD_BADMACHO;
836 	case EXEC_SECURITY_ENTITLED:
837 		result->is_hardened_process = true;
838 		break;
839 	case EXEC_SECURITY_NOT_ENTITLED:
840 		result->is_hardened_process = false;
841 		break;
842 	}
843 
844 #if __x86_64__
845 	/*
846 	 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
847 	 */
848 	if (!result->is_64bit_addr) {
849 		enforce_hard_pagezero = FALSE;
850 	}
851 
852 	/*
853 	 * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
854 	 * to the start address for "anywhere" memory allocations.
855 	 */
856 #define VM_MAP_HIGH_START_BITS_COUNT 8
857 #define VM_MAP_HIGH_START_BITS_SHIFT 27
858 	if (result->is_64bit_addr &&
859 	    (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
860 		int random_bits;
861 		vm_map_offset_t high_start;
862 
863 		random_bits = random();
864 		random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
865 		high_start = (((vm_map_offset_t)random_bits)
866 		        << VM_MAP_HIGH_START_BITS_SHIFT);
867 		vm_map_set_high_start(map, high_start);
868 	}
869 #endif /* __x86_64__ */
870 
871 	/*
872 	 * Check to see if the page zero is enforced by the map->min_offset.
873 	 */
874 	if (enforce_hard_pagezero &&
875 	    (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
876 #if __arm64__
877 		if (
878 			!result->is_64bit_addr && /* not 64-bit address space */
879 			!(header->flags & MH_PIE) &&      /* not PIE */
880 			(vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
881 			PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
882 			result->has_pagezero && /* has a "soft" page zero */
883 			fourk_binary_compatibility_unsafe) {
884 			/*
885 			 * For backwards compatibility of "4K" apps on
886 			 * a 16K system, do not enforce a hard page zero...
887 			 */
888 		} else
889 #endif /* __arm64__ */
890 		{
891 			imgp->ip_free_map = map;
892 			return LOAD_BADMACHO;
893 		}
894 	}
895 
896 #if __arm64__
897 	if (enforce_hard_pagezero && result->is_64bit_addr && (header->cputype == CPU_TYPE_ARM64)) {
898 		/* 64 bit ARM binary must have "hard page zero" of 4GB to cover the lower 32 bit address space */
899 		if (vm_map_has_hard_pagezero(map, 0x100000000) == FALSE) {
900 			imgp->ip_free_map = map;
901 			return LOAD_BADMACHO;
902 		}
903 	}
904 #endif
905 
906 	vm_commit_pagezero_status(map);
907 
908 	/*
909 	 * If this is an exec, then we are going to destroy the old
910 	 * task, and it's correct to halt it; if it's spawn, the
911 	 * task is not yet running, and it makes no sense.
912 	 */
913 	if (in_exec) {
914 		proc_t p = current_proc();
915 		/*
916 		 * Mark the task as halting and start the other
917 		 * threads towards terminating themselves.  Then
918 		 * make sure any threads waiting for a process
919 		 * transition get informed that we are committed to
920 		 * this transition, and then finally complete the
921 		 * task halting (wait for threads and then cleanup
922 		 * task resources).
923 		 *
924 		 * NOTE: task_start_halt() makes sure that no new
925 		 * threads are created in the task during the transition.
926 		 * We need to mark the workqueue as exiting before we
927 		 * wait for threads to terminate (at the end of which
928 		 * we no longer have a prohibition on thread creation).
929 		 *
930 		 * Finally, clean up any lingering workqueue data structures
931 		 * that may have been left behind by the workqueue threads
932 		 * as they exited (and then clean up the work queue itself).
933 		 */
934 		kret = task_start_halt(task);
935 		if (kret != KERN_SUCCESS) {
936 			imgp->ip_free_map = map;
937 			return LOAD_FAILURE;
938 		}
939 		proc_transcommit(p, 0);
940 		workq_mark_exiting(p);
941 		task_complete_halt(task);
942 		workq_exit(p);
943 
944 		/*
945 		 * Roll up accounting info to new task. The roll up is done after
946 		 * task_complete_halt to make sure the thread accounting info is
947 		 * rolled up to current_task.
948 		 */
949 		task_rollup_accounting_info(get_threadtask(thread), task);
950 	}
951 	*mapp = map;
952 
953 #if XNU_TARGET_OS_OSX
954 	if (process_is_plugin_host(imgp, result)) {
955 		/*
956 		 * We need to disable security policies for processes
957 		 * that run third party plugins.
958 		 */
959 		imgp->ip_flags |= IMGPF_3P_PLUGINS;
960 	}
961 
962 #if __has_feature(ptrauth_calls)
963 	/*
964 	 * arm64e plugin hosts currently run with JOP keys disabled, since they
965 	 * may need to run arm64 plugins.
966 	 */
967 	if (imgp->ip_flags & IMGPF_3P_PLUGINS) {
968 		imgp->ip_flags |= IMGPF_NOJOP;
969 		pmap_disable_user_jop(pmap);
970 	}
971 
972 #if CONFIG_ROSETTA
973 	/* Disable JOP keys if the Rosetta runtime being used isn't arm64e */
974 	if (result->is_rosetta && (imgp->ip_flags & IMGPF_NOJOP)) {
975 		pmap_disable_user_jop(pmap);
976 	}
977 #endif /* CONFIG_ROSETTA */
978 #endif /* __has_feature(ptrauth_calls)*/
979 #endif /* XNU_TARGET_OS_OSX */
980 
981 
982 	return LOAD_SUCCESS;
983 }
984 
/*
 * Debug toggle for verbose Mach-O loader logging; zero by default.
 * Intended to be flipped at runtime (e.g. from a debugger) on
 * DEVELOPMENT/DEBUG investigation — nothing in this file sets it.
 */
int macho_printf = 0;
/*
 * Print "args" via printf() only when the macho_printf toggle is set.
 * "args" must be a fully parenthesized printf argument list, e.g.
 * MACHO_PRINTF(("slide 0x%llx\n", slide)).
 */
#define MACHO_PRINTF(args)                              \
	do {                                            \
	        if (macho_printf) {                     \
	                printf args;                    \
	        }                                       \
	} while (0)
992 
993 
994 static boolean_t
pie_required(cpu_type_t exectype,cpu_subtype_t execsubtype)995 pie_required(
996 	cpu_type_t exectype,
997 	cpu_subtype_t execsubtype)
998 {
999 	switch (exectype) {
1000 	case CPU_TYPE_X86_64:
1001 		return FALSE;
1002 	case CPU_TYPE_ARM64:
1003 		return TRUE;
1004 	case CPU_TYPE_ARM:
1005 		switch (execsubtype) {
1006 		case CPU_SUBTYPE_ARM_V7K:
1007 			return TRUE;
1008 		}
1009 		break;
1010 	}
1011 	return FALSE;
1012 }
1013 
1014 /*
1015  * Grades the specified CPU type, CPU subtype, CPU features to determine an absolute weight, used in the determination
1016  * of running the associated binary on this machine.
1017  *
1018  * If an override boot-arg is specified, the boot-arg is parsed and its values are stored for later use in overriding
1019  * the system's hard-coded binary grading values.
1020  */
1021 int
grade_binary(cpu_type_t exectype,cpu_subtype_t execsubtype,cpu_subtype_t execfeatures,bool allow_simulator_binary)1022 grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, cpu_subtype_t execfeatures, bool allow_simulator_binary)
1023 {
1024 	extern int ml_grade_binary(cpu_type_t, cpu_subtype_t, cpu_subtype_t, bool);
1025 
1026 	int binary_grade;
1027 
1028 	if ((binary_grade = grade_binary_override(exectype, execsubtype, execfeatures, allow_simulator_binary)) < 0) {
1029 		return ml_grade_binary(exectype, execsubtype, execfeatures, allow_simulator_binary);
1030 	}
1031 
1032 	return binary_grade;
1033 }
1034 
1035 /*
1036  * The file size of a mach-o file is limited to 32 bits; this is because
1037  * this is the limit on the kalloc() of enough bytes for a mach_header and
1038  * the contents of its sizeofcmds, which is currently constrained to 32
1039  * bits in the file format itself.  We read into the kernel buffer the
1040  * commands section, and then parse it in order to parse the mach-o file
1041  * format load_command segment(s).  We are only interested in a subset of
1042  * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
1044  * just preflight the parse.
1045  */
/*
 * parse_machfile
 *
 * Read the Mach-O load commands of the image backing "vp" into a kernel
 * buffer and process them in four passes (see the pass comment below),
 * mapping segments into "map", setting up thread state on "thread",
 * loading code signatures, and — at depth 1 — recursing via
 * load_dylinker() (and load_rosetta() when configured) to load the
 * dynamic linker / runtime.
 *
 * vp:               vnode of the Mach-O file being loaded
 * map:              target VM map; VM_MAP_NULL means preflight only
 * thread:           thread to initialize; THREAD_NULL means preflight only
 * header:           Mach-O header already read from the file
 * file_offset:      offset of this Mach-O image within the file
 * macho_size:       size of this Mach-O image within the file
 * depth:            recursion level on entry (incremented below); > 2 fails
 * aslr_offset:      ASLR slide applied to PIE executables and dyld
 * dyld_aslr_offset: independent ASLR slide used when loading dyld
 * result:           load results for this image (in/out)
 * binresult:        main binary's results when loading dyld, else NULL
 * imgp:             image params for the current exec/spawn
 *
 * Returns LOAD_SUCCESS or a LOAD_* error code.
 */
static
load_return_t
parse_machfile(
	struct vnode            *vp,
	vm_map_t                map,
	thread_t                thread,
	struct mach_header      *header,
	off_t                   file_offset,
	off_t                   macho_size,
	int                     depth,
	int64_t                 aslr_offset,
	int64_t                 dyld_aslr_offset,
	load_result_t           *result,
	load_result_t           *binresult,
	struct image_params     *imgp
	)
{
	uint32_t                ncmds;
	struct load_command     *lcp;
	struct dylinker_command *dlp = 0;
	void *                  control;
	load_return_t           ret = LOAD_SUCCESS;
	void *                  addr;
	vm_size_t               alloc_size, cmds_size;
	size_t                  offset;
	size_t                  oldoffset;      /* for overflow check */
	int                     pass;
	proc_t                  p = vfs_context_proc(imgp->ip_vfs_context);
	int                     error;
	int                     resid = 0;
	int                     spawn = (imgp->ip_flags & IMGPF_SPAWN);
	size_t                  mach_header_sz = sizeof(struct mach_header);
	boolean_t               abi64;
	boolean_t               got_code_signatures = FALSE;
	boolean_t               found_header_segment = FALSE;
	boolean_t               found_xhdr = FALSE;
	boolean_t               found_version_cmd = FALSE;
	int64_t                 slide = 0;
	boolean_t               dyld_no_load_addr = FALSE;
	boolean_t               is_dyld = FALSE;
	vm_map_offset_t         effective_page_mask = PAGE_MASK;
#if __arm64__
	uint64_t                pagezero_end = 0;
	uint64_t                executable_end = 0;
	uint64_t                writable_start = 0;
	vm_map_size_t           effective_page_size;

	/* Use the target map's page geometry (may differ from the kernel's). */
	effective_page_mask = vm_map_page_mask(map);
	effective_page_size = vm_map_page_size(map);
#endif /* __arm64__ */

	/* 64-bit images carry the larger header; needed for bounds math below. */
	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 2) {
		return LOAD_FAILURE;
	}

	/*
	 * depth after increment: 1 = main executable, 2 = dyld/runtime,
	 * 3 = presumably an executable re-parsed beneath the runtime
	 * (see the MH_EXECUTE depth check below) — TODO confirm.
	 */
	depth++;

	/*
	 * Set CS_NO_UNTRUSTED_HELPERS by default; load_dylinker and load_rosetta
	 * will unset it if necessary.
	 */
	if (depth == 1) {
		result->csflags |= CS_NO_UNTRUSTED_HELPERS;
	}

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
	    ) {
		return LOAD_BADARCH;
	}

	/* A zero grade means this binary cannot run on this machine. */
	if (!grade_binary(header->cputype,
	    header->cpusubtype & ~CPU_SUBTYPE_MASK,
	    header->cpusubtype & CPU_SUBTYPE_MASK, TRUE)) {
		return LOAD_BADARCH;
	}

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {
	case MH_EXECUTE:
		if (depth != 1 && depth != 3) {
			return LOAD_FAILURE;
		}
		if (header->flags & MH_DYLDLINK) {
			/* Check properties of dynamic executables */
			if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
				return LOAD_FAILURE;
			}
			result->needs_dynlinker = TRUE;
		} else if (header->cputype == CPU_TYPE_X86_64) {
			/* x86_64 static binaries allowed */
#if CONFIG_ROSETTA
		} else if (imgp->ip_flags & IMGPF_ROSETTA) {
			/* Rosetta runtime allowed */
#endif /* CONFIG_ROSETTA */
		} else {
			/* Check properties of static executables (disallowed except for development) */
#if !(DEVELOPMENT || DEBUG)
			return LOAD_FAILURE;
#endif
		}
		break;
	case MH_DYLINKER:
		/* dyld itself is only ever loaded one level below the main binary. */
		if (depth != 2) {
			return LOAD_FAILURE;
		}
		is_dyld = TRUE;
		break;

	default:
		return LOAD_FAILURE;
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || is_dyld) {
		slide = aslr_offset;
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/* ensure header + sizeofcmds falls within the file */
	if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
	    (off_t)cmds_size > macho_size ||
	    round_page_overflow(cmds_size, &alloc_size) ||
	    alloc_size > INT_MAX) {
		return LOAD_BADMACHO;
	}

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = kalloc_data(alloc_size, Z_WAITOK);
	if (addr == NULL) {
		return LOAD_NOSPACE;
	}

	error = vn_rdwr(UIO_READ, vp, addr, (int)alloc_size, file_offset,
	    UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p);
	if (error) {
		kfree_data(addr, alloc_size);
		return LOAD_IOERROR;
	}

	if (resid) {
		{
			/* We must be able to read in as much as the mach_header indicated */
			kfree_data(addr, alloc_size);
			return LOAD_BADMACHO;
		}
	}

	/*
	 *  Scan through the commands, processing each one as necessary.
	 *  We parse in four passes through the headers:
	 *  0: determine if TEXT and DATA boundary can be page-aligned, load platform version
	 *  1: thread state, uuid, code signature
	 *  2: segments
	 *  3: dyld, encryption, check entry point
	 */

	boolean_t slide_realign = FALSE;
#if __arm64__
	/* Only 32-bit images may need the slide realigned for 4K/16K mixing. */
	if (!abi64) {
		slide_realign = TRUE;
	}
#endif

	for (pass = 0; pass <= 3; pass++) {
		if (pass == 1) {
#if __arm64__
			boolean_t       is_pie;
			int64_t         adjust;

			is_pie = ((header->flags & MH_PIE) != 0);
			if (pagezero_end != 0 &&
			    pagezero_end < effective_page_size) {
				/* need at least 1 page for PAGEZERO */
				adjust = effective_page_size;
				MACHO_PRINTF(("pagezero boundary at "
				    "0x%llx; adjust slide from "
				    "0x%llx to 0x%llx%s\n",
				    (uint64_t) pagezero_end,
				    slide,
				    slide + adjust,
				    (is_pie
				    ? ""
				    : " BUT NO PIE ****** :-(")));
				if (is_pie) {
					slide += adjust;
					pagezero_end += adjust;
					executable_end += adjust;
					writable_start += adjust;
				}
			}
			if (pagezero_end != 0) {
				result->has_pagezero = TRUE;
			}
			if (executable_end == writable_start &&
			    (executable_end & effective_page_mask) != 0 &&
			    (executable_end & FOURK_PAGE_MASK) == 0) {
				/*
				 * The TEXT/DATA boundary is 4K-aligned but
				 * not page-aligned.  Adjust the slide to make
				 * it page-aligned and avoid having a page
				 * with both write and execute permissions.
				 */
				adjust =
				    (effective_page_size -
				    (executable_end & effective_page_mask));
				MACHO_PRINTF(("page-unaligned X-W boundary at "
				    "0x%llx; adjust slide from "
				    "0x%llx to 0x%llx%s\n",
				    (uint64_t) executable_end,
				    slide,
				    slide + adjust,
				    (is_pie
				    ? ""
				    : " BUT NO PIE ****** :-(")));
				if (is_pie) {
					slide += adjust;
				}
			}
#endif /* __arm64__ */

			if (dyld_no_load_addr && binresult) {
				/*
				 * The dyld Mach-O does not specify a load address. Try to locate
				 * it right after the main binary. If binresult == NULL, load
				 * directly to the given slide.
				 */
				mach_vm_address_t max_vm_addr = binresult->max_vm_addr;
				slide = vm_map_round_page(slide + max_vm_addr, effective_page_mask);
			}
		}

		/*
		 * Check that the entry point is contained in an executable segment
		 */
		if ((pass == 3) && (thread != THREAD_NULL)) {
			if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) {
				/* Driver binaries must have driverkit platform */
				if (result->ip_platform == PLATFORM_DRIVERKIT) {
					/* Driver binaries have no entry point */
					ret = setup_driver_main(thread, slide, result);
				} else {
					ret = LOAD_FAILURE;
				}
			} else if (!result->using_lcmain && result->validentry == 0) {
				ret = LOAD_FAILURE;
			}
			/*
			 * NOTE(review): compares a load_return_t against
			 * KERN_SUCCESS — relies on LOAD_SUCCESS == KERN_SUCCESS
			 * (both zero); confirm before touching.
			 */
			if (ret != KERN_SUCCESS) {
				thread_state_initialize(thread);
				break;
			}
		}

		/*
		 * Check that some segment maps the start of the mach-o file, which is
		 * needed by the dynamic loader to read the mach headers, etc.
		 */
		if ((pass == 3) && (found_header_segment == FALSE)) {
			ret = LOAD_BADMACHO;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/* ensure enough space for a minimal load command */
			if (offset + sizeof(struct load_command) > cmds_size) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)((uintptr_t)addr + offset);
			/* oldoffset is currently unused here; overflow is checked via os_add_overflow below. */
			oldoffset = offset;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > cmds_size) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 * Note that each load command implementation is expected to validate
			 * that lcp->cmdsize is large enough to fit its specific struct type
			 * before dereferencing fields not covered by struct load_command.
			 */
			switch (lcp->cmd) {
			case LC_SEGMENT: {
				struct segment_command *scp = (struct segment_command *) lcp;
				if (scp->cmdsize < sizeof(*scp)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (pass == 0) {
					if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
						dyld_no_load_addr = TRUE;
						if (!slide_realign) {
							/* got what we need, bail early on pass 0 */
							continue;
						}
					}

#if __arm64__
					assert(!abi64);

					if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
						/* PAGEZERO */
						if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end) || pagezero_end > UINT32_MAX) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
					if (scp->initprot & VM_PROT_EXECUTE) {
						/* TEXT */
						if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end) || executable_end > UINT32_MAX) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
					if (scp->initprot & VM_PROT_WRITE) {
						/* DATA */
						if (os_add_overflow(scp->vmaddr, slide, &writable_start) || writable_start > UINT32_MAX) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
#endif /* __arm64__ */
					break;
				}

				if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
					found_xhdr = TRUE;
				}

				if (pass != 2) {
					break;
				}

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				    header->filetype,
				    control,
				    file_offset,
				    macho_size,
				    vp,
				    map,
				    slide,
				    result,
				    imgp);
				if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
					/* Enforce a single segment mapping offset zero, with R+X
					 * protection. */
					if (found_header_segment ||
					    ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
						ret = LOAD_BADMACHO;
						break;
					}
					found_header_segment = TRUE;
				}

				break;
			}
			case LC_SEGMENT_64: {
				struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
				if (scp64->cmdsize < sizeof(*scp64)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (pass == 0) {
					if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
						dyld_no_load_addr = TRUE;
					}
					/* got what we need, bail early on pass 0 */
					continue;
				}

				if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
					found_xhdr = TRUE;
				}

				if (pass != 2) {
					break;
				}

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				    header->filetype,
				    control,
				    file_offset,
				    macho_size,
				    vp,
				    map,
				    slide,
				    result,
				    imgp);

				if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
					/* Enforce a single segment mapping offset zero, with R+X
					 * protection. */
					if (found_header_segment ||
					    ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
						ret = LOAD_BADMACHO;
						break;
					}
					found_header_segment = TRUE;
				}

				break;
			}
			case LC_UNIXTHREAD: {
				boolean_t is_x86_64_compat_binary = FALSE;
				if (pass != 1) {
					break;
				}
#if CONFIG_ROSETTA
				if (depth == 2 && (imgp->ip_flags & IMGPF_ROSETTA)) {
					// Ignore dyld, Rosetta will parse it's load commands to get the
					// entry point.
					result->validentry = 1;
					break;
				}
#endif
				ret = load_unixthread(
					(struct thread_command *) lcp,
					thread,
					slide,
					is_x86_64_compat_binary,
					result);
				break;
			}
			case LC_MAIN:
				if (pass != 1) {
					break;
				}
				/* LC_MAIN is only honored on the main executable. */
				if (depth != 1) {
					break;
				}
				ret = load_main(
					(struct entry_point_command *) lcp,
					thread,
					slide,
					result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3) {
					break;
				}
				/* Only one LC_LOAD_DYLINKER allowed, and only at depth 1. */
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					/* (char *)addr + cmds_size bounds the command buffer. */
					ret = load_uuid((struct uuid_command *) lcp,
					    (char *)addr + cmds_size,
					    result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1) {
					break;
				}

				/* pager -> uip ->
				 *  load signatures & store in uip
				 *  set VM object "signed_pages"
				 */
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					header->cpusubtype,
					result,
					imgp);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					    "for file \"%s\"\n",
					    proc_getpid(p), ret, vp->v_name);
					/*
					 * Allow injections to be ignored on devices w/o enforcement enabled
					 */
					if (!cs_process_global_enforcement()) {
						ret = LOAD_SUCCESS; /* ignore error */
					}
				} else {
					got_code_signatures = TRUE;
				}

				if (got_code_signatures) {
					unsigned tainted = CS_VALIDATE_TAINTED;
					boolean_t valid = FALSE;
					vm_size_t off = 0;


					if (cs_debug > 10) {
						printf("validating initial pages of %s\n", vp->v_name);
					}

					/* Validate the already-read command pages against the signature. */
					while (off < alloc_size && ret == LOAD_SUCCESS) {
						tainted = CS_VALIDATE_TAINTED;

						valid = cs_validate_range(vp,
						    NULL,
						    file_offset + off,
						    (const void *)((uintptr_t)addr + off),
						    MIN(PAGE_SIZE, cmds_size),
						    &tainted);
						if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
							if (cs_debug) {
								printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
								    vp->v_name, proc_getpid(p), (long long)(file_offset + off), valid, tainted, result->csflags);
							}
							if (cs_process_global_enforcement() ||
							    (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) {
								ret = LOAD_FAILURE;
							}
							result->csflags &= ~CS_VALID;
						}
						off += PAGE_SIZE;
					}
				}

				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3) {
					break;
				}
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp, file_offset,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					os_reason_t load_failure_reason = OS_REASON_NULL;
					printf("proc %d: set_code_unprotect() error %d "
					    "for file \"%s\"\n",
					    proc_getpid(p), ret, vp->v_name);
					/*
					 * Don't let the app run if it's
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);

						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
						load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
					} else {
						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
						load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
					}

					/*
					 * Don't signal the process if it was forked and in a partially constructed
					 * state as part of a spawn -- it will just be torn down when the exec fails.
					 */
					if (!spawn) {
						assert(load_failure_reason != OS_REASON_NULL);
						{
							psignal_with_reason(current_proc(), SIGKILL, load_failure_reason);
							load_failure_reason = OS_REASON_NULL;
						}
					} else {
						os_reason_free(load_failure_reason);
						load_failure_reason = OS_REASON_NULL;
					}
				}
				break;
#endif
			case LC_VERSION_MIN_IPHONEOS:
			case LC_VERSION_MIN_MACOSX:
			case LC_VERSION_MIN_WATCHOS:
			case LC_VERSION_MIN_TVOS: {
				struct version_min_command *vmc;

				if (depth != 1 || pass != 0) {
					break;
				}
				vmc = (struct version_min_command *) lcp;
				ret = load_version(vmc, &found_version_cmd, imgp, result);
#if XNU_TARGET_OS_OSX
				if (ret == LOAD_SUCCESS) {
					if (result->ip_platform == PLATFORM_IOS) {
						vm_map_mark_alien(map);
					} else {
						assert(!vm_map_is_alien(map));
					}
				}
#endif /* XNU_TARGET_OS_OSX */
				break;
			}
			case LC_BUILD_VERSION: {
				if (depth != 1 || pass != 0) {
					break;
				}
				struct build_version_command* bvc = (struct build_version_command*)lcp;
				if (bvc->cmdsize < sizeof(*bvc)) {
					ret = LOAD_BADMACHO;
					break;
				}
				/* Exactly one version command (of any flavor) is allowed. */
				if (found_version_cmd == TRUE) {
					ret = LOAD_BADMACHO;
					break;
				}
				result->ip_platform = bvc->platform;
				result->lr_sdk = bvc->sdk;
				result->lr_min_sdk = bvc->minos;
				found_version_cmd = TRUE;
#if XNU_TARGET_OS_OSX
				if (result->ip_platform == PLATFORM_IOS) {
					vm_map_mark_alien(map);
				} else {
					assert(!vm_map_is_alien(map));
				}
#endif /* XNU_TARGET_OS_OSX */
				break;
			}
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS) {
				break;
			}
		}
		if (ret != LOAD_SUCCESS) {
			break;
		}
	}

	if (ret == LOAD_SUCCESS) {
		if (!got_code_signatures && cs_process_global_enforcement()) {
			ret = LOAD_FAILURE;
		}

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

		if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
			/*
			 * load the dylinker, and slide it by the independent DYLD ASLR
			 * offset regardless of the PIE-ness of the main binary.
			 */
			ret = load_dylinker(dlp, header->cputype, map, thread, depth,
			    dyld_aslr_offset, result, imgp);
		}

#if CONFIG_ROSETTA
		if ((ret == LOAD_SUCCESS) && (depth == 1) && (imgp->ip_flags & IMGPF_ROSETTA)) {
			ret = load_rosetta(map, thread, result, imgp);
			if (ret == LOAD_SUCCESS) {
				if (result->user_stack_alloc_size != 0) {
					// If a stack allocation is required then add a 4gb gap after the main
					// binary/dyld for the worst case static translation size.
					mach_vm_size_t reserved_aot_size = 0x100000000;
					vm_map_offset_t mask = vm_map_page_mask(map);

					mach_vm_address_t vm_end;
					if (dlp != 0) {
						vm_end = vm_map_round_page(result->dynlinker_max_vm_addr, mask);
					} else {
						vm_end = vm_map_round_page(result->max_vm_addr, mask);
					}

					mach_vm_size_t user_stack_size = vm_map_round_page(result->user_stack_alloc_size, mask);
					result->user_stack = vm_map_round_page(vm_end + user_stack_size + reserved_aot_size + slide, mask);
				}
			}
		}
#endif

		if ((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
#if CONFIG_ENFORCE_SIGNED_CODE
			if (!(result->csflags & CS_NO_UNTRUSTED_HELPERS)) {
				ret = LOAD_FAILURE;
			}
#endif
		}
	}

	/* A bad Mach-O with an __XHDR segment is flagged distinctly (packed/UPX-style). */
	if (ret == LOAD_BADMACHO && found_xhdr) {
		ret = LOAD_BADMACHO_UPX;
	}

	kfree_data(addr, alloc_size);

	return ret;
}
1807 
1808 load_return_t
validate_potential_simulator_binary(cpu_type_t exectype __unused,struct image_params * imgp __unused,off_t file_offset __unused,off_t macho_size __unused)1809 validate_potential_simulator_binary(
1810 	cpu_type_t               exectype __unused,
1811 	struct image_params      *imgp __unused,
1812 	off_t                    file_offset __unused,
1813 	off_t                    macho_size __unused)
1814 {
1815 #if __x86_64__
1816 	/* Allow 32 bit exec only for simulator binaries */
1817 	if (bootarg_no32exec && imgp != NULL && exectype == CPU_TYPE_X86) {
1818 		if (imgp->ip_simulator_binary == IMGPF_SB_DEFAULT) {
1819 			boolean_t simulator_binary = check_if_simulator_binary(imgp, file_offset, macho_size);
1820 			imgp->ip_simulator_binary = simulator_binary ? IMGPF_SB_TRUE : IMGPF_SB_FALSE;
1821 		}
1822 
1823 		if (imgp->ip_simulator_binary != IMGPF_SB_TRUE) {
1824 			return LOAD_BADARCH;
1825 		}
1826 	}
1827 #endif
1828 	return LOAD_SUCCESS;
1829 }
1830 
#if __x86_64__
/*
 * check_if_simulator_binary:
 *	Reads the Mach-O header and load commands of the slice at
 *	[file_offset, file_offset + macho_size) in imgp->ip_vp and returns
 *	TRUE if any load command marks the image as a simulator binary
 *	(LC_VERSION_MIN_WATCHOS, LC_VERSION_MIN_IPHONEOS, or an
 *	LC_BUILD_VERSION targeting an iOS/watchOS simulator platform).
 *	Returns FALSE on any read/validation failure.
 */
static boolean_t
check_if_simulator_binary(
	struct image_params     *imgp,
	off_t                   file_offset,
	off_t                   macho_size)
{
	struct mach_header      *header;
	char                    *ip_vdata = NULL;
	kauth_cred_t            cred = NULL;
	uint32_t                ncmds;
	struct load_command     *lcp;
	boolean_t               simulator_binary = FALSE;
	void *                  addr = NULL;
	vm_size_t               alloc_size, cmds_size;
	size_t                  offset;
	proc_t                  p = current_proc();             /* XXXX */
	int                     error;
	int                     resid = 0;
	size_t                  mach_header_sz = sizeof(struct mach_header);


	cred =  kauth_cred_proc_ref(p);

	/* Allocate page to copyin mach header */
	ip_vdata = kalloc_data(PAGE_SIZE, Z_WAITOK | Z_ZERO);
	if (ip_vdata == NULL) {
		goto bad;
	}

	/* Read the Mach-O header */
	/*
	 * NOTE(review): resid is not checked here; a short read leaves the
	 * tail of the Z_ZERO'd page as zeroes, so a truncated header fails
	 * the magic/size checks below rather than reading garbage.
	 */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata,
	    PAGE_SIZE, file_offset,
	    UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
	    cred, &resid, p);
	if (error) {
		goto bad;
	}

	header = (struct mach_header *)ip_vdata;

	/* 64-bit images carry the larger mach_header_64 before the commands. */
	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/* ensure header + sizeofcmds falls within the file */
	if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
	    (off_t)cmds_size > macho_size ||
	    round_page_overflow(cmds_size, &alloc_size) ||
	    alloc_size > INT_MAX) {
		goto bad;
	}

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = kalloc_data(alloc_size, Z_WAITOK);
	if (addr == NULL) {
		goto bad;
	}

	/* Re-read header plus the full load-command area in one shot. */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, (int)alloc_size, file_offset,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
	if (error) {
		goto bad;
	}

	if (resid) {
		/* We must be able to read in as much as the mach_header indicated */
		goto bad;
	}

	/*
	 * Loop through each of the load_commands indicated by the
	 * Mach-O header; if an absurd value is provided, we just
	 * run off the end of the reserved section by incrementing
	 * the offset too far, so we are implicitly fail-safe.
	 */
	offset = mach_header_sz;
	ncmds = header->ncmds;

	while (ncmds--) {
		/* ensure enough space for a minimal load command */
		if (offset + sizeof(struct load_command) > cmds_size) {
			break;
		}

		/*
		 *	Get a pointer to the command.
		 */
		lcp = (struct load_command *)((uintptr_t)addr + offset);

		/*
		 * Perform prevalidation of the struct load_command
		 * before we attempt to use its contents.  Invalid
		 * values are ones which result in an overflow, or
		 * which can not possibly be valid commands, or which
		 * straddle or exist past the reserved section at the
		 * start of the image.
		 */
		if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
		    lcp->cmdsize < sizeof(struct load_command) ||
		    offset > cmds_size) {
			break;
		}

		/* Check if its a simulator binary. */
		switch (lcp->cmd) {
		case LC_VERSION_MIN_WATCHOS:
			simulator_binary = TRUE;
			break;

		case LC_BUILD_VERSION: {
			struct build_version_command *bvc;

			bvc = (struct build_version_command *) lcp;
			if (bvc->cmdsize < sizeof(*bvc)) {
				/* unsafe to use this command struct if cmdsize
				* validated above is too small for it to fit */
				break;
			}
			if (bvc->platform == PLATFORM_IOSSIMULATOR ||
			    bvc->platform == PLATFORM_WATCHOSSIMULATOR) {
				simulator_binary = TRUE;
			}

			break;
		}

		case LC_VERSION_MIN_IPHONEOS: {
			simulator_binary = TRUE;
			break;
		}

		default:
			/* ignore other load commands */
			break;
		}

		/* First positive identification ends the scan. */
		if (simulator_binary == TRUE) {
			break;
		}
	}

bad:
	/* Common exit: release buffers and the credential reference. */
	if (ip_vdata) {
		kfree_data(ip_vdata, PAGE_SIZE);
	}

	if (cred) {
		kauth_cred_unref(&cred);
	}

	if (addr) {
		kfree_data(addr, alloc_size);
	}

	return simulator_binary;
}
#endif /* __x86_64__ */
1992 
#if CONFIG_CODE_DECRYPTION

#define APPLE_UNPROTECTED_HEADER_SIZE   (3 * 4096)

/*
 * unprotect_dsmos_segment:
 *	Arrange for the "protected" (DSMOS-encrypted) portion of a mapped
 *	segment to be transformed on fault.  The first
 *	APPLE_UNPROTECTED_HEADER_SIZE bytes of the slice are left as-is;
 *	anything past that is routed through the apple-protect pager.
 *	Returns LOAD_SUCCESS, or LOAD_BADMACHO/LOAD_FAILURE on error.
 */
static load_return_t
unprotect_dsmos_segment(
	uint64_t        file_off,
	uint64_t        file_size,
	struct vnode    *vp,
	off_t           macho_offset,
	vm_map_t        map,
	vm_map_offset_t map_addr,
	vm_map_size_t   map_size)
{
	kern_return_t   kr;
	uint64_t        slice_off;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	slice_off = file_off - macho_offset;
	if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    slice_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			uint64_t delta_file;
			vm_map_offset_t delta_map;

			delta_file = (uint64_t)APPLE_UNPROTECTED_HEADER_SIZE;
			delta_file -= slice_off;
			if (os_convert_overflow(delta_file, &delta_map)) {
				return LOAD_BADMACHO;
			}
			if (os_add_overflow(map_addr, delta_map, &map_addr)) {
				return LOAD_BADMACHO;
			}
			if (os_sub_overflow(map_size, delta_map, &map_size)) {
				return LOAD_BADMACHO;
			}
		}
		/* ... transform the rest of the mapping. */
		struct pager_crypt_info crypt_info;
		crypt_info.page_decrypt = dsmos_page_transform;
		crypt_info.crypt_ops = NULL;
		crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
		/*
		 * NOTE(review): crypt_ops is first NULL'd, then set to this
		 * opaque non-pointer constant; presumably a sentinel the
		 * apple-protect pager recognizes rather than a real ops
		 * table — confirm against vm_map_apple_protected().
		 */
		crypt_info.crypt_ops = (void *)0x2e69cf40;
		vm_map_offset_t crypto_backing_offset;
		crypto_backing_offset = -1; /* i.e. use map entry's offset */
#if VM_MAP_DEBUG_APPLE_PROTECT
		if (vm_map_debug_apple_protect) {
			struct proc *p;
			p = current_proc();
			printf("APPLE_PROTECT: %d[%s] map %p "
			    "[0x%llx:0x%llx] %s(%s)\n",
			    proc_getpid(p), p->p_comm, map,
			    (uint64_t) map_addr,
			    (uint64_t) (map_addr + map_size),
			    __FUNCTION__, vp->v_name);
		}
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

		/* The DSMOS pager can only be used by apple signed code */
		struct cs_blob * blob = csvnode_get_blob(vp, file_off);
		if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
			return LOAD_FAILURE;
		}

		kr = vm_map_apple_protected(map,
		    map_addr,
		    map_addr + map_size,
		    crypto_backing_offset,
		    &crypt_info,
		    CRYPTID_APP_ENCRYPTION);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else   /* CONFIG_CODE_DECRYPTION */
/*
 * unprotect_dsmos_segment:
 *	Stub for kernels built without code-decryption support; protected
 *	segments require no transformation, so this always succeeds.
 */
static load_return_t
unprotect_dsmos_segment(
	__unused        uint64_t        file_off,
	__unused        uint64_t        file_size,
	__unused        struct vnode    *vp,
	__unused        off_t           macho_offset,
	__unused        vm_map_t        map,
	__unused        vm_map_offset_t map_addr,
	__unused        vm_map_size_t   map_size)
{
	return LOAD_SUCCESS;
}
#endif  /* CONFIG_CODE_DECRYPTION */
2096 
2097 
2098 /*
2099  * map_segment:
2100  *	Maps a Mach-O segment.
2101  */
2102 static kern_return_t
map_segment(vm_map_t map,vm_map_offset_t vm_start,vm_map_offset_t vm_end,vm_map_kernel_flags_t vmk_flags,memory_object_control_t control,vm_map_offset_t file_start,vm_map_offset_t file_end,vm_prot_t initprot,vm_prot_t maxprot,load_result_t * result)2103 map_segment(
2104 	vm_map_t                map,
2105 	vm_map_offset_t         vm_start,
2106 	vm_map_offset_t         vm_end,
2107 	vm_map_kernel_flags_t   vmk_flags,
2108 	memory_object_control_t control,
2109 	vm_map_offset_t         file_start,
2110 	vm_map_offset_t         file_end,
2111 	vm_prot_t               initprot,
2112 	vm_prot_t               maxprot,
2113 	load_result_t           *result)
2114 {
2115 	kern_return_t   ret;
2116 	vm_map_offset_t effective_page_mask;
2117 
2118 	if (vm_end < vm_start ||
2119 	    file_end < file_start) {
2120 		return LOAD_BADMACHO;
2121 	}
2122 	if (vm_end == vm_start ||
2123 	    file_end == file_start) {
2124 		/* nothing to map... */
2125 		return LOAD_SUCCESS;
2126 	}
2127 
2128 	effective_page_mask = vm_map_page_mask(map);
2129 
2130 	if (vm_map_page_aligned(vm_start, effective_page_mask) &&
2131 	    vm_map_page_aligned(vm_end, effective_page_mask) &&
2132 	    vm_map_page_aligned(file_start, effective_page_mask) &&
2133 	    vm_map_page_aligned(file_end, effective_page_mask)) {
2134 		/* all page-aligned and map-aligned: proceed */
2135 	} else {
2136 		/*
2137 		 * There's no more fourk_pager to handle mis-alignments;
2138 		 * all binaries should be page-aligned and map-aligned
2139 		 */
2140 		return LOAD_BADMACHO;
2141 	}
2142 
2143 #if !defined(XNU_TARGET_OS_OSX)
2144 	(void) result;
2145 #else /* !defined(XNU_TARGET_OS_OSX) */
2146 	/*
2147 	 * This process doesn't have its new csflags (from
2148 	 * the image being loaded) yet, so tell VM to override the
2149 	 * current process's CS_ENFORCEMENT for this mapping.
2150 	 */
2151 	if (result->csflags & CS_ENFORCEMENT) {
2152 		vmk_flags.vmkf_cs_enforcement = TRUE;
2153 	} else {
2154 		vmk_flags.vmkf_cs_enforcement = FALSE;
2155 	}
2156 	vmk_flags.vmkf_cs_enforcement_override = TRUE;
2157 #endif /* !defined(XNU_TARGET_OS_OSX) */
2158 
2159 	if (result->is_rosetta && (initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE) {
2160 		vmk_flags.vmkf_translated_allow_execute = TRUE;
2161 	}
2162 
2163 	if (control != MEMORY_OBJECT_CONTROL_NULL) {
2164 		/* no copy-on-read for mapped binaries */
2165 		vmk_flags.vmkf_no_copy_on_read = 1;
2166 		ret = vm_map_enter_mem_object_control(
2167 			map,
2168 			&vm_start,
2169 			file_end - file_start,
2170 			(mach_vm_offset_t)0,
2171 			vmk_flags,
2172 			control,
2173 			file_start,
2174 			TRUE, /* copy */
2175 			initprot, maxprot,
2176 			VM_INHERIT_DEFAULT);
2177 	} else {
2178 		ret = mach_vm_map_kernel(
2179 			map,
2180 			&vm_start,
2181 			file_end - file_start,
2182 			(mach_vm_offset_t)0,
2183 			vmk_flags,
2184 			IPC_PORT_NULL,
2185 			0, /* offset */
2186 			TRUE, /* copy */
2187 			initprot, maxprot,
2188 			VM_INHERIT_DEFAULT);
2189 	}
2190 	if (ret != KERN_SUCCESS) {
2191 		return LOAD_NOSPACE;
2192 	}
2193 	return LOAD_SUCCESS;
2194 }
2195 
/*
 * load_segment:
 *	Validate one LC_SEGMENT/LC_SEGMENT_64 load command and establish its
 *	mapping in `map` (or just record address bounds when map is
 *	VM_MAP_NULL).  Handles __PAGEZERO (raises the map's minimum offset),
 *	file-backed portions, trailing zero-fill, SG_READ_ONLY bookkeeping,
 *	DSMOS-protected segments, and entry-point/all_image_info tracking in
 *	`result`.  Returns LOAD_SUCCESS or a LOAD_* error.
 */
static
load_return_t
load_segment(
	struct load_command     *lcp,
	uint32_t                filetype,
	void *                  control,
	off_t                   pager_offset,
	off_t                   macho_size,
	struct vnode            *vp,
	vm_map_t                map,
	int64_t                 slide,
	load_result_t           *result,
	struct image_params     *imgp)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t           ret;
	vm_map_size_t           delta_size;
	vm_prot_t               initprot;
	vm_prot_t               maxprot;
	size_t                  segment_command_size, total_section_size,
	    single_section_size;
	uint64_t                file_offset, file_size;
	vm_map_offset_t         vm_offset;
	size_t                  vm_size;
	vm_map_offset_t         vm_start, vm_end, vm_end_aligned;
	vm_map_offset_t         file_start, file_end;
	vm_map_kernel_flags_t   vmk_flags;
	kern_return_t           kr;
	boolean_t               verbose;
	vm_map_size_t           effective_page_size;
	vm_map_offset_t         effective_page_mask;
#if __arm64__
	boolean_t               fourk_align;
#endif /* __arm64__ */

	(void)imgp;

	effective_page_size = vm_map_page_size(map);
	effective_page_mask = vm_map_page_mask(map);
	vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();

	/* Pick command/section sizes for the 32- vs 64-bit flavor. */
	verbose = FALSE;
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size  = sizeof(struct section_64);
#if __arm64__
		/* 64-bit binary: should already be 16K-aligned */
		fourk_align = FALSE;

		if (vm_map_page_shift(map) == FOURK_PAGE_SHIFT &&
		    PAGE_SHIFT != FOURK_PAGE_SHIFT) {
			fourk_align = TRUE;
			verbose = TRUE;
		}
#endif /* __arm64__ */
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size  = sizeof(struct section);
#if __arm64__
		/* 32-bit binary or arm64_32 binary: should already be page-aligned */
		fourk_align = FALSE;
#endif /* __arm64__ */
	}
	if (lcp->cmdsize < segment_command_size) {
		DEBUG4K_ERROR("LOAD_BADMACHO cmdsize %d < %zu\n", lcp->cmdsize, segment_command_size);
		return LOAD_BADMACHO;
	}
	total_section_size = lcp->cmdsize - segment_command_size;

	/* 32-bit commands are widened into a local segment_command_64. */
	if (LC_SEGMENT_64 == lcp->cmd) {
		scp = (struct segment_command_64 *)lcp;
	} else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	if (verbose) {
		MACHO_PRINTF(("+++ load_segment %s "
		    "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
		    "prot %d/%d flags 0x%x\n",
		    scp->segname,
		    (uint64_t)(slide + scp->vmaddr),
		    (uint64_t)(slide + scp->vmaddr + scp->vmsize),
		    pager_offset + scp->fileoff,
		    pager_offset + scp->fileoff + scp->filesize,
		    scp->initprot,
		    scp->maxprot,
		    scp->flags));
	}

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size) {
		DEBUG4K_ERROR("LOAD_BADMACHO fileoff 0x%llx filesize 0x%llx macho_size 0x%llx\n", scp->fileoff, scp->filesize, (uint64_t)macho_size);
		return LOAD_BADMACHO;
	}
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects) {
		DEBUG4K_ERROR("LOAD_BADMACHO 0x%zx 0x%zx %d\n", total_section_size, single_section_size, scp->nsects);
		return LOAD_BADMACHO;
	}
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if (os_add_overflow(pager_offset, scp->fileoff, &file_offset)) {
		DEBUG4K_ERROR("LOAD_BADMACHO file_offset: 0x%llx + 0x%llx\n", pager_offset, scp->fileoff);
		return LOAD_BADMACHO;
	}
	file_size = scp->filesize;
#if __arm64__
	if (fourk_align) {
		if ((file_offset & FOURK_PAGE_MASK) != 0) {
			/*
			 * we can't mmap() it if it's not at least 4KB-aligned
			 * in the file
			 */
			DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset);
			return LOAD_BADMACHO;
		}
	} else
#endif /* __arm64__ */
	if ((file_offset & PAGE_MASK_64) != 0 ||
	    /* we can't mmap() it if it's not page-aligned in the file */
	    (file_offset & vm_map_page_mask(map)) != 0) {
		/*
		 * The 1st test would have failed if the system's page size
		 * was what this process believe is the page size, so let's
		 * fail here too for the sake of consistency.
		 */
		DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset);
		return LOAD_BADMACHO;
	}

	/*
	 * If we have a code signature attached for this slice
	 * require that the segments are within the signed part
	 * of the file.
	 */
	if (result->cs_end_offset &&
	    result->cs_end_offset < (off_t)scp->fileoff &&
	    result->cs_end_offset - scp->fileoff < scp->filesize) {
		if (cs_debug) {
			printf("section outside code signature\n");
		}
		DEBUG4K_ERROR("LOAD_BADMACHO end_offset 0x%llx fileoff 0x%llx filesize 0x%llx\n", result->cs_end_offset, scp->fileoff, scp->filesize);
		return LOAD_BADMACHO;
	}

	/* Apply ASLR slide; reject if the slid address wraps. */
	if (os_add_overflow(scp->vmaddr, slide, &vm_offset)) {
		if (cs_debug) {
			printf("vmaddr too large\n");
		}
		DEBUG4K_ERROR("LOAD_BADMACHO vmaddr 0x%llx slide 0x%llx vm_offset 0x%llx\n", scp->vmaddr, slide, (uint64_t)vm_offset);
		return LOAD_BADMACHO;
	}

	if (scp->vmsize > SIZE_MAX) {
		DEBUG4K_ERROR("LOAD_BADMACHO vmsize 0x%llx\n", scp->vmsize);
		return LOAD_BADMACHO;
	}

	vm_size = (size_t)scp->vmsize;

	if (vm_size == 0) {
		return LOAD_SUCCESS;
	}
	/* Detect a "__PAGEZERO"-style segment: at 0, fileless, inaccessible. */
	if (scp->vmaddr == 0 &&
	    file_size == 0 &&
	    vm_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		if (map == VM_MAP_NULL) {
			return LOAD_SUCCESS;
		}

		/*
		 * For PIE, extend page zero rather than moving it.  Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		vm_end = (vm_map_offset_t)(vm_offset + vm_size);
		if (vm_end < vm_offset) {
			DEBUG4K_ERROR("LOAD_BADMACHO vm_end 0x%llx vm_offset 0x%llx vm_size 0x%llx\n", (uint64_t)vm_end, (uint64_t)vm_offset, (uint64_t)vm_size);
			return LOAD_BADMACHO;
		}

		if (verbose) {
			MACHO_PRINTF(("++++++ load_segment: "
			    "page_zero up to 0x%llx\n",
			    (uint64_t) vm_end));
		}
#if __arm64__
		if (fourk_align) {
			/* raise min_offset as much as page-alignment allows */
			vm_end_aligned = vm_map_trunc_page(vm_end,
			    effective_page_mask);
		} else
#endif /* __arm64__ */
		{
			vm_end = vm_map_round_page(vm_end,
			    PAGE_MASK_64);
			vm_end_aligned = vm_end;
		}
		ret = vm_map_raise_min_offset(map,
		    vm_end_aligned);
		if (ret != KERN_SUCCESS) {
			DEBUG4K_ERROR("LOAD_FAILURE ret 0x%x\n", ret);
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	} else {
#if !defined(XNU_TARGET_OS_OSX)
		/* not PAGEZERO: should not be mapped at address 0 */
		if (filetype != MH_DYLINKER && (imgp->ip_flags & IMGPF_ROSETTA) == 0 && scp->vmaddr == 0) {
			DEBUG4K_ERROR("LOAD_BADMACHO filetype %d vmaddr 0x%llx\n", filetype, scp->vmaddr);
			return LOAD_BADMACHO;
		}
#endif /* !defined(XNU_TARGET_OS_OSX) */
	}

	/* Round the VM and file ranges out to mapping granularity. */
#if __arm64__
	if (fourk_align) {
		/* 4K-align */
		file_start = vm_map_trunc_page(file_offset,
		    FOURK_PAGE_MASK);
		file_end = vm_map_round_page(file_offset + file_size,
		    FOURK_PAGE_MASK);
		vm_start = vm_map_trunc_page(vm_offset,
		    FOURK_PAGE_MASK);
		vm_end = vm_map_round_page(vm_offset + vm_size,
		    FOURK_PAGE_MASK);

		if (file_offset - file_start > FOURK_PAGE_MASK ||
		    file_end - file_offset - file_size > FOURK_PAGE_MASK) {
			DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap "
			    "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n",
			    file_offset,
			    file_offset + file_size,
			    (uint64_t) file_start,
			    (uint64_t) file_end);
			return LOAD_BADMACHO;
		}

		if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
		    page_aligned(file_start) &&
		    vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
		    page_aligned(vm_start) &&
		    vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
			/* XXX last segment: ignore mis-aligned tail */
			file_end = vm_map_round_page(file_end,
			    effective_page_mask);
			vm_end = vm_map_round_page(vm_end,
			    effective_page_mask);
		}
	} else
#endif /* __arm64__ */
	{
		file_start = vm_map_trunc_page(file_offset,
		    effective_page_mask);
		file_end = vm_map_round_page(file_offset + file_size,
		    effective_page_mask);
		vm_start = vm_map_trunc_page(vm_offset,
		    effective_page_mask);
		vm_end = vm_map_round_page(vm_offset + vm_size,
		    effective_page_mask);

		if (file_offset - file_start > effective_page_mask ||
		    file_end - file_offset - file_size > effective_page_mask) {
			DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap "
			    "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n",
			    file_offset,
			    file_offset + file_size,
			    (uint64_t) file_start,
			    (uint64_t) file_end);
			return LOAD_BADMACHO;
		}
	}

	/* Track overall address bounds of everything loaded so far. */
	if (vm_start < result->min_vm_addr) {
		result->min_vm_addr = vm_start;
	}
	if (vm_end > result->max_vm_addr) {
		result->max_vm_addr = vm_end;
	}

	if (map == VM_MAP_NULL) {
		return LOAD_SUCCESS;
	}

	if (scp->flags & SG_READ_ONLY) {
		/*
		 * Record the VM start/end of a segment which should
		 * be RO after fixups. Only __DATA_CONST should
		 * have this flag.
		 */
		if (result->ro_vm_start != MACH_VM_MIN_ADDRESS ||
		    result->ro_vm_end != MACH_VM_MIN_ADDRESS) {
			DEBUG4K_ERROR("LOAD_BADMACHO segment flags [%x] "
			    "multiple segments with SG_READ_ONLY flag\n",
			    scp->flags);
			return LOAD_BADMACHO;
		}

		result->ro_vm_start = vm_start;
		result->ro_vm_end = vm_end;
	}

	if (vm_size > 0) {
#if !__x86_64__
		if (!strncmp(scp->segname, "__LINKEDIT", 11)) {
			vmk_flags.vmf_permanent = true;
		}
#endif /* !__x86_64__ */
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		if (verbose) {
			MACHO_PRINTF(("++++++ load_segment: "
			    "mapping at vm [0x%llx:0x%llx] of "
			    "file [0x%llx:0x%llx]\n",
			    (uint64_t) vm_start,
			    (uint64_t) vm_end,
			    (uint64_t) file_start,
			    (uint64_t) file_end));
		}
		ret = map_segment(map,
		    vm_start,
		    vm_end,
		    vmk_flags,
		    control,
		    file_start,
		    file_end,
		    initprot,
		    maxprot,
		    result);
		if (ret) {
			DEBUG4K_ERROR("LOAD_NOSPACE start 0x%llx end 0x%llx ret 0x%x\n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
			return LOAD_NOSPACE;
		}

#if FIXME
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
		if (delta_size > 0) {
			void *tmp = kalloc_data(delta_size, Z_WAITOK | Z_ZERO);
			int rc;

			if (tmp == NULL) {
				DEBUG4K_ERROR("LOAD_RESOURCE delta_size 0x%llx ret 0x%x\n", delta_size, ret);
				return LOAD_RESOURCE;
			}

			rc = copyout(tmp, map_addr + scp->filesize, delta_size);
			kfree_data(tmp, delta_size);

			if (rc) {
				DEBUG4K_ERROR("LOAD_FAILURE copyout 0x%llx 0x%llx\n", map_addr + scp->filesize, delta_size);
				return LOAD_FAILURE;
			}
		}
#endif /* FIXME */
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	if ((vm_end - vm_start) > (file_end - file_start)) {
		delta_size = (vm_end - vm_start) - (file_end - file_start);
	} else {
		delta_size = 0;
	}
	if (delta_size > 0) {
		vm_map_offset_t tmp_start;
		vm_map_offset_t tmp_end;

		if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) {
			DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start));
			return LOAD_NOSPACE;
		}

		if (os_add_overflow(tmp_start, delta_size, &tmp_end)) {
			DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size);
			return LOAD_NOSPACE;
		}

		if (verbose) {
			MACHO_PRINTF(("++++++ load_segment: "
			    "delta mapping vm [0x%llx:0x%llx]\n",
			    (uint64_t) tmp_start,
			    (uint64_t) tmp_end));
		}
		/* Anonymous zero-fill for the portion past the file data. */
		kr = map_segment(map,
		    tmp_start,
		    tmp_end,
		    vmk_flags,
		    MEMORY_OBJECT_CONTROL_NULL,
		    0,
		    delta_size,
		    scp->initprot,
		    scp->maxprot,
		    result);
		if (kr != KERN_SUCCESS) {
			DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr);
			return LOAD_NOSPACE;
		}
	}

	/* The segment mapped from file offset 0 contains the mach header. */
	if ((scp->fileoff == 0) && (scp->filesize != 0)) {
		result->mach_header = vm_offset;
	}

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		/* DSMOS-protected segment: set up on-fault transformation. */
		ret = unprotect_dsmos_segment(file_start,
		    file_end - file_start,
		    vp,
		    pager_offset,
		    map,
		    vm_start,
		    vm_end - vm_start);
		if (ret != LOAD_SUCCESS) {
			DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
			return ret;
		}
	} else {
		ret = LOAD_SUCCESS;
	}

	if (LOAD_SUCCESS == ret &&
	    filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd,
		    single_section_size,
		    ((const char *)lcp +
		    segment_command_size),
		    slide,
		    result);
	}

	/* Validate a previously-seen entry point against this segment. */
	if (result->entry_point != MACH_VM_MIN_ADDRESS) {
		if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
			if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
				result->validentry = 1;
			} else {
				/* right range but wrong protections, unset if previously validated */
				result->validentry = 0;
			}
		}
	}

	if (ret != LOAD_SUCCESS && verbose) {
		DEBUG4K_ERROR("ret %d\n", ret);
	}
	return ret;
}
2672 
2673 static
2674 load_return_t
load_uuid(struct uuid_command * uulp,char * command_end,load_result_t * result)2675 load_uuid(
2676 	struct uuid_command     *uulp,
2677 	char                    *command_end,
2678 	load_result_t           *result
2679 	)
2680 {
2681 	/*
2682 	 * We need to check the following for this command:
2683 	 * - The command size should be atleast the size of struct uuid_command
2684 	 * - The UUID part of the command should be completely within the mach-o header
2685 	 */
2686 
2687 	if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
2688 	    (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
2689 		return LOAD_BADMACHO;
2690 	}
2691 
2692 	memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
2693 	return LOAD_SUCCESS;
2694 }
2695 
/*
 * load_version:
 *	Handle a legacy LC_VERSION_MIN_* load command: record the platform,
 *	target SDK, and minimum SDK version in `result`.  At most one
 *	version command is permitted per image (tracked via
 *	*found_version_cmd).  Returns LOAD_SUCCESS or LOAD_BADMACHO.
 */
static
load_return_t
load_version(
	struct version_min_command     *vmc,
	boolean_t               *found_version_cmd,
	struct image_params             *imgp __unused,
	load_result_t           *result
	)
{
	uint32_t platform = 0;
	uint32_t sdk;
	uint32_t min_sdk;

	if (vmc->cmdsize < sizeof(*vmc)) {
		return LOAD_BADMACHO;
	}
	/* Only one version command is allowed per image. */
	if (*found_version_cmd == TRUE) {
		return LOAD_BADMACHO;
	}
	*found_version_cmd = TRUE;
	sdk = vmc->sdk;
	min_sdk = vmc->version;
	/*
	 * On x86_64 the iOS/watchOS/tvOS variants denote simulator
	 * binaries; elsewhere they denote the native platforms.
	 */
	switch (vmc->cmd) {
	case LC_VERSION_MIN_MACOSX:
		platform = PLATFORM_MACOS;
		break;
#if __x86_64__ /* __x86_64__ */
	case LC_VERSION_MIN_IPHONEOS:
		platform = PLATFORM_IOSSIMULATOR;
		break;
	case LC_VERSION_MIN_WATCHOS:
		platform = PLATFORM_WATCHOSSIMULATOR;
		break;
	case LC_VERSION_MIN_TVOS:
		platform = PLATFORM_TVOSSIMULATOR;
		break;
#else
	case LC_VERSION_MIN_IPHONEOS: {
#if __arm64__
		/* SDK version is packed as nibbles: major in the high 16 bits. */
		if (vmc->sdk < (12 << 16)) {
			/* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
			result->legacy_footprint = TRUE;
		}
#endif /* __arm64__ */
		platform = PLATFORM_IOS;
		break;
	}
	case LC_VERSION_MIN_WATCHOS:
		platform = PLATFORM_WATCHOS;
		break;
	case LC_VERSION_MIN_TVOS:
		platform = PLATFORM_TVOS;
		break;
#endif /* __x86_64__ */
	/* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */
	default:
		/*
		 * NOTE(review): the default arm is declared unreachable —
		 * this presumes the dispatcher only routes the
		 * LC_VERSION_MIN_* commands above here; confirm at the
		 * call site before relying on it.
		 */
		sdk = (uint32_t)-1;
		min_sdk = (uint32_t)-1;
		__builtin_unreachable();
	}
	result->ip_platform = platform;
	result->lr_min_sdk = min_sdk;
	result->lr_sdk = sdk;
	return LOAD_SUCCESS;
}
2761 
/*
 * load_main:
 *	Handle an LC_MAIN load command: record the (slid) default user
 *	stack location and requested stack size in `result`, mark the image
 *	as needing dyld, and initialize the thread's state.  Fails if a
 *	thread/entry point was already set up, or on malformed sizes.
 *	Returns LOAD_SUCCESS, LOAD_BADMACHO, or LOAD_FAILURE.
 */
static
load_return_t
load_main(
	struct entry_point_command      *epc,
	thread_t                thread,
	int64_t                         slide,
	load_result_t           *result
	)
{
	mach_vm_offset_t addr;
	kern_return_t   ret;

	if (epc->cmdsize < sizeof(*epc)) {
		return LOAD_BADMACHO;
	}
	/* Only one thread/entry-point command may be processed. */
	if (result->thread_count != 0) {
		return LOAD_FAILURE;
	}

	/* No thread yet (e.g. scanning pass): nothing more to record. */
	if (thread == THREAD_NULL) {
		return LOAD_SUCCESS;
	}

	/*
	 * LC_MAIN specifies stack size but not location.
	 * Add guard page to allocation size (MAXSSIZ includes guard page).
	 */
	if (epc->stacksize) {
		/*
		 * The overflow-probe result below is discarded on purpose:
		 * user_stack_size is immediately re-set to the raw stacksize.
		 */
		if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
			/*
			 * We are going to immediately throw away this result, but we want
			 * to make sure we aren't loading a dangerously close to
			 * overflowing value, since this will have a guard page added to it
			 * and be rounded to page boundaries
			 */
			return LOAD_BADMACHO;
		}
		result->user_stack_size = epc->stacksize;
		/* Allocation size includes one guard page. */
		if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
			return LOAD_BADMACHO;
		}
		result->custom_stack = TRUE;
	} else {
		result->user_stack_alloc_size = MAXSSIZ;
	}

	/* use default location for stack */
	ret = thread_userstackdefault(&addr, result->is_64bit_addr);
	if (ret != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}

	/* The stack slides down from the default location */
	result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide);

	if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
		/* Already processed LC_MAIN or LC_UNIXTHREAD */
		return LOAD_FAILURE;
	}

	/* kernel does *not* use entryoff from LC_MAIN.	 Dyld uses it. */
	result->needs_dynlinker = TRUE;
	result->using_lcmain = TRUE;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}

	result->unixproc = TRUE;
	result->thread_count++;

	return LOAD_SUCCESS;
}
2836 
2837 static
2838 load_return_t
setup_driver_main(thread_t thread,int64_t slide,load_result_t * result)2839 setup_driver_main(
2840 	thread_t                thread,
2841 	int64_t                         slide,
2842 	load_result_t           *result
2843 	)
2844 {
2845 	mach_vm_offset_t addr;
2846 	kern_return_t   ret;
2847 
2848 	/* Driver binaries have no LC_MAIN, use defaults */
2849 
2850 	if (thread == THREAD_NULL) {
2851 		return LOAD_SUCCESS;
2852 	}
2853 
2854 	result->user_stack_alloc_size = MAXSSIZ;
2855 
2856 	/* use default location for stack */
2857 	ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2858 	if (ret != KERN_SUCCESS) {
2859 		return LOAD_FAILURE;
2860 	}
2861 
2862 	/* The stack slides down from the default location */
2863 	result->user_stack = (user_addr_t)addr;
2864 	result->user_stack -= slide;
2865 
2866 	if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2867 		/* Already processed LC_MAIN or LC_UNIXTHREAD */
2868 		return LOAD_FAILURE;
2869 	}
2870 
2871 	result->needs_dynlinker = TRUE;
2872 
2873 	ret = thread_state_initialize( thread );
2874 	if (ret != KERN_SUCCESS) {
2875 		return LOAD_FAILURE;
2876 	}
2877 
2878 	result->unixproc = TRUE;
2879 	result->thread_count++;
2880 
2881 	return LOAD_SUCCESS;
2882 }
2883 
/*
 * load_unixthread
 *
 * Process an LC_UNIXTHREAD load command: extract the initial user stack
 * pointer and entry point from the embedded register state, then record
 * (a validated copy of) that state in the load result.
 *
 * Note: "addr" is reused — first it receives the stack pointer from
 * load_threadstack(), then the entry point from load_threadentry().
 *
 * Returns LOAD_SUCCESS, LOAD_BADMACHO for malformed state records, or
 * LOAD_FAILURE if a thread/entry was already established.
 */
static
load_return_t
load_unixthread(
	struct thread_command   *tcp,
	thread_t                thread,
	int64_t                         slide,
	boolean_t               is_x86_64_compat_binary,
	load_result_t           *result
	)
{
	load_return_t   ret;
	int customstack = 0;
	mach_vm_offset_t addr;
	/* The command must cover at least its fixed header. */
	if (tcp->cmdsize < sizeof(*tcp)) {
		return LOAD_BADMACHO;
	}
	/* Only one thread-defining load command is accepted per image. */
	if (result->thread_count != 0) {
		return LOAD_FAILURE;
	}

	/* No thread being materialized: nothing to configure. */
	if (thread == THREAD_NULL) {
		return LOAD_SUCCESS;
	}

	/* Pull the stack pointer (and custom-stack flag) from the state. */
	ret = load_threadstack(thread,
	    (uint32_t *)(((vm_offset_t)tcp) +
	    sizeof(struct thread_command)),
	    tcp->cmdsize - sizeof(struct thread_command),
	    &addr, &customstack, is_x86_64_compat_binary, result);
	if (ret != LOAD_SUCCESS) {
		return ret;
	}

	/* LC_UNIXTHREAD optionally specifies stack size and location */

	if (customstack) {
		result->custom_stack = TRUE;
	} else {
		result->user_stack_alloc_size = MAXSSIZ;
	}

	/* The stack slides down from the default location */
	result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide);

	{
		/* Now extract the entry point from the same state records. */
		ret = load_threadentry(thread,
		    (uint32_t *)(((vm_offset_t)tcp) +
		    sizeof(struct thread_command)),
		    tcp->cmdsize - sizeof(struct thread_command),
		    &addr);
		if (ret != LOAD_SUCCESS) {
			return ret;
		}

		if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
			/* Already processed LC_MAIN or LC_UNIXTHREAD */
			return LOAD_FAILURE;
		}

		/* Entry point is a file address; apply the ASLR slide. */
		result->entry_point = (user_addr_t)addr;
		result->entry_point += slide;

		/* Validate and stash a copy of the full register state. */
		ret = load_threadstate(thread,
		    (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
		    tcp->cmdsize - sizeof(struct thread_command),
		    result);
		if (ret != LOAD_SUCCESS) {
			return ret;
		}
	}

	result->unixproc = TRUE;
	result->thread_count++;

	return LOAD_SUCCESS;
}
2960 
2961 static
2962 load_return_t
load_threadstate(thread_t thread,uint32_t * ts,uint32_t total_size,load_result_t * result)2963 load_threadstate(
2964 	thread_t        thread,
2965 	uint32_t        *ts,
2966 	uint32_t        total_size,
2967 	load_result_t   *result
2968 	)
2969 {
2970 	uint32_t        size;
2971 	int             flavor;
2972 	uint32_t        thread_size;
2973 	uint32_t        *local_ts = NULL;
2974 	uint32_t        local_ts_size = 0;
2975 	int             ret;
2976 
2977 	(void)thread;
2978 
2979 	if (total_size > 0) {
2980 		local_ts_size = total_size;
2981 		local_ts = (uint32_t *)kalloc_data(local_ts_size, Z_WAITOK);
2982 		if (local_ts == NULL) {
2983 			return LOAD_FAILURE;
2984 		}
2985 		memcpy(local_ts, ts, local_ts_size);
2986 		ts = local_ts;
2987 	}
2988 
2989 	/*
2990 	 * Validate the new thread state; iterate through the state flavors in
2991 	 * the Mach-O file.
2992 	 * XXX: we should validate the machine state here, to avoid failing at
2993 	 * activation time where we can't bail out cleanly.
2994 	 */
2995 	while (total_size > 0) {
2996 		if (total_size < 2 * sizeof(uint32_t)) {
2997 			return LOAD_BADMACHO;
2998 		}
2999 
3000 		flavor = *ts++;
3001 		size = *ts++;
3002 
3003 		if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
3004 		    os_sub_overflow(total_size, thread_size, &total_size)) {
3005 			ret = LOAD_BADMACHO;
3006 			goto bad;
3007 		}
3008 
3009 		ts += size;     /* ts is a (uint32_t *) */
3010 	}
3011 
3012 	result->threadstate = local_ts;
3013 	result->threadstate_sz = local_ts_size;
3014 	return LOAD_SUCCESS;
3015 
3016 bad:
3017 	if (local_ts) {
3018 		kfree_data(local_ts, local_ts_size);
3019 	}
3020 	return ret;
3021 }
3022 
3023 
3024 static
3025 load_return_t
load_threadstack(thread_t thread,uint32_t * ts,uint32_t total_size,mach_vm_offset_t * user_stack,int * customstack,__unused boolean_t is_x86_64_compat_binary,load_result_t * result)3026 load_threadstack(
3027 	thread_t                thread,
3028 	uint32_t                *ts,
3029 	uint32_t                total_size,
3030 	mach_vm_offset_t        *user_stack,
3031 	int                     *customstack,
3032 	__unused boolean_t      is_x86_64_compat_binary,
3033 	load_result_t           *result
3034 	)
3035 {
3036 	kern_return_t   ret;
3037 	uint32_t        size;
3038 	int             flavor;
3039 	uint32_t        stack_size;
3040 
3041 	if (total_size == 0) {
3042 		return LOAD_BADMACHO;
3043 	}
3044 
3045 	while (total_size > 0) {
3046 		if (total_size < 2 * sizeof(uint32_t)) {
3047 			return LOAD_BADMACHO;
3048 		}
3049 
3050 		flavor = *ts++;
3051 		size = *ts++;
3052 		if (UINT32_MAX - 2 < size ||
3053 		    UINT32_MAX / sizeof(uint32_t) < size + 2) {
3054 			return LOAD_BADMACHO;
3055 		}
3056 		stack_size = (size + 2) * sizeof(uint32_t);
3057 		if (stack_size > total_size) {
3058 			return LOAD_BADMACHO;
3059 		}
3060 		total_size -= stack_size;
3061 
3062 		/*
3063 		 * Third argument is a kernel space pointer; it gets cast
3064 		 * to the appropriate type in thread_userstack() based on
3065 		 * the value of flavor.
3066 		 */
3067 		{
3068 			ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
3069 			if (ret != KERN_SUCCESS) {
3070 				return LOAD_FAILURE;
3071 			}
3072 		}
3073 
3074 		ts += size;     /* ts is a (uint32_t *) */
3075 	}
3076 	return LOAD_SUCCESS;
3077 }
3078 
3079 static
3080 load_return_t
load_threadentry(thread_t thread,uint32_t * ts,uint32_t total_size,mach_vm_offset_t * entry_point)3081 load_threadentry(
3082 	thread_t        thread,
3083 	uint32_t        *ts,
3084 	uint32_t        total_size,
3085 	mach_vm_offset_t        *entry_point
3086 	)
3087 {
3088 	kern_return_t   ret;
3089 	uint32_t        size;
3090 	int             flavor;
3091 	uint32_t        entry_size;
3092 
3093 	/*
3094 	 *	Set the thread state.
3095 	 */
3096 	*entry_point = MACH_VM_MIN_ADDRESS;
3097 	while (total_size > 0) {
3098 		if (total_size < 2 * sizeof(uint32_t)) {
3099 			return LOAD_BADMACHO;
3100 		}
3101 
3102 		flavor = *ts++;
3103 		size = *ts++;
3104 		if (UINT32_MAX - 2 < size ||
3105 		    UINT32_MAX / sizeof(uint32_t) < size + 2) {
3106 			return LOAD_BADMACHO;
3107 		}
3108 		entry_size = (size + 2) * sizeof(uint32_t);
3109 		if (entry_size > total_size) {
3110 			return LOAD_BADMACHO;
3111 		}
3112 		total_size -= entry_size;
3113 		/*
3114 		 * Third argument is a kernel space pointer; it gets cast
3115 		 * to the appropriate type in thread_entrypoint() based on
3116 		 * the value of flavor.
3117 		 */
3118 		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
3119 		if (ret != KERN_SUCCESS) {
3120 			return LOAD_FAILURE;
3121 		}
3122 		ts += size;     /* ts is a (uint32_t *) */
3123 	}
3124 	return LOAD_SUCCESS;
3125 }
3126 
/*
 * Scratch state for get_macho_vnode(): a nameidata for the path lookup
 * plus a header buffer viewable either as a thin Mach-O header or as a
 * fat header, padded to 512 bytes so the initial read pulls in a full
 * sector's worth of the file.
 */
struct macho_data {
	struct nameidata        __nid;
	union macho_vnode_header {
		struct mach_header      mach_header;
		struct fat_header       fat_header;
		char    __pad[512];
	} __header;
};
3135 
/* Canonical dynamic linker path; enforced on RELEASE kernels. */
#define DEFAULT_DYLD_PATH "/usr/lib/dyld"

#if (DEVELOPMENT || DEBUG)
/* Boot-arg controlled alternate dyld path (consumed by load_dylinker()). */
extern char dyld_alt_path[];
extern int use_alt_dyld;

/* Boot-arg controlled dyld suffix selection (consumed by load_dylinker()). */
extern char dyld_suffix[];
extern int use_dyld_suffix;

/* Maps a dyld suffix string to the linker path that should be loaded. */
typedef struct _dyld_suffix_map_entry {
	const char *suffix;
	const char *path;
} dyld_suffix_map_entry_t;

static const dyld_suffix_map_entry_t _dyld_suffix_map[] = {
	[0] = {
		.suffix = "",
		.path = DEFAULT_DYLD_PATH,
	}, {
		.suffix = "release",
		.path = DEFAULT_DYLD_PATH,
	}, {
		.suffix = "bringup",
		.path = "/usr/appleinternal/lib/dyld.bringup",
	},
};
#endif
3163 
/*
 * load_dylinker
 *
 * Process an LC_LOAD_DYLINKER command: resolve the linker path named by
 * the command (normally DEFAULT_DYLD_PATH, possibly redirected on
 * DEVELOPMENT/DEBUG kernels), load it into the target map with a
 * recursive parse_machfile() pass, and adopt its entry point / thread
 * state in place of the main executable's.  Also records dyld's
 * fsid/fileid in the image params, and on Rosetta processes hands an
 * open fd for dyld to the runtime.
 *
 * Returns LOAD_SUCCESS or a LOAD_* error from path validation, vnode
 * lookup, or the nested parse.
 */
static load_return_t
load_dylinker(
	struct dylinker_command *lcp,
	cpu_type_t              cputype,
	vm_map_t                map,
	thread_t        thread,
	int                     depth,
	int64_t                 slide,
	load_result_t           *result,
	struct image_params     *imgp
	)
{
	const char              *name;
	struct vnode            *vp = NULLVP;   /* set by get_macho_vnode() */
	struct mach_header      *header;
	off_t                   file_offset = 0; /* set by get_macho_vnode() */
	off_t                   macho_size = 0; /* set by get_macho_vnode() */
	load_result_t           *myresult;
	kern_return_t           ret;
	struct macho_data       *macho_data;
	struct {
		struct mach_header      __header;
		load_result_t           __myresult;
		struct macho_data       __macho_data;
	} *dyld_data;

	/* The path offset must land inside the command itself. */
	if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
		return LOAD_BADMACHO;
	}

	name = (const char *)lcp + lcp->name.offset;

	/* Check for a proper null terminated string. */
	size_t maxsz = lcp->cmdsize - lcp->name.offset;
	size_t namelen = strnlen(name, maxsz);
	if (namelen >= maxsz) {
		return LOAD_BADMACHO;
	}

#if (DEVELOPMENT || DEBUG)

	/*
	 * rdar://23680808
	 * If an alternate dyld has been specified via boot args, check
	 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
	 * executable and redirect the kernel to load that linker.
	 */

	if (use_alt_dyld) {
		int policy_error;
		uint32_t policy_flags = 0;
		int32_t policy_gencount = 0;

		policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
		if (policy_error == 0) {
			if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
				name = dyld_alt_path;
			}
		}
	} else if (use_dyld_suffix) {
		size_t i = 0;

		/* Pick the linker path matching the configured suffix. */
#define countof(x) (sizeof(x) / sizeof(x[0]))
		for (i = 0; i < countof(_dyld_suffix_map); i++) {
			const dyld_suffix_map_entry_t *entry = &_dyld_suffix_map[i];

			if (strcmp(entry->suffix, dyld_suffix) == 0) {
				name = entry->path;
				break;
			}
		}
	}
#endif

#if !(DEVELOPMENT || DEBUG)
	/* RELEASE kernels only ever load the canonical dyld. */
	if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
		return LOAD_BADMACHO;
	}
#endif

	/* Allocate wad-of-data from heap to reduce excessively deep stacks */

	dyld_data = kalloc_type(typeof(*dyld_data), Z_WAITOK);
	header = &dyld_data->__header;
	myresult = &dyld_data->__myresult;
	macho_data = &dyld_data->__macho_data;

	{
		/* Force dyld's CPU type to match the boot processor family. */
		cputype = (cputype & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK);
	}

	ret = get_macho_vnode(name, cputype, header,
	    &file_offset, &macho_size, macho_data, &vp, imgp);
	if (ret) {
		goto novp_out;
	}

	*myresult = load_result_null;
	myresult->is_64bit_addr = result->is_64bit_addr;
	myresult->is_64bit_data = result->is_64bit_data;

	/* Recursively parse dyld itself into the same map. */
	ret = parse_machfile(vp, map, thread, header, file_offset,
	    macho_size, depth, slide, 0, myresult, result, imgp);

	if (ret == LOAD_SUCCESS) {
		if (result->threadstate) {
			/* don't use the app's threadstate if we have a dyld */
			kfree_data(result->threadstate, result->threadstate_sz);
		}
		result->threadstate = myresult->threadstate;
		result->threadstate_sz = myresult->threadstate_sz;

		/* Execution starts in dyld, not the main executable. */
		result->dynlinker = TRUE;
		result->entry_point = myresult->entry_point;
		result->validentry = myresult->validentry;
		result->all_image_info_addr = myresult->all_image_info_addr;
		result->all_image_info_size = myresult->all_image_info_size;
		if (!myresult->platform_binary) {
			result->csflags &= ~CS_NO_UNTRUSTED_HELPERS;
		}

#if CONFIG_ROSETTA
		if (imgp->ip_flags & IMGPF_ROSETTA) {
			extern const struct fileops vnops;
			// Save the file descriptor and mach header address for dyld. These will
			// be passed on the stack for the Rosetta runtime's use.
			struct fileproc *fp;
			int dyld_fd;
			proc_t p = vfs_context_proc(imgp->ip_vfs_context);
			int error = falloc_exec(p, imgp->ip_vfs_context, &fp, &dyld_fd);
			if (error == 0) {
				error = VNOP_OPEN(vp, FREAD, imgp->ip_vfs_context);
				if (error == 0) {
					fp->fp_glob->fg_flag = FREAD;
					fp->fp_glob->fg_ops = &vnops;
					fp_set_data(fp, vp);

					proc_fdlock(p);
					procfdtbl_releasefd(p, dyld_fd, NULL);
					fp_drop(p, dyld_fd, fp, 1);
					proc_fdunlock(p);

					/* fd now holds its own vnode reference. */
					vnode_ref(vp);

					result->dynlinker_fd = dyld_fd;
					result->dynlinker_fp = fp;
					result->dynlinker_mach_header = myresult->mach_header;
					result->dynlinker_max_vm_addr = myresult->max_vm_addr;
					result->dynlinker_ro_vm_start = myresult->ro_vm_start;
					result->dynlinker_ro_vm_end = myresult->ro_vm_end;
				} else {
					fp_free(p, dyld_fd, fp);
					ret = LOAD_IOERROR;
				}
			} else {
				ret = LOAD_IOERROR;
			}
		}
#endif
	}

	/* Record dyld's filesystem identity in the image params. */
	struct vnode_attr *va;
	va = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO);
	VATTR_INIT(va);
	VATTR_WANTED(va, va_fsid64);
	VATTR_WANTED(va, va_fsid);
	VATTR_WANTED(va, va_fileid);
	int error = vnode_getattr(vp, va, imgp->ip_vfs_context);
	if (error == 0) {
		imgp->ip_dyld_fsid = vnode_get_va_fsid(va);
		imgp->ip_dyld_fsobjid = va->va_fileid;
	}

	vnode_put(vp);
	kfree_type(struct vnode_attr, va);
novp_out:
	kfree_type(typeof(*dyld_data), dyld_data);
	return ret;
}
3343 
#if CONFIG_ROSETTA
/* Canonical Rosetta runtime path. */
static const char* rosetta_runtime_path = "/usr/libexec/rosetta/runtime";

#if (DEVELOPMENT || DEBUG)
/* Internal runtime variants selectable via flags/policy (see below). */
static const char* rosetta_runtime_path_alt_x86 = "/usr/local/libexec/rosetta/runtime_internal";
static const char* rosetta_runtime_path_alt_arm = "/usr/local/libexec/rosetta/runtime_arm_internal";
#endif

/*
 * load_rosetta
 *
 * Load the Rosetta runtime into the target map for a translated
 * process.  Parses the runtime twice: a first map-less pass to learn
 * its VM span, then a second pass that places it just below the shared
 * cache base (minus an optional ASLR slide).  On success the runtime's
 * entry point and thread state replace the app's/dyld's in "result".
 *
 * Returns LOAD_SUCCESS or a LOAD_* error from vnode lookup or parsing.
 */
static load_return_t
load_rosetta(
	vm_map_t                        map,
	thread_t                        thread,
	load_result_t           *result,
	struct image_params     *imgp)
{
	struct vnode            *vp = NULLVP;   /* set by get_macho_vnode() */
	struct mach_header      *header;
	off_t                   file_offset = 0; /* set by get_macho_vnode() */
	off_t                   macho_size = 0; /* set by get_macho_vnode() */
	load_result_t           *myresult;
	kern_return_t           ret;
	struct macho_data       *macho_data;
	const char              *rosetta_file_path;
	struct {
		struct mach_header      __header;
		load_result_t           __myresult;
		struct macho_data       __macho_data;
	} *rosetta_data;
	mach_vm_address_t rosetta_load_addr;
	mach_vm_size_t    rosetta_size;
	mach_vm_address_t shared_cache_base = SHARED_REGION_BASE_ARM64;
	int64_t           slide = 0;

	/* Allocate wad-of-data from heap to reduce excessively deep stacks */
	rosetta_data = kalloc_type(typeof(*rosetta_data), Z_WAITOK | Z_NOFAIL);
	header = &rosetta_data->__header;
	myresult = &rosetta_data->__myresult;
	macho_data = &rosetta_data->__macho_data;

	rosetta_file_path = rosetta_runtime_path;

#if (DEVELOPMENT || DEBUG)
	/* Alternate runtime via image flag or per-UUID policy. */
	bool use_alt_rosetta = false;
	if (imgp->ip_flags & IMGPF_ALT_ROSETTA) {
		use_alt_rosetta = true;
	} else {
		int policy_error;
		uint32_t policy_flags = 0;
		int32_t policy_gencount = 0;
		policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
		if (policy_error == 0 && (policy_flags & PROC_UUID_ALT_ROSETTA_POLICY) != 0) {
			use_alt_rosetta = true;
		}
	}

	if (use_alt_rosetta) {
		if (imgp->ip_origcputype == CPU_TYPE_X86_64) {
			rosetta_file_path = rosetta_runtime_path_alt_x86;
		} else if (imgp->ip_origcputype == CPU_TYPE_ARM64) {
			rosetta_file_path = rosetta_runtime_path_alt_arm;
		} else {
			ret = LOAD_BADARCH;
			goto novp_out;
		}
	}
#endif

	ret = get_macho_vnode(rosetta_file_path, CPU_TYPE_ARM64, header,
	    &file_offset, &macho_size, macho_data, &vp, imgp);
	if (ret) {
		goto novp_out;
	}

	/* First pass: no map/thread — just measure the runtime's VM span. */
	*myresult = load_result_null;
	myresult->is_64bit_addr = TRUE;
	myresult->is_64bit_data = TRUE;

	ret = parse_machfile(vp, NULL, NULL, header, file_offset, macho_size,
	    2, 0, 0, myresult, NULL, imgp);
	if (ret != LOAD_SUCCESS) {
		goto out;
	}

	/* Pick a random page-aligned slide unless ASLR is disabled. */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		slide = random();
		slide = (slide % (vm_map_get_max_loader_aslr_slide_pages(map) - 1)) + 1;
		slide <<= vm_map_page_shift(map);
	}

	if (imgp->ip_origcputype == CPU_TYPE_X86_64) {
		shared_cache_base = SHARED_REGION_BASE_X86_64;
	}

	/* Place the runtime just below the shared cache, minus the slide. */
	rosetta_size = round_page(myresult->max_vm_addr - myresult->min_vm_addr);
	rosetta_load_addr = shared_cache_base - rosetta_size - slide;

	/* Second pass: actually map the runtime at the chosen address. */
	*myresult = load_result_null;
	myresult->is_64bit_addr = TRUE;
	myresult->is_64bit_data = TRUE;
	myresult->is_rosetta = TRUE;

	ret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
	    2, rosetta_load_addr, 0, myresult, result, imgp);
	if (ret == LOAD_SUCCESS) {
		if (result) {
			if (result->threadstate) {
				/* don't use the app's/dyld's threadstate */
				kfree_data(result->threadstate, result->threadstate_sz);
			}
			assert(myresult->threadstate != NULL);

			result->is_rosetta = TRUE;

			/* Execution begins in the Rosetta runtime. */
			result->threadstate = myresult->threadstate;
			result->threadstate_sz = myresult->threadstate_sz;

			result->entry_point = myresult->entry_point;
			result->validentry = myresult->validentry;
			if (!myresult->platform_binary) {
				result->csflags &= ~CS_NO_UNTRUSTED_HELPERS;
			}

			/* Non-arm64e runtime: disable pointer authentication. */
			if ((header->cpusubtype & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_ARM64E) {
				imgp->ip_flags |= IMGPF_NOJOP;
			}
		}
	}

out:
	vnode_put(vp);
novp_out:
	kfree_type(typeof(*rosetta_data), rosetta_data);
	return ret;
}
#endif
3479 
/*
 * set_signature_error
 *
 * Attach a code-signing exit reason (with a human-readable description)
 * to the image params so the process gets a consistent crash report
 * when its signature fails validation.  Best-effort: allocation
 * failures are logged and otherwise ignored.
 */
static void
set_signature_error(
	struct vnode* vp,
	struct image_params * imgp,
	const char* fatal_failure_desc,
	const size_t fatal_failure_desc_len)
{
	char *vn_path = NULL;
	vm_size_t vn_pathlen = MAXPATHLEN;
	char const *path = NULL;

	/* Resolve the binary's path for log messages (best effort). */
	vn_path = zalloc(ZV_NAMEI);
	if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) {
		path = vn_path;
	} else {
		path = "(get vnode path failed)";
	}
	os_reason_t reason = os_reason_create(OS_REASON_CODESIGNING,
	    CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG);

	if (reason == OS_REASON_NULL) {
		printf("load_code_signature: %s: failure to allocate exit reason for validation failure: %s\n",
		    path, fatal_failure_desc);
		goto out;
	}

	/* Hand the reason to exec; flag it for crash reporting. */
	imgp->ip_cs_error = reason;
	reason->osr_flags = (OS_REASON_FLAG_GENERATE_CRASH_REPORT |
	    OS_REASON_FLAG_CONSISTENT_FAILURE);

	mach_vm_address_t data_addr = 0;

	int reason_error = 0;
	int kcdata_error = 0;

	/* Embed the description string into the reason's kcdata buffer. */
	if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size
	    (1, (uint32_t)fatal_failure_desc_len))) == 0 &&
	    (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor,
	    EXIT_REASON_USER_DESC, (uint32_t)fatal_failure_desc_len,
	    &data_addr)) == KERN_SUCCESS) {
		kern_return_t mc_error = kcdata_memcpy(&reason->osr_kcd_descriptor, (mach_vm_address_t)data_addr,
		    fatal_failure_desc, (uint32_t)fatal_failure_desc_len);

		if (mc_error != KERN_SUCCESS) {
			printf("load_code_signature: %s: failed to copy reason string "
			    "(kcdata_memcpy error: %d, length: %lu)\n",
			    path, mc_error, fatal_failure_desc_len);
		}
	} else {
		printf("load_code_signature: %s: failed to allocate space for reason string "
		    "(os_reason_alloc_buffer error: %d, kcdata error: %d, length: %lu)\n",
		    path, reason_error, kcdata_error, fatal_failure_desc_len);
	}
out:
	if (vn_path) {
		zfree(ZV_NAMEI, vn_path);
	}
}
3538 
/*
 * load_code_signature
 *
 * Process an LC_CODE_SIGNATURE command: reuse an existing, still-valid
 * code-signing blob registered on the vnode when possible (revalidating
 * it if its generation is stale); otherwise read the signature from the
 * file and register it via ubc_cs_blob_add().  On success the blob's
 * flags, platform-binary bit, and end offset are copied into "result".
 *
 * Returns LOAD_SUCCESS or a LOAD_* error (BADMACHO for malformed/
 * mismatched signatures, NOSPACE/IOERROR/FAILURE for resource or
 * validation problems).
 */
static load_return_t
load_code_signature(
	struct linkedit_data_command    *lcp,
	struct vnode                    *vp,
	off_t                           macho_offset,
	off_t                           macho_size,
	cpu_type_t                      cputype,
	cpu_subtype_t                   cpusubtype,
	load_result_t                   *result,
	struct image_params             *imgp)
{
	int             ret;
	kern_return_t   kr;
	vm_offset_t     addr;   /* owns the blob buffer until consumed */
	int             resid;
	struct cs_blob  *blob;
	int             error;
	vm_size_t       blob_size;
	uint32_t        sum;
	boolean_t               anyCPU;

	addr = 0;
	blob = NULL;

	/* Capability bits are not part of the subtype identity. */
	cpusubtype &= ~CPU_SUBTYPE_MASK;

	blob = ubc_cs_blob_get(vp, cputype, cpusubtype, macho_offset);

	if (blob != NULL) {
		/* we already have a blob for this vnode and cpu(sub)type */
		anyCPU = blob->csb_cpu_type == -1;
		if ((blob->csb_cpu_type != cputype &&
		    blob->csb_cpu_subtype != cpusubtype && !anyCPU) ||
		    (blob->csb_base_offset != macho_offset) ||
		    ((blob->csb_flags & CS_VALID) == 0)) {
			/* the blob has changed for this vnode: fail ! */
			ret = LOAD_BADMACHO;
			const char* fatal_failure_desc = "embedded signature doesn't match attached signature";
			const size_t fatal_failure_desc_len = strlen(fatal_failure_desc) + 1;

			printf("load_code_signature: %s\n", fatal_failure_desc);
			set_signature_error(vp, imgp, fatal_failure_desc, fatal_failure_desc_len);
			goto out;
		}

		/* It matches the blob we want here, let's verify the version */
		if (!anyCPU && ubc_cs_generation_check(vp) == 0) {
			/* No need to revalidate, we're good! */
			ret = LOAD_SUCCESS;
			goto out;
		}

		/* That blob may be stale, let's revalidate. */
		error = ubc_cs_blob_revalidate(vp, blob, imgp, 0, result->ip_platform);
		if (error == 0) {
			/* Revalidation succeeded, we're good! */
			/* If we were revaliding a CS blob with any CPU arch we adjust it */
			if (anyCPU) {
				/* Pin the wildcard blob to this cpu(sub)type. */
				vnode_lock_spin(vp);
				struct cs_cpu_info cpu_info = {
					.csb_cpu_type = cputype,
					.csb_cpu_subtype = cpusubtype
				};
				zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_info, &cpu_info);
				vnode_unlock(vp);
			}
			ret = LOAD_SUCCESS;
			goto out;
		}

		if (error != EAGAIN) {
			printf("load_code_signature: revalidation failed: %d\n", error);
			ret = LOAD_FAILURE;
			goto out;
		}

		assert(error == EAGAIN);

		/*
		 * Revalidation was not possible for this blob. We just continue as if there was no blob,
		 * rereading the signature, and ubc_cs_blob_add will do the right thing.
		 */
		blob = NULL;
	}

	/* LC_CODE_SIGNATURE has a fixed layout; reject other sizes. */
	if (lcp->cmdsize != sizeof(struct linkedit_data_command)) {
		ret = LOAD_BADMACHO;
		goto out;
	}

	/* The signature region must lie entirely within the Mach-O. */
	sum = 0;
	if (os_add_overflow(lcp->dataoff, lcp->datasize, &sum) || sum > macho_size) {
		ret = LOAD_BADMACHO;
		goto out;
	}

	blob_size = lcp->datasize;
	kr = ubc_cs_blob_allocate(&addr, &blob_size);
	if (kr != KERN_SUCCESS) {
		ret = LOAD_NOSPACE;
		goto out;
	}

	/* Read the signature bytes out of the file. */
	resid = 0;
	error = vn_rdwr(UIO_READ,
	    vp,
	    (caddr_t) addr,
	    lcp->datasize,
	    macho_offset + lcp->dataoff,
	    UIO_SYSSPACE,
	    0,
	    kauth_cred_get(),
	    &resid,
	    current_proc());
	if (error || resid != 0) {
		ret = LOAD_IOERROR;
		goto out;
	}

	if (ubc_cs_blob_add(vp,
	    result->ip_platform,
	    cputype,
	    cpusubtype,
	    macho_offset,
	    &addr,
	    lcp->datasize,
	    imgp,
	    0,
	    &blob,
	    CS_BLOB_ADD_ALLOW_MAIN_BINARY)) {
		if (addr) {
			ubc_cs_blob_deallocate(addr, blob_size);
			addr = 0;
		}
		ret = LOAD_FAILURE;
		goto out;
	} else {
		/* ubc_cs_blob_add() has consumed "addr" */
		addr = 0;
	}

#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_allocate( vp );
#endif

	ret = LOAD_SUCCESS;
out:
	if (ret == LOAD_SUCCESS) {
		if (blob == NULL) {
			panic("success, but no blob!");
		}

		/* Propagate the blob's verdict to the load result. */
		result->csflags |= blob->csb_flags;
		result->platform_binary = blob->csb_platform_binary;
		result->cs_end_offset = blob->csb_end_offset;
	}
	/* Release the buffer on any path where it was not consumed. */
	if (addr != 0) {
		ubc_cs_blob_deallocate(addr, blob_size);
		addr = 0;
	}

	return ret;
}
3702 
3703 
3704 #if CONFIG_CODE_DECRYPTION
3705 
3706 static load_return_t
set_code_unprotect(struct encryption_info_command * eip,caddr_t addr,vm_map_t map,int64_t slide,struct vnode * vp,off_t macho_offset,cpu_type_t cputype,cpu_subtype_t cpusubtype)3707 set_code_unprotect(
3708 	struct encryption_info_command *eip,
3709 	caddr_t addr,
3710 	vm_map_t map,
3711 	int64_t slide,
3712 	struct vnode *vp,
3713 	off_t macho_offset,
3714 	cpu_type_t cputype,
3715 	cpu_subtype_t cpusubtype)
3716 {
3717 	int error, len;
3718 	pager_crypt_info_t crypt_info;
3719 	const char * cryptname = 0;
3720 	char *vpath;
3721 
3722 	size_t offset;
3723 	struct segment_command_64 *seg64;
3724 	struct segment_command *seg32;
3725 	vm_map_offset_t map_offset, map_size;
3726 	vm_object_offset_t crypto_backing_offset;
3727 	kern_return_t kr;
3728 
3729 	if (eip->cmdsize < sizeof(*eip)) {
3730 		return LOAD_BADMACHO;
3731 	}
3732 
3733 	switch (eip->cryptid) {
3734 	case 0:
3735 		/* not encrypted, just an empty load command */
3736 		return LOAD_SUCCESS;
3737 	case 1:
3738 		cryptname = "com.apple.unfree";
3739 		break;
3740 	case 0x10:
3741 		/* some random cryptid that you could manually put into
3742 		 * your binary if you want NULL */
3743 		cryptname = "com.apple.null";
3744 		break;
3745 	default:
3746 		return LOAD_BADMACHO;
3747 	}
3748 
3749 	if (map == VM_MAP_NULL) {
3750 		return LOAD_SUCCESS;
3751 	}
3752 	if (NULL == text_crypter_create) {
3753 		return LOAD_FAILURE;
3754 	}
3755 
3756 	vpath = zalloc(ZV_NAMEI);
3757 
3758 	len = MAXPATHLEN;
3759 	error = vn_getpath(vp, vpath, &len);
3760 	if (error) {
3761 		zfree(ZV_NAMEI, vpath);
3762 		return LOAD_FAILURE;
3763 	}
3764 
3765 	if (eip->cryptsize == 0) {
3766 		printf("%s:%d '%s': cryptoff 0x%llx cryptsize 0x%llx cryptid 0x%x ignored\n", __FUNCTION__, __LINE__, vpath, (uint64_t)eip->cryptoff, (uint64_t)eip->cryptsize, eip->cryptid);
3767 		zfree(ZV_NAMEI, vpath);
3768 		return LOAD_SUCCESS;
3769 	}
3770 
3771 	/* set up decrypter first */
3772 	crypt_file_data_t crypt_data = {
3773 		.filename = vpath,
3774 		.cputype = cputype,
3775 		.cpusubtype = cpusubtype,
3776 		.origin = CRYPT_ORIGIN_APP_LAUNCH,
3777 	};
3778 	kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
3779 #if VM_MAP_DEBUG_APPLE_PROTECT
3780 	if (vm_map_debug_apple_protect) {
3781 		struct proc *p;
3782 		p  = current_proc();
3783 		printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
3784 		    proc_getpid(p), p->p_comm, map, __FUNCTION__, vpath, kr);
3785 	}
3786 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
3787 	zfree(ZV_NAMEI, vpath);
3788 
3789 	if (kr) {
3790 		printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
3791 		    cryptname, kr);
3792 		if (kr == kIOReturnNotPrivileged) {
3793 			/* text encryption returned decryption failure */
3794 			return LOAD_DECRYPTFAIL;
3795 		} else {
3796 			return LOAD_RESOURCE;
3797 		}
3798 	}
3799 
3800 	/* this is terrible, but we have to rescan the load commands to find the
3801 	 * virtual address of this encrypted stuff. This code is gonna look like
3802 	 * the dyld source one day... */
3803 	struct mach_header *header = (struct mach_header *)addr;
3804 	size_t mach_header_sz = sizeof(struct mach_header);
3805 	if (header->magic == MH_MAGIC_64 ||
3806 	    header->magic == MH_CIGAM_64) {
3807 		mach_header_sz = sizeof(struct mach_header_64);
3808 	}
3809 	offset = mach_header_sz;
3810 	uint32_t ncmds = header->ncmds;
3811 	while (ncmds--) {
3812 		/*
3813 		 *	Get a pointer to the command.
3814 		 */
3815 		struct load_command *lcp = (struct load_command *)(addr + offset);
3816 		offset += lcp->cmdsize;
3817 
3818 		switch (lcp->cmd) {
3819 		case LC_SEGMENT_64:
3820 			seg64 = (struct segment_command_64 *)lcp;
3821 			if ((seg64->fileoff <= eip->cryptoff) &&
3822 			    (seg64->fileoff + seg64->filesize >=
3823 			    eip->cryptoff + eip->cryptsize)) {
3824 				map_offset = (vm_map_offset_t)(seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide);
3825 				map_size = eip->cryptsize;
3826 				crypto_backing_offset = macho_offset + eip->cryptoff;
3827 				goto remap_now;
3828 			}
3829 			break;
3830 		case LC_SEGMENT:
3831 			seg32 = (struct segment_command *)lcp;
3832 			if ((seg32->fileoff <= eip->cryptoff) &&
3833 			    (seg32->fileoff + seg32->filesize >=
3834 			    eip->cryptoff + eip->cryptsize)) {
3835 				map_offset = (vm_map_offset_t)(seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide);
3836 				map_size = eip->cryptsize;
3837 				crypto_backing_offset = macho_offset + eip->cryptoff;
3838 				goto remap_now;
3839 			}
3840 			break;
3841 		}
3842 	}
3843 
3844 	/* if we get here, did not find anything */
3845 	return LOAD_BADMACHO;
3846 
3847 remap_now:
3848 	/* now remap using the decrypter */
3849 	MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
3850 	    (uint64_t) map_offset,
3851 	    (uint64_t) (map_offset + map_size)));
3852 	kr = vm_map_apple_protected(map,
3853 	    map_offset,
3854 	    map_offset + map_size,
3855 	    crypto_backing_offset,
3856 	    &crypt_info,
3857 	    CRYPTID_APP_ENCRYPTION);
3858 	if (kr) {
3859 		printf("set_code_unprotect(): mapping failed with %x\n", kr);
3860 		return LOAD_PROTECT;
3861 	}
3862 
3863 	return LOAD_SUCCESS;
3864 }
3865 
3866 #endif
3867 
3868 /*
3869  * This routine exists to support the load_dylinker().
3870  *
3871  * This routine has its own, separate, understanding of the FAT file format,
3872  * which is terrifically unfortunate.
3873  */
/*
 * Resolve 'path' to a vnode, open it for reading, and locate the Mach-O
 * image for 'cputype' inside it (including selecting the best-matching
 * slice of a fat file).  On success, returns LOAD_SUCCESS with:
 *   *mach_header - the Mach-O header read from the file
 *   *file_offset - offset of the Mach-O image within the file
 *   *macho_size  - size of the Mach-O image
 *   *vpp         - the opened, referenced vnode (caller must close/put)
 * On failure, the vnode has been closed and released, and a LOAD_*
 * error code is returned.  'data' supplies caller-owned scratch storage
 * for the nameidata and header union, keeping this frame small.
 */
static
load_return_t
get_macho_vnode(
	const char              *path,
	cpu_type_t              cputype,
	struct mach_header      *mach_header,
	off_t                   *file_offset,
	off_t                   *macho_size,
	struct macho_data       *data,
	struct vnode            **vpp,
	struct image_params     *imgp
	)
{
	struct vnode            *vp;
	vfs_context_t           ctx = vfs_context_current();
	proc_t                  p = vfs_context_proc(ctx);
	kauth_cred_t            kerncred;
	struct nameidata        *ndp = &data->__nid;
	boolean_t               is_fat;
	struct fat_arch         fat_arch;
	int                     error;
	int resid;
	union macho_vnode_header *header = &data->__header;
	off_t fsize = (off_t)0;

	/*
	 * Capture the kernel credential for use in the actual read of the
	 * file, since the user doing the execution may have execute rights
	 * but not read rights, but to exec something, we have to either map
	 * or read it into the new process address space, which requires
	 * read rights.  This is to deal with lack of common credential
	 * serialization code which would treat NOCRED as "serialize 'root'".
	 */
	kerncred = vfs_context_ucred(vfs_context_kernel());

	/* init the namei data to point the file user's program name */
	/* path is a kernel-space string, hence UIO_SYSSPACE */
	NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);

	if ((error = namei(ndp)) != 0) {
		/* map the lookup failure onto the loader's error namespace */
		if (error == ENOENT) {
			error = LOAD_ENOENT;
		} else {
			error = LOAD_FAILURE;
		}
		return error;
	}
	nameidone(ndp);
	vp = ndp->ni_vp;        /* holds the iocount released at bad1 */

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access: caller must be able to both execute and read it */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it; from here on, failures must also VNOP_CLOSE (bad2) */
	if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* read enough of the file for either a mach_header or a fat_header */
	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	/* non-zero residual means a short read: file too small to be valid */
	if (resid) {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	/*
	 * Classify the file: thin Mach-O (host byte order) or fat.  The fat
	 * header is always big-endian on disk, hence the swap before compare.
	 */
	if (header->mach_header.magic == MH_MAGIC ||
	    header->mach_header.magic == MH_MAGIC_64) {
		is_fat = FALSE;
	} else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
		is_fat = TRUE;
	} else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* sanity-check the arch entries against the actual file size */
		error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
		    sizeof(*header), fsize);
		if (error != LOAD_SUCCESS) {
			goto bad2;
		}

		/* Look up our architecture in the fat file. */
		error = fatfile_getbestarch_for_cputype(cputype, CPU_SUBTYPE_ANY,
		    (vm_offset_t)(&header->fat_header), sizeof(*header), imgp, &fat_arch);
		if (error != LOAD_SUCCESS) {
			goto bad2;
		}

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
		    sizeof(header->mach_header), fat_arch.offset,
		    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* again, a short read means the slice is truncated */
		if (resid) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header->mach_header.magic != MH_MAGIC &&
		    header->mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		/* image is the selected slice within the fat file */
		*file_offset = fat_arch.offset;
		*macho_size = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)header->mach_header.cputype != cputype) {
			error = LOAD_BADARCH;
			goto bad2;
		}

		/* thin file: the whole file is the image */
		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header->mach_header;
	*vpp = vp;      /* success: hand the opened, referenced vnode to caller */

	/* make sure UBC's notion of the file size is current before mapping */
	ubc_setsize(vp, fsize);
	return error;   /* 0 here, i.e. LOAD_SUCCESS */

bad2:
	/* opened but failed afterwards: undo VNOP_OPEN, then fall through */
	(void) VNOP_CLOSE(vp, FREAD, ctx);
bad1:
	/* drop the iocount taken by namei() */
	vnode_put(vp);
	return error;
}
4043