xref: /xnu-10002.61.3/libkern/kxld/kxld_util.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34     #include <kern/kalloc.h>
35     #include <libkern/libkern.h>
36     #include <mach/vm_param.h>
37     #include <vm/vm_kern.h>
38 #else
39     #include <stdio.h>
40     #define __SPI_AVAILABLE(...)
41     #include <stdlib.h>
42     #include <mach/mach_init.h>
43     #include <mach-o/swap.h>
44 #endif
45 
46 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
47 #include <AssertMacros.h>
48 
49 #include "kxld_util.h"
50 
51 /* swap_ functions are deprecated */
52 #pragma clang diagnostic push
53 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
54 
55 #if !KERNEL
56 static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
57     enum NXByteOrder target_order);
58 static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
59     enum NXByteOrder target_order);
60 #endif /* !KERNEL */
61 
62 #if DEBUG
63 static unsigned long num_allocations = 0;
64 static unsigned long num_frees = 0;
65 static unsigned long bytes_allocated = 0;
66 static unsigned long bytes_freed = 0;
67 #endif
68 
69 static KXLDLoggingCallback s_logging_callback = NULL;
70 static char s_callback_name[64] = "internal";
71 static void *s_callback_data = NULL;
72 
73 #if !KERNEL
74 static boolean_t s_cross_link_enabled  = FALSE;
75 /* Can't use PAGE_SIZE here because it is not a compile-time constant.
76  * However from inspection below, s_cross_link_page_size is not used
77  * unless s_cross_link_enabled is TRUE, and s_cross_link_enabled is
78  * only set to TRUE when a client specifies the value. So the
79  * default should never be used in practice,
80  */
81 static kxld_size_t s_cross_link_page_size;
82 #endif
83 
84 
85 /*******************************************************************************
86 *******************************************************************************/
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
	/* Install the function that kxld_log() forwards messages to.  Passing
	 * NULL silences logging: kxld_log() returns immediately when no
	 * callback is set.
	 */
	s_logging_callback = logging_callback;
}
92 
93 /*******************************************************************************
94 *******************************************************************************/
95 void
kxld_set_logging_callback_data(const char * name,void * user_data)96 kxld_set_logging_callback_data(const char *name, void *user_data)
97 {
98 	if (name) {
99 		(void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
100 		/* disallow format strings in the kxld logging callback name */
101 		for (size_t i = 0; i < sizeof(s_callback_name); i++) {
102 			if (s_callback_name[i] == '%') {
103 				s_callback_name[i] = '.';
104 			}
105 		}
106 	} else {
107 		(void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
108 	}
109 
110 	s_callback_data = user_data;
111 }
112 
113 /*******************************************************************************
114 *******************************************************************************/
115 void
kxld_log(KXLDLogSubsystem subsystem,KXLDLogLevel level,const char * in_format,...)116 kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
117     const char *in_format, ...)
118 {
119 	char stack_buffer[256];
120 	char *alloc_buffer = NULL;
121 	char *format = stack_buffer;
122 	u_int length = 0;
123 	va_list ap;
124 
125 	if (s_logging_callback) {
126 		length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
127 		    s_callback_name, in_format);
128 
129 		if (length >= sizeof(stack_buffer)) {
130 			length += 1;
131 			alloc_buffer = kxld_alloc(length);
132 			if (!alloc_buffer) {
133 				return;
134 			}
135 
136 			snprintf(alloc_buffer, length, "kxld[%s]: %s",
137 			    s_callback_name, in_format);
138 			format = alloc_buffer;
139 		}
140 
141 		va_start(ap, in_format);
142 		s_logging_callback(subsystem, level, format, ap, s_callback_data);
143 		va_end(ap);
144 
145 		if (alloc_buffer) {
146 			kxld_free(alloc_buffer, length);
147 		}
148 	}
149 }
150 
151 /* We'll use kalloc for any page-based allocations under this threshold, and
152  * kmem_alloc otherwise.
153  */
154 #define KALLOC_MAX 16 * 1024
155 
156 /*******************************************************************************
157 *******************************************************************************/
void *
kxld_calloc(size_t size)
{
	/* Zero-filled allocation: kheap in the kernel, calloc in user space. */
#if KERNEL
	void *mem = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	void *mem = calloc(1, size);
#endif

#if DEBUG
	/* Account successful allocations for the DEBUG memory report. */
	if (mem) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return mem;
}
179 
void *
kxld_alloc(size_t size)
{
	/* Plain allocation; contents are zeroed in the kernel path (Z_ZERO)
	 * but NOT in the user-space malloc path.
	 */
#if KERNEL
	void *mem = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	void *mem = malloc(size);
#endif

#if DEBUG
	/* Account successful allocations for the DEBUG memory report. */
	if (mem) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return mem;
}
201 
202 /*******************************************************************************
203 *******************************************************************************/
void *
kxld_page_alloc_untracked(size_t size)
{
	void *mem = NULL;
	/* Page allocations are always made in whole-page units. */
	size_t rounded = round_page(size);

#if KERNEL
	mem = kheap_alloc_tag(KHEAP_DEFAULT, rounded, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else /* !KERNEL */
	mem = calloc(1, rounded);
#endif /* KERNEL */

	return mem;
}
220 
221 /*******************************************************************************
222 *******************************************************************************/
223 void *
kxld_page_alloc(size_t size)224 kxld_page_alloc(size_t size)
225 {
226 	void * ptr = NULL;
227 
228 	ptr = kxld_page_alloc_untracked(size);
229 #if DEBUG
230 	if (ptr) {
231 		++num_allocations;
232 		bytes_allocated += round_page(size);
233 	}
234 #endif /* DEBUG */
235 
236 	return ptr;
237 }
238 
239 /*******************************************************************************
240 *******************************************************************************/
241 void
kxld_free(void * ptr,size_t size __unused)242 kxld_free(void *ptr, size_t size __unused)
243 {
244 #if DEBUG
245 	++num_frees;
246 	bytes_freed += size;
247 #endif
248 
249 #if KERNEL
250 	kheap_free(KHEAP_DEFAULT, ptr, size);
251 #else
252 	free(ptr);
253 #endif
254 }
255 
256 /*******************************************************************************
257 *******************************************************************************/
258 void
kxld_page_free_untracked(void * ptr,size_t size __unused)259 kxld_page_free_untracked(void *ptr, size_t size __unused)
260 {
261 #if KERNEL
262 	kheap_free(KHEAP_DEFAULT, ptr, round_page(size));
263 #else /* !KERNEL */
264 	free(ptr);
265 #endif /* KERNEL */
266 }
267 
268 
269 /*******************************************************************************
270 *******************************************************************************/
271 void
kxld_page_free(void * ptr,size_t size)272 kxld_page_free(void *ptr, size_t size)
273 {
274 #if DEBUG
275 	++num_frees;
276 	bytes_freed += round_page(size);
277 #endif /* DEBUG */
278 	kxld_page_free_untracked(ptr, size);
279 }
280 
281 /*******************************************************************************
282 *******************************************************************************/
/* Validate the structure of an in-memory 32-bit Mach-O object of 'size'
 * bytes: the mach header, every load command, segment/section file extents,
 * relocation tables, and the symbol/string tables must all lie within the
 * file.  In user-space builds (!KERNEL) the image is additionally byte-swapped
 * into 'host_order' as it is walked.  Returns KERN_SUCCESS if the image is
 * well-formed, KERN_FAILURE (after logging) otherwise.
 */
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE;
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary (MH_CIGAM is MH_MAGIC byte-swapped,
	 * i.e. the file is in the opposite byte order from the host).
	 */
	if (mach_hdr->magic == MH_CIGAM) {
		swap = TRUE;
		(void) swap_mach_header(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands.  'cmdsize' must be
	 * read (and possibly swapped) before it is used to advance 'offset'.
	 */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

		switch (cmd) {
		case LC_SEGMENT:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_segment_command(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers (they immediately follow
			 * the segment_command in the file).
			 */
			sects = (struct section *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* Ignore segments with no vm size */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data.
			 * NOTE(review): fileoff + filesize are 32-bit fields whose sum
			 * can wrap modulo 2^32 before the comparison — confirm callers
			 * only pass pre-vetted images, or add overflow-safe checks.
			 */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
461 
462 /*******************************************************************************
463 *******************************************************************************/
/* Validate the structure of an in-memory 64-bit Mach-O object of 'size'
 * bytes: the mach header, every load command, segment/section file extents,
 * relocation tables, and the symbol/string tables must all lie within the
 * file.  In user-space builds (!KERNEL) the image is additionally byte-swapped
 * into 'host_order' as it is walked.  Returns KERN_SUCCESS if the image is
 * well-formed, KERN_FAILURE (after logging) otherwise.
 * This is the 64-bit counterpart of validate_and_swap_macho_32().
 */
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE;
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary (MH_CIGAM_64 is MH_MAGIC_64
	 * byte-swapped, i.e. the file is in the opposite byte order).
	 */
	if (mach_hdr->magic == MH_CIGAM_64) {
		swap = TRUE;
		(void) swap_mach_header_64(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC_64, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands.  'cmdsize' must be
	 * read (and possibly swapped) before it is used to advance 'offset'.
	 */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
		switch (cmd) {
		case LC_SEGMENT_64:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
			if (swap) {
				swap_segment_command_64(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers (they immediately follow
			 * the segment_command_64 in the file).
			 */
			sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section_64(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* If the segment has no vm footprint, skip it */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data.
			 * NOTE(review): fileoff + filesize can wrap modulo 2^64 before
			 * the comparison — confirm callers only pass pre-vetted images,
			 * or add overflow-safe checks.
			 */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
641 
642 #if !KERNEL
643 /*******************************************************************************
644 *******************************************************************************/
645 void
unswap_macho(u_char * file,enum NXByteOrder host_order,enum NXByteOrder target_order)646 unswap_macho(u_char *file, enum NXByteOrder host_order,
647     enum NXByteOrder target_order)
648 {
649 	struct mach_header *hdr = (struct mach_header *) ((void *) file);
650 
651 	if (!hdr) {
652 		return;
653 	}
654 
655 	if (hdr->magic == MH_MAGIC) {
656 		unswap_macho_32(file, host_order, target_order);
657 	} else if (hdr->magic == MH_MAGIC_64) {
658 		unswap_macho_64(file, host_order, target_order);
659 	}
660 }
661 
662 /*******************************************************************************
663 *******************************************************************************/
/* Convert a fully-linked 32-bit Mach-O image from 'host_order' back to
 * 'target_order'.  No-op when the two orders already match.  The statement
 * ordering below is behavior-critical: header fields (cmdsize, nsects,
 * symoff, nsyms) are read while still in host order, and each header is
 * swapped only after everything it describes has been processed; the mach
 * header itself is swapped last.
 */
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		/* cmd/size are captured before the command is swapped below. */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT:
			seg_hdr = (struct segment_command *) load_hdr;
			sects = (struct section *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Sections first: nsects must be read pre-swap. */
			swap_section(sects, seg_hdr->nsects, target_order);
			swap_segment_command(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

			/* Symbols first: symoff/nsyms must be read pre-swap. */
			swap_nlist(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	/* The mach header is swapped last so ncmds stayed readable above. */
	(void) swap_mach_header(mach_hdr, target_order);
}
719 
720 /*******************************************************************************
721 *******************************************************************************/
/* Convert a fully-linked 64-bit Mach-O image from 'host_order' back to
 * 'target_order'.  No-op when the two orders already match.  As in the
 * 32-bit variant, header fields are read while still in host order and each
 * header is swapped only after its contents; the mach header is swapped last.
 */
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		/* cmd/size are captured before the command is swapped below. */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT_64:
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
			sects = (struct section_64 *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Sections first: nsects must be read pre-swap. */
			swap_section_64(sects, seg_hdr->nsects, target_order);
			swap_segment_command_64(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

			/* Symbols first: symoff/nsyms must be read pre-swap. */
			swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	/* The mach header is swapped last so ncmds stayed readable above. */
	(void) swap_mach_header_64(mach_hdr, target_order);
}
777 #endif /* !KERNEL */
778 
779 /*******************************************************************************
780 *******************************************************************************/
781 kxld_addr_t
kxld_align_address(kxld_addr_t address,u_int align)782 kxld_align_address(kxld_addr_t address, u_int align)
783 {
784 	kxld_addr_t alignment = (1 << align);
785 	kxld_addr_t low_bits = 0;
786 
787 	if (!align) {
788 		return address;
789 	}
790 
791 	low_bits = (address) & (alignment - 1);
792 	if (low_bits) {
793 		address += (alignment - low_bits);
794 	}
795 
796 	return address;
797 }
798 
799 /*******************************************************************************
800 *******************************************************************************/
801 boolean_t
kxld_is_32_bit(cpu_type_t cputype)802 kxld_is_32_bit(cpu_type_t cputype)
803 {
804 	return !(cputype & CPU_ARCH_ABI64);
805 }
806 
807 /*******************************************************************************
808 *******************************************************************************/
/* Emit the DEBUG-build allocation/free accounting gathered by the kxld_*
 * allocators.  No-op in non-DEBUG builds.
 */
void
kxld_print_memory_report(void)
{
#if DEBUG
	/* Guard the average: dividing by num_allocations is a division by
	 * zero when nothing was ever allocated.
	 */
	unsigned long average = num_allocations ?
	    (bytes_allocated / num_allocations) : 0;

	kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
	    "\tNumber of allocations:   %8lu\n"
	    "\tNumber of frees:         %8lu\n"
	    "\tAverage allocation size: %8lu\n"
	    "\tTotal bytes allocated:   %8lu\n"
	    "\tTotal bytes freed:       %8lu\n"
	    "\tTotal bytes leaked:      %8lu",
	    num_allocations, num_frees, average,
	    bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
824 
825 /*********************************************************************
826 *********************************************************************/
827 #if !KERNEL
828 boolean_t
kxld_set_cross_link_page_size(kxld_size_t target_page_size)829 kxld_set_cross_link_page_size(kxld_size_t target_page_size)
830 {
831 	// verify radix 2
832 	if ((target_page_size != 0) &&
833 	    ((target_page_size & (target_page_size - 1)) == 0)) {
834 		s_cross_link_enabled = TRUE;
835 		s_cross_link_page_size = target_page_size;
836 
837 		return TRUE;
838 	} else {
839 		return FALSE;
840 	}
841 }
842 #endif /* !KERNEL */
843 
844 /*********************************************************************
845 *********************************************************************/
846 kxld_size_t
kxld_get_effective_page_size(void)847 kxld_get_effective_page_size(void)
848 {
849 #if KERNEL
850 	return PAGE_SIZE;
851 #else
852 	if (s_cross_link_enabled) {
853 		return s_cross_link_page_size;
854 	} else {
855 		return PAGE_SIZE;
856 	}
857 #endif /* KERNEL */
858 }
859 
860 /*********************************************************************
861 *********************************************************************/
862 kxld_addr_t
kxld_round_page_cross_safe(kxld_addr_t offset)863 kxld_round_page_cross_safe(kxld_addr_t offset)
864 {
865 #if KERNEL
866 	return round_page(offset);
867 #else
868 	// assume s_cross_link_page_size is power of 2
869 	if (s_cross_link_enabled) {
870 		return (offset + (s_cross_link_page_size - 1)) &
871 		       (~(s_cross_link_page_size - 1));
872 	} else {
873 		return round_page(offset);
874 	}
875 #endif /* KERNEL */
876 }
877 
878 #if SPLIT_KEXTS_DEBUG
879 
/* Debug helper (SPLIT_KEXTS_DEBUG builds only): log the extents of the
 * original and linked kext images plus the per-segment link addresses
 * recorded in 'info'.
 */
void
kxld_show_split_info(splitKextLinkInfo *info)
{
	kxld_log(kKxldLogLinking, kKxldLogErr,
	    "splitKextLinkInfo: \n"
	    "kextExecutable %p to %p kextSize %lu \n"
	    "linkedKext %p to %p linkedKextSize %lu \n"
	    "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
	    "vmaddr_DATA %p vmaddr_DATA_CONST %p "
	    "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
	    (void *) info->kextExecutable,
	    (void *) (info->kextExecutable + info->kextSize),
	    info->kextSize,
	    (void*) info->linkedKext,
	    (void*) (info->linkedKext + info->linkedKextSize),
	    info->linkedKextSize,
	    (void *) info->vmaddr_TEXT,
	    (void *) info->vmaddr_TEXT_EXEC,
	    (void *) info->vmaddr_DATA,
	    (void *) info->vmaddr_DATA_CONST,
	    (void *) info->vmaddr_LLVM_COV,
	    (void *) info->vmaddr_LINKEDIT);
}
903 
904 boolean_t
isTargetKextName(const char * the_name)905 isTargetKextName(const char * the_name)
906 {
907 	if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
908 		return TRUE;
909 	}
910 	return FALSE;
911 }
912 #endif
913 
914 #pragma clang diagnostic pop
915