xref: /xnu-11215.41.3/libkern/kxld/kxld_util.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34     #include <kern/kalloc.h>
35     #include <libkern/libkern.h>
36     #include <mach/vm_param.h>
37     #include <vm/vm_kern.h>
38 #else
39     #include <stdio.h>
40     #include <stdlib.h>
41     #include <mach/mach_init.h>
42     #include <mach-o/swap.h>
43 #endif
44 
45 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
46 #include <AssertMacros.h>
47 
48 #include "kxld_util.h"
49 
50 /* swap_ functions are deprecated */
51 #pragma clang diagnostic push
52 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
53 
54 #if !KERNEL
55 static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
56     enum NXByteOrder target_order);
57 static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
58     enum NXByteOrder target_order);
59 #endif /* !KERNEL */
60 
61 #if DEBUG
62 static unsigned long num_allocations = 0;
63 static unsigned long num_frees = 0;
64 static unsigned long bytes_allocated = 0;
65 static unsigned long bytes_freed = 0;
66 #endif
67 
68 static KXLDLoggingCallback s_logging_callback = NULL;
69 static char s_callback_name[64] = "internal";
70 static void *s_callback_data = NULL;
71 
72 #if !KERNEL
73 static boolean_t s_cross_link_enabled  = FALSE;
74 /* Can't use PAGE_SIZE here because it is not a compile-time constant.
75  * However from inspection below, s_cross_link_page_size is not used
76  * unless s_cross_link_enabled is TRUE, and s_cross_link_enabled is
77  * only set to TRUE when a client specifies the value. So the
78  * default should never be used in practice,
79  */
80 static kxld_size_t s_cross_link_page_size;
81 #endif
82 
83 
84 /*******************************************************************************
85 *******************************************************************************/
86 void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)87 kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
88 {
89 	s_logging_callback = logging_callback;
90 }
91 
92 /*******************************************************************************
93 *******************************************************************************/
94 void
kxld_set_logging_callback_data(const char * name,void * user_data)95 kxld_set_logging_callback_data(const char *name, void *user_data)
96 {
97 	if (name) {
98 		(void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
99 		/* disallow format strings in the kxld logging callback name */
100 		for (size_t i = 0; i < sizeof(s_callback_name); i++) {
101 			if (s_callback_name[i] == '%') {
102 				s_callback_name[i] = '.';
103 			}
104 		}
105 	} else {
106 		(void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
107 	}
108 
109 	s_callback_data = user_data;
110 }
111 
112 /*******************************************************************************
113 *******************************************************************************/
/*******************************************************************************
* Formats and emits one log message.  Does nothing unless a callback has been
* registered via kxld_set_logging_callback().  The caller's format string is
* prefixed with "kxld[<name>]: " (the stored name is pre-scrubbed of '%' in
* kxld_set_logging_callback_data, so the prefix cannot introduce format
* directives), then the combined format and the variadic arguments are handed
* to the callback as a va_list.
*******************************************************************************/
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
	char stack_buffer[256];		/* fast path for short prefixed formats */
	char *alloc_buffer = NULL;	/* heap fallback when the prefix overflows */
	char *format = stack_buffer;
	u_int length = 0;
	va_list ap;

	if (s_logging_callback) {
		/* snprintf returns the length the full string would need,
		 * excluding the NUL terminator. */
		length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
		    s_callback_name, in_format);

		if (length >= sizeof(stack_buffer)) {
			/* Truncated: redo the prefixing into a buffer sized for
			 * the whole string (+1 for the NUL terminator). */
			length += 1;
			alloc_buffer = kxld_alloc(length);
			if (!alloc_buffer) {
				/* Best-effort logging: drop the message on OOM. */
				return;
			}

			snprintf(alloc_buffer, length, "kxld[%s]: %s",
			    s_callback_name, in_format);
			format = alloc_buffer;
		}

		va_start(ap, in_format);
		s_logging_callback(subsystem, level, format, ap, s_callback_data);
		va_end(ap);

		if (alloc_buffer) {
			kxld_free(alloc_buffer, length);
		}
	}
}
149 
/* Legacy threshold: page-based allocations at or below this size were
 * historically served by kalloc, larger ones by kmem_alloc.  The allocators
 * below now route through kheap_alloc_tag/kheap_free, so this limit is
 * retained for reference only.
 */
153 #define KALLOC_MAX 16 * 1024
154 
155 /*******************************************************************************
156 *******************************************************************************/
/*******************************************************************************
* Allocates 'size' bytes of zero-filled memory.  Returns NULL on failure.
* In-kernel builds draw from the default kheap tagged for OSKext; userspace
* builds use calloc.  The caller frees the result with kxld_free().
*******************************************************************************/
void *
kxld_calloc(size_t size)
{
#if KERNEL
	void *ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	void *ptr = calloc(1, size);
#endif

#if DEBUG
	/* Leak-tracking counters reported by kxld_print_memory_report(). */
	if (ptr) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return ptr;
}
178 
/*******************************************************************************
* Allocates 'size' bytes.  Returns NULL on failure.  In-kernel builds draw
* from the default kheap (zeroed, OSKext-tagged); userspace builds use plain
* malloc.  The caller frees the result with kxld_free().
*******************************************************************************/
void *
kxld_alloc(size_t size)
{
#if KERNEL
	void *ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	void *ptr = malloc(size);
#endif

#if DEBUG
	/* Leak-tracking counters reported by kxld_print_memory_report(). */
	if (ptr) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return ptr;
}
200 
201 /*******************************************************************************
202 *******************************************************************************/
/*******************************************************************************
* Allocates a zero-filled, page-rounded buffer without touching the DEBUG
* accounting counters.  Returns NULL on failure.  Pair with
* kxld_page_free_untracked().
*******************************************************************************/
void *
kxld_page_alloc_untracked(size_t size)
{
	size_t rounded = round_page(size);

#if KERNEL
	return kheap_alloc_tag(KHEAP_DEFAULT, rounded, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else /* !KERNEL */
	return calloc(1, rounded);
#endif /* KERNEL */
}
219 
220 /*******************************************************************************
221 *******************************************************************************/
/*******************************************************************************
* Tracked variant of kxld_page_alloc_untracked(): identical allocation, but
* DEBUG builds also record the allocation for kxld_print_memory_report().
* Pair with kxld_page_free().
*******************************************************************************/
void *
kxld_page_alloc(size_t size)
{
	void *ptr = kxld_page_alloc_untracked(size);

#if DEBUG
	if (ptr) {
		++num_allocations;
		/* Track the rounded size actually handed out, not the request. */
		bytes_allocated += round_page(size);
	}
#endif /* DEBUG */

	return ptr;
}
237 
238 /*******************************************************************************
239 *******************************************************************************/
/*******************************************************************************
* Releases memory obtained from kxld_alloc()/kxld_calloc().  'size' must be
* the original allocation size; it is consumed by the kernel allocator and
* by DEBUG accounting, and ignored otherwise.
*******************************************************************************/
void
kxld_free(void *ptr, size_t size)
{
	(void)size;	/* only consumed in KERNEL and DEBUG configurations */

#if DEBUG
	++num_frees;
	bytes_freed += size;
#endif

#if KERNEL
	kheap_free(KHEAP_DEFAULT, ptr, size);
#else
	free(ptr);
#endif
}
254 
255 /*******************************************************************************
256 *******************************************************************************/
/*******************************************************************************
* Releases a buffer obtained from kxld_page_alloc_untracked().  'size' must
* be the original request size; the kernel path re-rounds it to pages, the
* userspace path ignores it.
*******************************************************************************/
void
kxld_page_free_untracked(void *ptr, size_t size)
{
	(void)size;	/* only consumed in the KERNEL configuration */

#if KERNEL
	kheap_free(KHEAP_DEFAULT, ptr, round_page(size));
#else /* !KERNEL */
	free(ptr);
#endif /* KERNEL */
}
266 
267 
268 /*******************************************************************************
269 *******************************************************************************/
/*******************************************************************************
* Tracked variant of kxld_page_free_untracked(): identical release, but
* DEBUG builds also record the free (page-rounded, mirroring the accounting
* done in kxld_page_alloc()).
*******************************************************************************/
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
	++num_frees;
	bytes_freed += round_page(size);
#endif /* DEBUG */

	kxld_page_free_untracked(ptr, size);
}
279 
280 /*******************************************************************************
281 *******************************************************************************/
/*******************************************************************************
* Validates the 32-bit Mach-O image at 'file' ('size' bytes) and, when built
* for user space, byte-swaps every header into host byte order if the file
* arrived in the opposite endianness.  Returns KERN_SUCCESS only when the
* mach header, every load command, each non-zerofill section's data, every
* relocation table, the symbol table, and the string table all lie entirely
* inside the buffer; otherwise logs a truncated/malformed-Mach-O error and
* returns KERN_FAILURE.
*
* NOTE(review): the bounds checks sum untrusted 32-bit fields (for example
* sects[j].offset + sects[j].size); a crafted file could wrap such a sum.
* Presumably callers pre-screen input — confirm before relying on this
* alone for untrusted data.
*******************************************************************************/
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;	/* running byte offset of the current load command */
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE;	/* TRUE once MH_CIGAM (opposite-endian) is seen */
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary */
	if (mach_hdr->magic == MH_CIGAM) {
		swap = TRUE;
		(void) swap_mach_header(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		/* cmd/cmdsize were read raw; swap the local copies before
		 * interpreting them (the command body is swapped below). */
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

		switch (cmd) {
		case LC_SEGMENT:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_segment_command(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers (they immediately
			 * follow the segment command in the file). */
			sects = (struct section *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* Ignore segments with no vm size */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data. */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
460 
461 /*******************************************************************************
462 *******************************************************************************/
/*******************************************************************************
* 64-bit counterpart of validate_and_swap_macho_32(): validates the Mach-O
* image at 'file' ('size' bytes) and, when built for user space, byte-swaps
* every header into host byte order if the file arrived opposite-endian
* (MH_CIGAM_64).  Returns KERN_SUCCESS only when the mach header, every load
* command, each non-zerofill section's data, every relocation table, the
* symbol table, and the string table all lie entirely inside the buffer.
*
* NOTE(review): as in the 32-bit variant, several bounds checks sum
* untrusted 32-bit fields (e.g. sects[j].offset + sects[j].size), which a
* crafted file could wrap — confirm callers pre-screen untrusted input.
*******************************************************************************/
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;	/* running byte offset of the current load command */
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE;	/* TRUE once MH_CIGAM_64 (opposite-endian) is seen */
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary */
	if (mach_hdr->magic == MH_CIGAM_64) {
		swap = TRUE;
		(void) swap_mach_header_64(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC_64, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		/* cmd/cmdsize were read raw; swap the local copies before
		 * interpreting them (the command body is swapped below). */
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
		switch (cmd) {
		case LC_SEGMENT_64:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
			if (swap) {
				swap_segment_command_64(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers (they immediately
			 * follow the segment command in the file). */
			sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section_64(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* If the segment has no vm footprint, skip it */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data. */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
640 
641 #if !KERNEL
642 /*******************************************************************************
643 *******************************************************************************/
644 void
unswap_macho(u_char * file,enum NXByteOrder host_order,enum NXByteOrder target_order)645 unswap_macho(u_char *file, enum NXByteOrder host_order,
646     enum NXByteOrder target_order)
647 {
648 	struct mach_header *hdr = (struct mach_header *) ((void *) file);
649 
650 	if (!hdr) {
651 		return;
652 	}
653 
654 	if (hdr->magic == MH_MAGIC) {
655 		unswap_macho_32(file, host_order, target_order);
656 	} else if (hdr->magic == MH_MAGIC_64) {
657 		unswap_macho_64(file, host_order, target_order);
658 	}
659 }
660 
661 /*******************************************************************************
662 *******************************************************************************/
/*******************************************************************************
* Swaps a fully-linked 32-bit Mach-O image from host byte order into
* 'target_order'.  No-op when the two orders already match.  Ordering
* matters: each load command's cmd/cmdsize fields are read while still in
* host order, then the command body is swapped; the mach header is swapped
* last so ncmds stays readable throughout the loop.
*******************************************************************************/
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;	/* running byte offset of the current load command */
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT:
			seg_hdr = (struct segment_command *) load_hdr;
			sects = (struct section *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Swap the sections before the segment header that
			 * describes them, while nsects is still readable. */
			swap_section(sects, seg_hdr->nsects, target_order);
			swap_segment_command(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

			/* Swap the symbols before the symtab command, while
			 * symoff/nsyms are still readable. */
			swap_nlist(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	/* Swap the mach header last; the loop above needed ncmds. */
	(void) swap_mach_header(mach_hdr, target_order);
}
718 
719 /*******************************************************************************
720 *******************************************************************************/
/*******************************************************************************
* 64-bit counterpart of unswap_macho_32(): swaps a fully-linked 64-bit
* Mach-O image from host byte order into 'target_order'.  No-op when the two
* orders already match.  Same ordering discipline: command fields are read
* before their structures are swapped, and the mach header is swapped last.
*******************************************************************************/
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;	/* running byte offset of the current load command */
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT_64:
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
			sects = (struct section_64 *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Swap the sections before the segment header that
			 * describes them, while nsects is still readable. */
			swap_section_64(sects, seg_hdr->nsects, target_order);
			swap_segment_command_64(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

			/* Swap the symbols before the symtab command, while
			 * symoff/nsyms are still readable. */
			swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	/* Swap the mach header last; the loop above needed ncmds. */
	(void) swap_mach_header_64(mach_hdr, target_order);
}
776 #endif /* !KERNEL */
777 
778 /*******************************************************************************
779 *******************************************************************************/
780 kxld_addr_t
kxld_align_address(kxld_addr_t address,u_int align)781 kxld_align_address(kxld_addr_t address, u_int align)
782 {
783 	kxld_addr_t alignment = (1 << align);
784 	kxld_addr_t low_bits = 0;
785 
786 	if (!align) {
787 		return address;
788 	}
789 
790 	low_bits = (address) & (alignment - 1);
791 	if (low_bits) {
792 		address += (alignment - low_bits);
793 	}
794 
795 	return address;
796 }
797 
798 /*******************************************************************************
799 *******************************************************************************/
800 boolean_t
kxld_is_32_bit(cpu_type_t cputype)801 kxld_is_32_bit(cpu_type_t cputype)
802 {
803 	return !(cputype & CPU_ARCH_ABI64);
804 }
805 
806 /*******************************************************************************
807 *******************************************************************************/
/*******************************************************************************
* DEBUG-only: logs the allocation/free counters maintained by the kxld_*
* allocators above.  No-op in non-DEBUG builds.
*
* Fix: the average-allocation-size computation divided by num_allocations
* unconditionally, which is a divide-by-zero when the report runs before
* any allocation has been recorded.  Report an average of 0 in that case.
*******************************************************************************/
void
kxld_print_memory_report(void)
{
#if DEBUG
	unsigned long avg_alloc_size = num_allocations ?
	    (bytes_allocated / num_allocations) : 0;

	kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
	    "\tNumber of allocations:   %8lu\n"
	    "\tNumber of frees:         %8lu\n"
	    "\tAverage allocation size: %8lu\n"
	    "\tTotal bytes allocated:   %8lu\n"
	    "\tTotal bytes freed:       %8lu\n"
	    "\tTotal bytes leaked:      %8lu",
	    num_allocations, num_frees, avg_alloc_size,
	    bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
823 
824 /*********************************************************************
825 *********************************************************************/
826 #if !KERNEL
827 boolean_t
kxld_set_cross_link_page_size(kxld_size_t target_page_size)828 kxld_set_cross_link_page_size(kxld_size_t target_page_size)
829 {
830 	// verify radix 2
831 	if ((target_page_size != 0) &&
832 	    ((target_page_size & (target_page_size - 1)) == 0)) {
833 		s_cross_link_enabled = TRUE;
834 		s_cross_link_page_size = target_page_size;
835 
836 		return TRUE;
837 	} else {
838 		return FALSE;
839 	}
840 }
841 #endif /* !KERNEL */
842 
843 /*********************************************************************
844 *********************************************************************/
845 kxld_size_t
kxld_get_effective_page_size(void)846 kxld_get_effective_page_size(void)
847 {
848 #if KERNEL
849 	return PAGE_SIZE;
850 #else
851 	if (s_cross_link_enabled) {
852 		return s_cross_link_page_size;
853 	} else {
854 		return PAGE_SIZE;
855 	}
856 #endif /* KERNEL */
857 }
858 
859 /*********************************************************************
860 *********************************************************************/
861 kxld_addr_t
kxld_round_page_cross_safe(kxld_addr_t offset)862 kxld_round_page_cross_safe(kxld_addr_t offset)
863 {
864 #if KERNEL
865 	return round_page(offset);
866 #else
867 	// assume s_cross_link_page_size is power of 2
868 	if (s_cross_link_enabled) {
869 		return (offset + (s_cross_link_page_size - 1)) &
870 		       (~(s_cross_link_page_size - 1));
871 	} else {
872 		return round_page(offset);
873 	}
874 #endif /* KERNEL */
875 }
876 
877 #if SPLIT_KEXTS_DEBUG
878 
/* Debug aid (SPLIT_KEXTS_DEBUG builds only): logs the address ranges and
 * per-segment vm addresses recorded in a splitKextLinkInfo, covering both
 * the original kext executable and the linked output.
 */
void
kxld_show_split_info(splitKextLinkInfo *info)
{
	kxld_log(kKxldLogLinking, kKxldLogErr,
	    "splitKextLinkInfo: \n"
	    "kextExecutable %p to %p kextSize %lu \n"
	    "linkedKext %p to %p linkedKextSize %lu \n"
	    "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
	    "vmaddr_DATA %p vmaddr_DATA_CONST %p "
	    "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
	    (void *) info->kextExecutable,
	    (void *) (info->kextExecutable + info->kextSize),
	    info->kextSize,
	    (void*) info->linkedKext,
	    (void*) (info->linkedKext + info->linkedKextSize),
	    info->linkedKextSize,
	    (void *) info->vmaddr_TEXT,
	    (void *) info->vmaddr_TEXT_EXEC,
	    (void *) info->vmaddr_DATA,
	    (void *) info->vmaddr_DATA_CONST,
	    (void *) info->vmaddr_LLVM_COV,
	    (void *) info->vmaddr_LINKEDIT);
}
902 
903 boolean_t
isTargetKextName(const char * the_name)904 isTargetKextName(const char * the_name)
905 {
906 	if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
907 		return TRUE;
908 	}
909 	return FALSE;
910 }
911 #endif
912 
913 #pragma clang diagnostic pop
914