xref: /xnu-8019.80.24/libkern/kxld/kxld_util.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34     #include <kern/kalloc.h>
35     #include <libkern/libkern.h>
36     #include <mach/vm_param.h>
37     #include <vm/vm_kern.h>
38 #else
39     #include <stdio.h>
40     #include <stdlib.h>
41     #include <mach/mach_init.h>
42     #include <mach-o/swap.h>
43 #endif
44 
45 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
46 #include <AssertMacros.h>
47 
48 #include "kxld_util.h"
49 
50 #if !KERNEL
51 static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
52     enum NXByteOrder target_order);
53 static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
54     enum NXByteOrder target_order);
55 #endif /* !KERNEL */
56 
57 #if DEBUG
58 static unsigned long num_allocations = 0;
59 static unsigned long num_frees = 0;
60 static unsigned long bytes_allocated = 0;
61 static unsigned long bytes_freed = 0;
62 #endif
63 
64 static KXLDLoggingCallback s_logging_callback = NULL;
65 static char s_callback_name[64] = "internal";
66 static void *s_callback_data = NULL;
67 
68 #if !KERNEL
69 static boolean_t s_cross_link_enabled  = FALSE;
70 /* Can't use PAGE_SIZE here because it is not a compile-time constant.
71  * However from inspection below, s_cross_link_page_size is not used
72  * unless s_cross_link_enabled is TRUE, and s_cross_link_enabled is
73  * only set to TRUE when a client specifies the value. So the
 74  * default should never be used in practice.
75  */
76 static kxld_size_t s_cross_link_page_size;
77 #endif
78 
79 
80 /*******************************************************************************
81 *******************************************************************************/
/* Register the function that receives all kxld log output.  Passing NULL
 * disables logging entirely (kxld_log becomes a no-op). */
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
	s_logging_callback = logging_callback;
}
87 
88 /*******************************************************************************
89 *******************************************************************************/
90 void
kxld_set_logging_callback_data(const char * name,void * user_data)91 kxld_set_logging_callback_data(const char *name, void *user_data)
92 {
93 	if (name) {
94 		(void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
95 		/* disallow format strings in the kxld logging callback name */
96 		for (size_t i = 0; i < sizeof(s_callback_name); i++) {
97 			if (s_callback_name[i] == '%') {
98 				s_callback_name[i] = '.';
99 			}
100 		}
101 	} else {
102 		(void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
103 	}
104 
105 	s_callback_data = user_data;
106 }
107 
108 /*******************************************************************************
109 *******************************************************************************/
110 void
kxld_log(KXLDLogSubsystem subsystem,KXLDLogLevel level,const char * in_format,...)111 kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
112     const char *in_format, ...)
113 {
114 	char stack_buffer[256];
115 	char *alloc_buffer = NULL;
116 	char *format = stack_buffer;
117 	u_int length = 0;
118 	va_list ap;
119 
120 	if (s_logging_callback) {
121 		length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
122 		    s_callback_name, in_format);
123 
124 		if (length >= sizeof(stack_buffer)) {
125 			length += 1;
126 			alloc_buffer = kxld_alloc(length);
127 			if (!alloc_buffer) {
128 				return;
129 			}
130 
131 			snprintf(alloc_buffer, length, "kxld[%s]: %s",
132 			    s_callback_name, in_format);
133 			format = alloc_buffer;
134 		}
135 
136 		va_start(ap, in_format);
137 		s_logging_callback(subsystem, level, format, ap, s_callback_data);
138 		va_end(ap);
139 
140 		if (alloc_buffer) {
141 			kxld_free(alloc_buffer, length);
142 		}
143 	}
144 }
145 
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.
 *
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. `x / KALLOC_MAX` would otherwise parse as `x / 16 * 1024`). */
#define KALLOC_MAX (16 * 1024)
150 
151 /*******************************************************************************
152 *******************************************************************************/
/* Allocate `size` bytes of zero-filled memory, tracked by the DEBUG
 * accounting counters.  Returns NULL on failure.  Pair with kxld_free(). */
void *
kxld_calloc(size_t size)
{
	void * ptr = NULL;

#if KERNEL
	/* Zeroed (Z_ZERO) allocation from the default kalloc heap, tagged
	 * against the OSKext memory accounting bucket. */
	ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	ptr = calloc(1, size);
#endif

#if DEBUG
	/* Leak-tracking counters reported by kxld_print_memory_report(). */
	if (ptr) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return ptr;
}
174 
/* Allocate `size` bytes, tracked by the DEBUG accounting counters.
 * Returns NULL on failure.  Pair with kxld_free().
 *
 * NOTE(review): the kernel path zero-fills (Z_ZERO) but the userspace path
 * uses plain malloc(), so callers must not rely on zeroed memory; use
 * kxld_calloc() when zeroing is required. */
void *
kxld_alloc(size_t size)
{
	void * ptr = NULL;

#if KERNEL
	ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	ptr = malloc(size);
#endif

#if DEBUG
	/* Leak-tracking counters reported by kxld_print_memory_report(). */
	if (ptr) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return ptr;
}
196 
197 /*******************************************************************************
198 *******************************************************************************/
/* Allocate `size` bytes rounded up to a whole number of pages, zero-filled,
 * WITHOUT touching the DEBUG accounting counters.  Returns NULL on failure.
 * Pair with kxld_page_free_untracked(), which re-rounds the size. */
void *
kxld_page_alloc_untracked(size_t size)
{
	void * ptr = NULL;

	size = round_page(size);

#if KERNEL
	ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else /* !KERNEL */
	ptr = calloc(1, size);
#endif /* KERNEL */

	return ptr;
}
215 
216 /*******************************************************************************
217 *******************************************************************************/
/* Tracked variant of kxld_page_alloc_untracked(): identical allocation,
 * but also updates the DEBUG accounting counters with the page-rounded
 * size.  Pair with kxld_page_free(). */
void *
kxld_page_alloc(size_t size)
{
	void * ptr = NULL;

	ptr = kxld_page_alloc_untracked(size);
#if DEBUG
	if (ptr) {
		++num_allocations;
		/* Account the rounded size so it matches what kxld_page_free()
		 * will subtract. */
		bytes_allocated += round_page(size);
	}
#endif /* DEBUG */

	return ptr;
}
233 
234 /*******************************************************************************
235 *******************************************************************************/
/* Allocate page-rounded, pageable (swappable) memory.  In the kernel this
 * comes from kernel_map via kmem_alloc_pageable; in userspace ordinary
 * (untracked) page allocation is used since all memory is pageable there.
 * Returns NULL on failure. */
void *
kxld_alloc_pageable(size_t size)
{
	size = round_page(size);

#if KERNEL
	kern_return_t rval = 0;
	vm_offset_t ptr = 0;

	rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT);
	if (rval) {
		/* Normalize any failure to a NULL return. */
		ptr = 0;
	}

	return (void *) ptr;
#else
	return kxld_page_alloc_untracked(size);
#endif
}
255 
256 /*******************************************************************************
257 *******************************************************************************/
/* Free memory obtained from kxld_alloc()/kxld_calloc().  `size` must be the
 * size originally requested; it is marked __unused only because the
 * userspace (!KERNEL, !DEBUG) build ignores it — the kernel kfree() and the
 * DEBUG counters both consume it. */
void
kxld_free(void *ptr, size_t size __unused)
{
#if DEBUG
	++num_frees;
	bytes_freed += size;
#endif

#if KERNEL
	kfree(ptr, size);
#else
	free(ptr);
#endif
}
272 
273 /*******************************************************************************
274 *******************************************************************************/
/* Free memory obtained from kxld_page_alloc_untracked(), without touching
 * the DEBUG counters.  The kernel path re-rounds `size` to pages so it
 * matches the size the allocator actually requested. */
void
kxld_page_free_untracked(void *ptr, size_t size __unused)
{
#if KERNEL
	kfree(ptr, round_page(size));
#else /* !KERNEL */
	free(ptr);
#endif /* KERNEL */
}
284 
285 
286 /*******************************************************************************
287 *******************************************************************************/
/* Tracked variant of kxld_page_free_untracked(): updates the DEBUG
 * accounting counters (with the page-rounded size, mirroring
 * kxld_page_alloc()) and then frees the memory. */
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
	++num_frees;
	bytes_freed += round_page(size);
#endif /* DEBUG */
	kxld_page_free_untracked(ptr, size);
}
297 
298 /*******************************************************************************
299 *******************************************************************************/
/* Validate that file/size describe a well-formed 32-bit Mach-O object and,
 * in userspace builds only, byte-swap the headers, sections, relocations
 * and symbol table into `host_order` when the file is byte-reversed
 * (MH_CIGAM).  Returns KERN_SUCCESS if valid, KERN_FAILURE (after logging)
 * otherwise.
 *
 * Statement order is load-bearing: every header field used for sizing or
 * addressing (cmdsize, nsects, reloff, symoff, ...) must be read AFTER its
 * containing structure has been swapped into host order.
 *
 * NOTE(review): the bounds checks below use unchecked additions of fields
 * taken from the (untrusted) file, e.g. `size >= offset + cmdsize` and
 * `size >= fileoff + filesize`.  On targets where u_long is 32 bits these
 * sums can wrap and defeat the check — confirm callers pre-sanitize, or
 * harden with subtraction-style comparisons. */
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;      /* file offset of the current load command */
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE; /* TRUE when the file is in reversed byte order */
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary */
	if (mach_hdr->magic == MH_CIGAM) {
		swap = TRUE;
		(void) swap_mach_header(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		/* NOTE(review): offset + cmdsize may wrap on 32-bit u_long. */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

		switch (cmd) {
		case LC_SEGMENT:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_segment_command(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers */
			sects = (struct section *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* Ignore segments with no vm size */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data. */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				/* NOTE(review): nreloc * sizeof(*relocs) is unchecked
				 * multiplication of a file-supplied count. */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			/* NOTE(review): nsyms * sizeof(*symtab) is unchecked
			 * multiplication of a file-supplied count. */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
478 
479 /*******************************************************************************
480 *******************************************************************************/
/* 64-bit counterpart of validate_and_swap_macho_32(): validate that
 * file/size describe a well-formed 64-bit Mach-O object and, in userspace
 * builds only, byte-swap it into `host_order` when it is byte-reversed
 * (MH_CIGAM_64).  Returns KERN_SUCCESS if valid, KERN_FAILURE (after
 * logging) otherwise.
 *
 * Statement order is load-bearing: sizing/addressing fields must be read
 * AFTER their containing structure has been swapped into host order.
 *
 * NOTE(review): as in the 32-bit variant, the `size >= a + b` bounds
 * checks add unvalidated 64-bit fields from the (untrusted) file, e.g.
 * `fileoff + filesize`, which can wrap even with 64-bit u_long — worth
 * hardening with subtraction-style comparisons. */
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;      /* file offset of the current load command */
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE; /* TRUE when the file is in reversed byte order */
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary */
	if (mach_hdr->magic == MH_CIGAM_64) {
		swap = TRUE;
		(void) swap_mach_header_64(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC_64, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		/* NOTE(review): offset + cmdsize may wrap on 32-bit u_long. */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
		switch (cmd) {
		case LC_SEGMENT_64:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
			if (swap) {
				swap_segment_command_64(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers */
			sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section_64(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* If the segment has no vm footprint, skip it */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data. */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
658 
659 #if !KERNEL
660 /*******************************************************************************
661 *******************************************************************************/
662 void
unswap_macho(u_char * file,enum NXByteOrder host_order,enum NXByteOrder target_order)663 unswap_macho(u_char *file, enum NXByteOrder host_order,
664     enum NXByteOrder target_order)
665 {
666 	struct mach_header *hdr = (struct mach_header *) ((void *) file);
667 
668 	if (!hdr) {
669 		return;
670 	}
671 
672 	if (hdr->magic == MH_MAGIC) {
673 		unswap_macho_32(file, host_order, target_order);
674 	} else if (hdr->magic == MH_MAGIC_64) {
675 		unswap_macho_64(file, host_order, target_order);
676 	}
677 }
678 
679 /*******************************************************************************
680 *******************************************************************************/
/* Swap a 32-bit Mach-O image from host order back to `target_order`.
 * No-op when the orders already match.  Called after linking completes,
 * so relocation entries no longer need handling.
 *
 * Ordering is load-bearing: fields such as cmdsize, nsects and symoff are
 * read while still in host order, so contained structures (sections,
 * nlists) are swapped BEFORE the header that describes them, and the mach
 * header itself is swapped last. */
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		/* Capture cmd/cmdsize before this command gets swapped. */
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT:
			seg_hdr = (struct segment_command *) load_hdr;
			sects = (struct section *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			swap_section(sects, seg_hdr->nsects, target_order);
			swap_segment_command(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

			swap_nlist(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	(void) swap_mach_header(mach_hdr, target_order);
}
736 
737 /*******************************************************************************
738 *******************************************************************************/
/* 64-bit counterpart of unswap_macho_32(): swap a 64-bit Mach-O image from
 * host order back to `target_order`.  No-op when the orders match.
 *
 * Same ordering constraint: contained structures are swapped before the
 * header that describes them, and the mach header is swapped last. */
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		/* Capture cmd/cmdsize before this command gets swapped. */
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT_64:
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
			sects = (struct section_64 *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			swap_section_64(sects, seg_hdr->nsects, target_order);
			swap_segment_command_64(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

			swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	(void) swap_mach_header_64(mach_hdr, target_order);
}
794 #endif /* !KERNEL */
795 
796 /*******************************************************************************
797 *******************************************************************************/
798 kxld_addr_t
kxld_align_address(kxld_addr_t address,u_int align)799 kxld_align_address(kxld_addr_t address, u_int align)
800 {
801 	kxld_addr_t alignment = (1 << align);
802 	kxld_addr_t low_bits = 0;
803 
804 	if (!align) {
805 		return address;
806 	}
807 
808 	low_bits = (address) & (alignment - 1);
809 	if (low_bits) {
810 		address += (alignment - low_bits);
811 	}
812 
813 	return address;
814 }
815 
816 /*******************************************************************************
817 *******************************************************************************/
818 boolean_t
kxld_is_32_bit(cpu_type_t cputype)819 kxld_is_32_bit(cpu_type_t cputype)
820 {
821 	return !(cputype & CPU_ARCH_ABI64);
822 }
823 
824 /*******************************************************************************
825 *******************************************************************************/
/* Log a summary of kxld's allocation/free accounting counters.
 * Only does anything in DEBUG builds; a no-op otherwise. */
void
kxld_print_memory_report(void)
{
#if DEBUG
	/* Guard the average: dividing by num_allocations would trap with a
	 * divide-by-zero if no allocations were ever made. */
	unsigned long average_size = (num_allocations != 0) ?
	    (bytes_allocated / num_allocations) : 0;

	kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
	    "\tNumber of allocations:   %8lu\n"
	    "\tNumber of frees:         %8lu\n"
	    "\tAverage allocation size: %8lu\n"
	    "\tTotal bytes allocated:   %8lu\n"
	    "\tTotal bytes freed:       %8lu\n"
	    "\tTotal bytes leaked:      %8lu",
	    num_allocations, num_frees, average_size,
	    bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
841 
842 /*********************************************************************
843 *********************************************************************/
844 #if !KERNEL
845 boolean_t
kxld_set_cross_link_page_size(kxld_size_t target_page_size)846 kxld_set_cross_link_page_size(kxld_size_t target_page_size)
847 {
848 	// verify radix 2
849 	if ((target_page_size != 0) &&
850 	    ((target_page_size & (target_page_size - 1)) == 0)) {
851 		s_cross_link_enabled = TRUE;
852 		s_cross_link_page_size = target_page_size;
853 
854 		return TRUE;
855 	} else {
856 		return FALSE;
857 	}
858 }
859 #endif /* !KERNEL */
860 
861 /*********************************************************************
862 *********************************************************************/
863 kxld_size_t
kxld_get_effective_page_size(void)864 kxld_get_effective_page_size(void)
865 {
866 #if KERNEL
867 	return PAGE_SIZE;
868 #else
869 	if (s_cross_link_enabled) {
870 		return s_cross_link_page_size;
871 	} else {
872 		return PAGE_SIZE;
873 	}
874 #endif /* KERNEL */
875 }
876 
877 /*********************************************************************
878 *********************************************************************/
879 kxld_addr_t
kxld_round_page_cross_safe(kxld_addr_t offset)880 kxld_round_page_cross_safe(kxld_addr_t offset)
881 {
882 #if KERNEL
883 	return round_page(offset);
884 #else
885 	// assume s_cross_link_page_size is power of 2
886 	if (s_cross_link_enabled) {
887 		return (offset + (s_cross_link_page_size - 1)) &
888 		       (~(s_cross_link_page_size - 1));
889 	} else {
890 		return round_page(offset);
891 	}
892 #endif /* KERNEL */
893 }
894 
895 #if SPLIT_KEXTS_DEBUG
896 
/* Dump the address ranges recorded in a splitKextLinkInfo — original and
 * linked executable extents plus per-segment vmaddrs — to the kxld log.
 * Logged at error level so it is emitted unconditionally; split-kext
 * debugging aid only (compiled under SPLIT_KEXTS_DEBUG). */
void
kxld_show_split_info(splitKextLinkInfo *info)
{
	kxld_log(kKxldLogLinking, kKxldLogErr,
	    "splitKextLinkInfo: \n"
	    "kextExecutable %p to %p kextSize %lu \n"
	    "linkedKext %p to %p linkedKextSize %lu \n"
	    "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
	    "vmaddr_DATA %p vmaddr_DATA_CONST %p "
	    "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
	    (void *) info->kextExecutable,
	    (void *) (info->kextExecutable + info->kextSize),
	    info->kextSize,
	    (void*) info->linkedKext,
	    (void*) (info->linkedKext + info->linkedKextSize),
	    info->linkedKextSize,
	    (void *) info->vmaddr_TEXT,
	    (void *) info->vmaddr_TEXT_EXEC,
	    (void *) info->vmaddr_DATA,
	    (void *) info->vmaddr_DATA_CONST,
	    (void *) info->vmaddr_LLVM_COV,
	    (void *) info->vmaddr_LINKEDIT);
}
920 
921 boolean_t
isTargetKextName(const char * the_name)922 isTargetKextName(const char * the_name)
923 {
924 	if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
925 		return TRUE;
926 	}
927 	return FALSE;
928 }
929 #endif
930