xref: /xnu-8020.140.41/libkern/kxld/kxld_util.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34     #include <kern/kalloc.h>
35     #include <libkern/libkern.h>
36     #include <mach/vm_param.h>
37     #include <vm/vm_kern.h>
38 #else
39     #include <stdio.h>
40     #include <stdlib.h>
41     #include <mach/mach_init.h>
42     #include <mach-o/swap.h>
43 #endif
44 
45 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
46 #include <AssertMacros.h>
47 
48 #include "kxld_util.h"
49 
#if !KERNEL
/* Forward declarations for the user-space-only helpers that convert a
 * linked Mach-O image back to the target's byte order.
 */
static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
#endif /* !KERNEL */

#if DEBUG
/* Allocation accounting for DEBUG builds, reported by
 * kxld_print_memory_report().
 */
static unsigned long num_allocations = 0;
static unsigned long num_frees = 0;
static unsigned long bytes_allocated = 0;
static unsigned long bytes_freed = 0;
#endif

/* Logging state: the registered callback, the name prepended to every
 * message (sanitized against '%' in kxld_set_logging_callback_data()),
 * and the opaque client context passed back to the callback.
 */
static KXLDLoggingCallback s_logging_callback = NULL;
static char s_callback_name[64] = "internal";
static void *s_callback_data = NULL;

#if !KERNEL
static boolean_t s_cross_link_enabled  = FALSE;
/* Can't use PAGE_SIZE here because it is not a compile-time constant.
 * However from inspection below, s_cross_link_page_size is not used
 * unless s_cross_link_enabled is TRUE, and s_cross_link_enabled is
 * only set to TRUE when a client specifies the value. So the
 * default should never be used in practice.
 */
static kxld_size_t s_cross_link_page_size;
#endif
78 
79 
80 /*******************************************************************************
81 *******************************************************************************/
/* Install (or clear, by passing NULL) the callback through which all
 * kxld_log() output is delivered.
 */
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
	s_logging_callback = logging_callback;
}
87 
88 /*******************************************************************************
89 *******************************************************************************/
90 void
kxld_set_logging_callback_data(const char * name,void * user_data)91 kxld_set_logging_callback_data(const char *name, void *user_data)
92 {
93 	if (name) {
94 		(void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
95 		/* disallow format strings in the kxld logging callback name */
96 		for (size_t i = 0; i < sizeof(s_callback_name); i++) {
97 			if (s_callback_name[i] == '%') {
98 				s_callback_name[i] = '.';
99 			}
100 		}
101 	} else {
102 		(void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
103 	}
104 
105 	s_callback_data = user_data;
106 }
107 
108 /*******************************************************************************
109 *******************************************************************************/
/* Emit a log message through the registered callback, prefixed as
 * "kxld[<callback name>]: <in_format>". The caller's varargs are forwarded
 * untouched and expanded by the callback against the combined format
 * string. If the combined format does not fit in the stack buffer, a heap
 * buffer of exactly the required size is used instead. No-op when no
 * callback is registered; silently drops the message if the heap
 * allocation fails.
 */
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
	char stack_buffer[256];
	char *alloc_buffer = NULL;
	char *format = stack_buffer;
	u_int length = 0;
	va_list ap;

	if (s_logging_callback) {
		/* snprintf returns the length the full expansion would need,
		 * excluding the terminating NUL.
		 */
		length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
		    s_callback_name, in_format);

		if (length >= sizeof(stack_buffer)) {
			length += 1; /* room for the NUL terminator */
			alloc_buffer = kxld_alloc(length);
			if (!alloc_buffer) {
				return;
			}

			snprintf(alloc_buffer, length, "kxld[%s]: %s",
			    s_callback_name, in_format);
			format = alloc_buffer;
		}

		va_start(ap, in_format);
		s_logging_callback(subsystem, level, format, ap, s_callback_data);
		va_end(ap);

		if (alloc_buffer) {
			kxld_free(alloc_buffer, length);
		}
	}
}
145 
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.
 */
/* Parenthesized so the macro expands safely inside larger expressions
 * (e.g. "x * KALLOC_MAX" or "KALLOC_MAX - 1").
 */
#define KALLOC_MAX (16 * 1024)
150 
151 /*******************************************************************************
152 *******************************************************************************/
/* Allocate size bytes of zero-filled memory. Kernel builds draw from
 * KHEAP_DEFAULT with Z_ZERO; user-space builds use calloc. Returns NULL
 * on failure. Pair with kxld_free().
 */
void *
kxld_calloc(size_t size)
{
	void *ptr = NULL;

#if KERNEL
	ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	ptr = calloc(1, size);
#endif

#if DEBUG
	if (ptr != NULL) {
		num_allocations += 1;
		bytes_allocated += size;
	}
#endif

	return ptr;
}
174 
/* Allocate size bytes. Kernel builds return zeroed memory (Z_ZERO); the
 * user-space malloc path leaves the contents uninitialized. Returns NULL
 * on failure. Pair with kxld_free().
 */
void *
kxld_alloc(size_t size)
{
	void *ptr = NULL;

#if KERNEL
	ptr = kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else
	ptr = malloc(size);
#endif

#if DEBUG
	if (ptr != NULL) {
		num_allocations += 1;
		bytes_allocated += size;
	}
#endif

	return ptr;
}
196 
197 /*******************************************************************************
198 *******************************************************************************/
/* Allocate a whole number of pages (the request is rounded up to a page
 * boundary). "Untracked" means the allocation is not counted in the DEBUG
 * statistics; see kxld_page_alloc() for the tracked variant. Free with
 * kxld_page_free_untracked().
 */
void *
kxld_page_alloc_untracked(size_t size)
{
	void *ptr = NULL;
	size_t rounded_size = round_page(size);

#if KERNEL
	ptr = kheap_alloc_tag(KHEAP_DEFAULT, rounded_size, Z_WAITOK | Z_ZERO,
	    VM_KERN_MEMORY_OSKEXT);
#else /* !KERNEL */
	ptr = calloc(1, rounded_size);
#endif /* KERNEL */

	return ptr;
}
215 
216 /*******************************************************************************
217 *******************************************************************************/
/* Tracked variant of kxld_page_alloc_untracked(): identical allocation,
 * but DEBUG builds also add the page-rounded size to the statistics.
 */
void *
kxld_page_alloc(size_t size)
{
	void *ptr = kxld_page_alloc_untracked(size);

#if DEBUG
	if (ptr != NULL) {
		num_allocations += 1;
		bytes_allocated += round_page(size);
	}
#endif /* DEBUG */

	return ptr;
}
233 
234 /*******************************************************************************
235 *******************************************************************************/
236 void
kxld_free(void * ptr,size_t size __unused)237 kxld_free(void *ptr, size_t size __unused)
238 {
239 #if DEBUG
240 	++num_frees;
241 	bytes_freed += size;
242 #endif
243 
244 #if KERNEL
245 	kheap_free(KHEAP_DEFAULT, ptr, size);
246 #else
247 	free(ptr);
248 #endif
249 }
250 
251 /*******************************************************************************
252 *******************************************************************************/
253 void
kxld_page_free_untracked(void * ptr,size_t size __unused)254 kxld_page_free_untracked(void *ptr, size_t size __unused)
255 {
256 #if KERNEL
257 	kheap_free(KHEAP_DEFAULT, ptr, round_page(size));
258 #else /* !KERNEL */
259 	free(ptr);
260 #endif /* KERNEL */
261 }
262 
263 
264 /*******************************************************************************
265 *******************************************************************************/
266 void
kxld_page_free(void * ptr,size_t size)267 kxld_page_free(void *ptr, size_t size)
268 {
269 #if DEBUG
270 	++num_frees;
271 	bytes_freed += round_page(size);
272 #endif /* DEBUG */
273 	kxld_page_free_untracked(ptr, size);
274 }
275 
276 /*******************************************************************************
277 *******************************************************************************/
278 kern_return_t
validate_and_swap_macho_32(u_char * file,u_long size,enum NXByteOrder host_order)279 validate_and_swap_macho_32(u_char *file, u_long size
280 #if !KERNEL
281     , enum NXByteOrder host_order
282 #endif /* !KERNEL */
283     )
284 {
285 	kern_return_t rval = KERN_FAILURE;
286 	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
287 	struct load_command *load_hdr = NULL;
288 	struct segment_command *seg_hdr = NULL;
289 	struct section *sects = NULL;
290 	struct relocation_info *relocs = NULL;
291 	struct symtab_command *symtab_hdr = NULL;
292 	struct nlist *symtab = NULL;
293 	u_long offset = 0;
294 	u_int cmd = 0;
295 	u_int cmdsize = 0;
296 	u_int i = 0;
297 	u_int j = 0;
298 #if !KERNEL
299 	boolean_t swap = FALSE;
300 #endif /* !KERNEL */
301 
302 	check(file);
303 	check(size);
304 
305 	/* Verify that the file is big enough for the mach header */
306 	require_action(size >= sizeof(*mach_hdr), finish,
307 	    rval = KERN_FAILURE;
308 	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
309 	offset = sizeof(*mach_hdr);
310 
311 #if !KERNEL
312 	/* Swap the mach header if necessary */
313 	if (mach_hdr->magic == MH_CIGAM) {
314 		swap = TRUE;
315 		(void) swap_mach_header(mach_hdr, host_order);
316 	}
317 #endif /* !KERNEL */
318 
319 	/* Validate the mach_header's magic number */
320 	require_action(mach_hdr->magic == MH_MAGIC, finish,
321 	    rval = KERN_FAILURE;
322 	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
323 	    "Invalid magic number: 0x%x.", mach_hdr->magic));
324 
325 	/* If in the running kernel, and asked to validate the kernel
326 	 * (which is the only file of type MH_EXECUTE we should ever see),
327 	 * then just assume it's ok or we wouldn't be running to begin with.
328 	 */
329 #if KERNEL
330 	if (mach_hdr->filetype == MH_EXECUTE) {
331 		rval = KERN_SUCCESS;
332 		goto finish;
333 	}
334 #endif /* KERNEL */
335 
336 	/* Validate and potentially swap the load commands */
337 	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
338 		/* Get the load command and size */
339 		load_hdr = (struct load_command *) ((void *) (file + offset));
340 		cmd = load_hdr->cmd;
341 		cmdsize = load_hdr->cmdsize;
342 
343 #if !KERNEL
344 		if (swap) {
345 			cmd = OSSwapInt32(load_hdr->cmd);
346 			cmdsize = OSSwapInt32(load_hdr->cmdsize);
347 		}
348 #endif /* !KERNEL */
349 
350 		/* Verify that the file is big enough to contain the load command */
351 		require_action(size >= offset + cmdsize, finish,
352 		    rval = KERN_FAILURE;
353 		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
354 
355 		switch (cmd) {
356 		case LC_SEGMENT:
357 			/* Get and swap the segment header */
358 			seg_hdr = (struct segment_command *) load_hdr;
359 #if !KERNEL
360 			if (swap) {
361 				swap_segment_command(seg_hdr, host_order);
362 			}
363 #endif /* !KERNEL */
364 
365 			/* Get and swap the section headers */
366 			sects = (struct section *) &seg_hdr[1];
367 #if !KERNEL
368 			if (swap) {
369 				swap_section(sects, seg_hdr->nsects, host_order);
370 			}
371 #endif /* !KERNEL */
372 
373 			/* Ignore segments with no vm size */
374 			if (!seg_hdr->vmsize) {
375 				continue;
376 			}
377 
378 			/* Verify that the file is big enough for the segment data. */
379 			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
380 			    rval = KERN_FAILURE;
381 			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
382 
383 			for (j = 0; j < seg_hdr->nsects; ++j) {
384 				/* Verify that, if the section is not to be zero filled on
385 				 * demand, that file is big enough for the section's data.
386 				 */
387 				require_action((sects[j].flags & S_ZEROFILL) ||
388 				    (size >= sects[j].offset + sects[j].size), finish,
389 				    rval = KERN_FAILURE;
390 				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
391 
392 				/* Verify that the file is big enough for the section's
393 				 * relocation entries.
394 				 */
395 				require_action(size >=
396 				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
397 				    rval = KERN_FAILURE;
398 				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
399 
400 				/* Swap the relocation entries */
401 				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
402 #if !KERNEL
403 				if (swap) {
404 					swap_relocation_info(relocs, sects[j].nreloc,
405 					    host_order);
406 				}
407 #endif /* !KERNEL */
408 			}
409 
410 			break;
411 		case LC_SYMTAB:
412 			/* Get and swap the symtab header */
413 			symtab_hdr = (struct symtab_command *) load_hdr;
414 #if !KERNEL
415 			if (swap) {
416 				swap_symtab_command(symtab_hdr, host_order);
417 			}
418 #endif /* !KERNEL */
419 
420 			/* Verify that the file is big enough for the symbol table */
421 			require_action(size >=
422 			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
423 			    rval = KERN_FAILURE;
424 			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
425 
426 			/* Verify that the file is big enough for the string table */
427 			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
428 			    rval = KERN_FAILURE;
429 			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
430 
431 #if !KERNEL
432 			/* Swap the symbol table entries */
433 			symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
434 			if (swap) {
435 				swap_nlist(symtab, symtab_hdr->nsyms, host_order);
436 			}
437 #endif /* !KERNEL */
438 
439 			break;
440 		default:
441 #if !KERNEL
442 			/* Swap the load command */
443 			if (swap) {
444 				swap_load_command(load_hdr, host_order);
445 			}
446 #endif /* !KERNEL */
447 			break;
448 		}
449 	}
450 
451 	rval = KERN_SUCCESS;
452 
453 finish:
454 	return rval;
455 }
456 
457 /*******************************************************************************
458 *******************************************************************************/
459 kern_return_t
validate_and_swap_macho_64(u_char * file,u_long size,enum NXByteOrder host_order)460 validate_and_swap_macho_64(u_char *file, u_long size
461 #if !KERNEL
462     , enum NXByteOrder host_order
463 #endif /* !KERNEL */
464     )
465 {
466 	kern_return_t rval = KERN_FAILURE;
467 	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
468 	struct load_command *load_hdr = NULL;
469 	struct segment_command_64 *seg_hdr = NULL;
470 	struct section_64 *sects = NULL;
471 	struct relocation_info *relocs = NULL;
472 	struct symtab_command *symtab_hdr = NULL;
473 	struct nlist_64 *symtab = NULL;
474 	u_long offset = 0;
475 	u_int cmd = 0;
476 	u_int cmdsize = 0;
477 	u_int i = 0;
478 	u_int j = 0;
479 #if !KERNEL
480 	boolean_t swap = FALSE;
481 #endif /* !KERNEL */
482 
483 	check(file);
484 	check(size);
485 
486 	/* Verify that the file is big enough for the mach header */
487 	require_action(size >= sizeof(*mach_hdr), finish,
488 	    rval = KERN_FAILURE;
489 	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
490 	offset = sizeof(*mach_hdr);
491 
492 #if !KERNEL
493 	/* Swap the mach header if necessary */
494 	if (mach_hdr->magic == MH_CIGAM_64) {
495 		swap = TRUE;
496 		(void) swap_mach_header_64(mach_hdr, host_order);
497 	}
498 #endif /* !KERNEL */
499 
500 	/* Validate the mach_header's magic number */
501 	require_action(mach_hdr->magic == MH_MAGIC_64, finish,
502 	    rval = KERN_FAILURE;
503 	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
504 	    "Invalid magic number: 0x%x.", mach_hdr->magic));
505 
506 	/* If in the running kernel, and asked to validate the kernel
507 	 * (which is the only file of type MH_EXECUTE we should ever see),
508 	 * then just assume it's ok or we wouldn't be running to begin with.
509 	 */
510 #if KERNEL
511 	if (mach_hdr->filetype == MH_EXECUTE) {
512 		rval = KERN_SUCCESS;
513 		goto finish;
514 	}
515 #endif /* KERNEL */
516 
517 	/* Validate and potentially swap the load commands */
518 	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
519 		/* Get the load command and size */
520 		load_hdr = (struct load_command *) ((void *) (file + offset));
521 		cmd = load_hdr->cmd;
522 		cmdsize = load_hdr->cmdsize;
523 
524 #if !KERNEL
525 		if (swap) {
526 			cmd = OSSwapInt32(load_hdr->cmd);
527 			cmdsize = OSSwapInt32(load_hdr->cmdsize);
528 		}
529 #endif /* !KERNEL */
530 
531 		/* Verify that the file is big enough to contain the load command */
532 		require_action(size >= offset + cmdsize, finish,
533 		    rval = KERN_FAILURE;
534 		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
535 		switch (cmd) {
536 		case LC_SEGMENT_64:
537 			/* Get and swap the segment header */
538 			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
539 #if !KERNEL
540 			if (swap) {
541 				swap_segment_command_64(seg_hdr, host_order);
542 			}
543 #endif /* !KERNEL */
544 
545 			/* Get and swap the section headers */
546 			sects = (struct section_64 *) &seg_hdr[1];
547 #if !KERNEL
548 			if (swap) {
549 				swap_section_64(sects, seg_hdr->nsects, host_order);
550 			}
551 #endif /* !KERNEL */
552 
553 			/* If the segment has no vm footprint, skip it */
554 			if (!seg_hdr->vmsize) {
555 				continue;
556 			}
557 
558 			/* Verify that the file is big enough for the segment data. */
559 			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
560 			    rval = KERN_FAILURE;
561 			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
562 
563 			for (j = 0; j < seg_hdr->nsects; ++j) {
564 				/* Verify that, if the section is not to be zero filled on
565 				 * demand, that file is big enough for the section's data.
566 				 */
567 				require_action((sects[j].flags & S_ZEROFILL) ||
568 				    (size >= sects[j].offset + sects[j].size), finish,
569 				    rval = KERN_FAILURE;
570 				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
571 
572 				/* Verify that the file is big enough for the section's
573 				 * relocation entries.
574 				 */
575 				require_action(size >=
576 				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
577 				    rval = KERN_FAILURE;
578 				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
579 
580 				/* Swap the relocation entries */
581 				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
582 #if !KERNEL
583 				if (swap) {
584 					swap_relocation_info(relocs, sects[j].nreloc,
585 					    host_order);
586 				}
587 #endif /* !KERNEL */
588 			}
589 
590 			break;
591 		case LC_SYMTAB:
592 			/* Get and swap the symtab header */
593 			symtab_hdr = (struct symtab_command *) load_hdr;
594 #if !KERNEL
595 			if (swap) {
596 				swap_symtab_command(symtab_hdr, host_order);
597 			}
598 #endif /* !KERNEL */
599 
600 			/* Verify that the file is big enough for the symbol table */
601 			require_action(size >=
602 			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
603 			    rval = KERN_FAILURE;
604 			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
605 
606 			/* Verify that the file is big enough for the string table */
607 			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
608 			    rval = KERN_FAILURE;
609 			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
610 
611 #if !KERNEL
612 			/* Swap the symbol table entries */
613 			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
614 			if (swap) {
615 				swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
616 			}
617 #endif /* !KERNEL */
618 
619 			break;
620 		default:
621 #if !KERNEL
622 			/* Swap the load command */
623 			if (swap) {
624 				swap_load_command(load_hdr, host_order);
625 			}
626 #endif /* !KERNEL */
627 			break;
628 		}
629 	}
630 
631 	rval = KERN_SUCCESS;
632 
633 finish:
634 	return rval;
635 }
636 
637 #if !KERNEL
638 /*******************************************************************************
639 *******************************************************************************/
640 void
unswap_macho(u_char * file,enum NXByteOrder host_order,enum NXByteOrder target_order)641 unswap_macho(u_char *file, enum NXByteOrder host_order,
642     enum NXByteOrder target_order)
643 {
644 	struct mach_header *hdr = (struct mach_header *) ((void *) file);
645 
646 	if (!hdr) {
647 		return;
648 	}
649 
650 	if (hdr->magic == MH_MAGIC) {
651 		unswap_macho_32(file, host_order, target_order);
652 	} else if (hdr->magic == MH_MAGIC_64) {
653 		unswap_macho_64(file, host_order, target_order);
654 	}
655 }
656 
657 /*******************************************************************************
658 *******************************************************************************/
/* Re-swap a 32-bit Mach-O image to target_order; no-op when host and
 * target orders already match. Ordering is load-bearing throughout: each
 * header's fields must be read while still in host order, so contents are
 * swapped before their containing struct, and the mach header is swapped
 * last of all.
 */
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		/* Capture cmd/cmdsize while the command is still in host order. */
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT:
			seg_hdr = (struct segment_command *) load_hdr;
			sects = (struct section *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Sections first: swap_section reads nsects from the
			 * still-host-order segment header.
			 */
			swap_section(sects, seg_hdr->nsects, target_order);
			swap_segment_command(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

			/* Symbols first: symoff/nsyms must be read in host order. */
			swap_nlist(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	/* Header last: ncmds was needed in host order by the loop above. */
	(void) swap_mach_header(mach_hdr, target_order);
}
714 
715 /*******************************************************************************
716 *******************************************************************************/
/* Re-swap a 64-bit Mach-O image to target_order; no-op when host and
 * target orders already match. Same ordering constraints as the 32-bit
 * variant: contents are swapped before the struct whose fields locate
 * them, and the mach header is swapped last.
 */
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		/* Capture cmd/cmdsize while the command is still in host order. */
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT_64:
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
			sects = (struct section_64 *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Sections first: swap_section_64 reads nsects from the
			 * still-host-order segment header.
			 */
			swap_section_64(sects, seg_hdr->nsects, target_order);
			swap_segment_command_64(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

			/* Symbols first: symoff/nsyms must be read in host order. */
			swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	/* Header last: ncmds was needed in host order by the loop above. */
	(void) swap_mach_header_64(mach_hdr, target_order);
}
772 #endif /* !KERNEL */
773 
774 /*******************************************************************************
775 *******************************************************************************/
776 kxld_addr_t
kxld_align_address(kxld_addr_t address,u_int align)777 kxld_align_address(kxld_addr_t address, u_int align)
778 {
779 	kxld_addr_t alignment = (1 << align);
780 	kxld_addr_t low_bits = 0;
781 
782 	if (!align) {
783 		return address;
784 	}
785 
786 	low_bits = (address) & (alignment - 1);
787 	if (low_bits) {
788 		address += (alignment - low_bits);
789 	}
790 
791 	return address;
792 }
793 
794 /*******************************************************************************
795 *******************************************************************************/
796 boolean_t
kxld_is_32_bit(cpu_type_t cputype)797 kxld_is_32_bit(cpu_type_t cputype)
798 {
799 	return !(cputype & CPU_ARCH_ABI64);
800 }
801 
802 /*******************************************************************************
803 *******************************************************************************/
/* Log a summary of kxld's allocation statistics. Only meaningful in DEBUG
 * builds; compiles to a no-op otherwise.
 */
void
kxld_print_memory_report(void)
{
#if DEBUG
	/* Guard the average against division by zero when nothing was ever
	 * allocated.
	 */
	unsigned long average_size = num_allocations ?
	    (bytes_allocated / num_allocations) : 0;

	kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
	    "\tNumber of allocations:   %8lu\n"
	    "\tNumber of frees:         %8lu\n"
	    "\tAverage allocation size: %8lu\n"
	    "\tTotal bytes allocated:   %8lu\n"
	    "\tTotal bytes freed:       %8lu\n"
	    "\tTotal bytes leaked:      %8lu",
	    num_allocations, num_frees, average_size,
	    bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
819 
820 /*********************************************************************
821 *********************************************************************/
822 #if !KERNEL
823 boolean_t
kxld_set_cross_link_page_size(kxld_size_t target_page_size)824 kxld_set_cross_link_page_size(kxld_size_t target_page_size)
825 {
826 	// verify radix 2
827 	if ((target_page_size != 0) &&
828 	    ((target_page_size & (target_page_size - 1)) == 0)) {
829 		s_cross_link_enabled = TRUE;
830 		s_cross_link_page_size = target_page_size;
831 
832 		return TRUE;
833 	} else {
834 		return FALSE;
835 	}
836 }
837 #endif /* !KERNEL */
838 
839 /*********************************************************************
840 *********************************************************************/
841 kxld_size_t
kxld_get_effective_page_size(void)842 kxld_get_effective_page_size(void)
843 {
844 #if KERNEL
845 	return PAGE_SIZE;
846 #else
847 	if (s_cross_link_enabled) {
848 		return s_cross_link_page_size;
849 	} else {
850 		return PAGE_SIZE;
851 	}
852 #endif /* KERNEL */
853 }
854 
855 /*********************************************************************
856 *********************************************************************/
857 kxld_addr_t
kxld_round_page_cross_safe(kxld_addr_t offset)858 kxld_round_page_cross_safe(kxld_addr_t offset)
859 {
860 #if KERNEL
861 	return round_page(offset);
862 #else
863 	// assume s_cross_link_page_size is power of 2
864 	if (s_cross_link_enabled) {
865 		return (offset + (s_cross_link_page_size - 1)) &
866 		       (~(s_cross_link_page_size - 1));
867 	} else {
868 		return round_page(offset);
869 	}
870 #endif /* KERNEL */
871 }
872 
873 #if SPLIT_KEXTS_DEBUG
874 
/* Dump the address ranges recorded in a splitKextLinkInfo — the original
 * and linked executables plus each split segment's vmaddr — for debugging
 * split-kext linking (SPLIT_KEXTS_DEBUG builds only).
 */
void
kxld_show_split_info(splitKextLinkInfo *info)
{
	kxld_log(kKxldLogLinking, kKxldLogErr,
	    "splitKextLinkInfo: \n"
	    "kextExecutable %p to %p kextSize %lu \n"
	    "linkedKext %p to %p linkedKextSize %lu \n"
	    "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
	    "vmaddr_DATA %p vmaddr_DATA_CONST %p "
	    "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
	    (void *) info->kextExecutable,
	    (void *) (info->kextExecutable + info->kextSize),
	    info->kextSize,
	    (void*) info->linkedKext,
	    (void*) (info->linkedKext + info->linkedKextSize),
	    info->linkedKextSize,
	    (void *) info->vmaddr_TEXT,
	    (void *) info->vmaddr_TEXT_EXEC,
	    (void *) info->vmaddr_DATA,
	    (void *) info->vmaddr_DATA_CONST,
	    (void *) info->vmaddr_LLVM_COV,
	    (void *) info->vmaddr_LINKEDIT);
}
898 
899 boolean_t
isTargetKextName(const char * the_name)900 isTargetKextName(const char * the_name)
901 {
902 	if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
903 		return TRUE;
904 	}
905 	return FALSE;
906 }
907 #endif
908