/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>
#include <mach_kdp.h>
#include <debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>
#include <arm/misc_protos.h>
#include <arm/lowglobals.h>

#include <pexpert/arm/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>

/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;

/*
 * KASLR parameters
 */
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_kext_base;
vm_offset_t vm_kext_top;
vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;
vm_offset_t vm_prelink_sdata;
vm_offset_t vm_prelink_edata;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;

unsigned long gVirtBase, gPhysBase, gPhysSize;      /* Used by <mach/arm/vm_param.h> */

vm_offset_t   mem_size;                             /* Size of actual physical memory present,
                                                     * minus any performance buffer and possibly
                                                     * limited by mem_limit, in bytes */
uint64_t      mem_actual;                           /* The "One True" physical memory size;
                                                     * actually, it's the highest physical
                                                     * address + 1 */
uint64_t      max_mem;                              /* kernel/vm managed memory, adjusted by maxmem */
uint64_t      max_mem_actual;                       /* Actual size of physical memory (bytes), adjusted
                                                     * by the maxmem boot-arg */
uint64_t      sane_size;                            /* Memory size to use for defaults
                                                     * calculations */
addr64_t      vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel
                                                     * virtual address known
                                                     * to the VM system */

vm_offset_t            segEXTRADATA;
unsigned long          segSizeEXTRADATA;
vm_offset_t            segLOWESTTEXT;
vm_offset_t            segLOWEST;
static vm_offset_t     segTEXTB;
static unsigned long   segSizeTEXT;
static vm_offset_t     segDATAB;
static unsigned long   segSizeDATA;
vm_offset_t            segLINKB;
static unsigned long   segSizeLINK;
static vm_offset_t     segKLDB;
static unsigned long   segSizeKLD;
static vm_offset_t     segKLDDATAB;
static unsigned long   segSizeKLDDATA;
static vm_offset_t     segLASTB;
static vm_offset_t     segLASTDATACONSTB;
static unsigned long   segSizeLASTDATACONST;
static unsigned long   segSizeLAST;
static vm_offset_t     sectCONSTB;
static unsigned long   sectSizeCONST;
vm_offset_t            segBOOTDATAB;
unsigned long          segSizeBOOTDATA;
extern vm_offset_t     intstack_low_guard;
extern vm_offset_t     intstack_high_guard;
extern vm_offset_t     fiqstack_high_guard;

vm_offset_t     segPRELINKTEXTB;
unsigned long   segSizePRELINKTEXT;
vm_offset_t     segPRELINKINFOB;
unsigned long   segSizePRELINKINFO;

vm_offset_t          segLOWESTKC;
vm_offset_t          segHIGHESTKC;
vm_offset_t          segLOWESTROKC;
vm_offset_t          segHIGHESTROKC;
vm_offset_t          segLOWESTAuxKC;
vm_offset_t          segHIGHESTAuxKC;
vm_offset_t          segLOWESTROAuxKC;
vm_offset_t          segHIGHESTROAuxKC;
vm_offset_t          segLOWESTRXAuxKC;
vm_offset_t          segHIGHESTRXAuxKC;
vm_offset_t          segHIGHESTNLEAuxKC;

static kernel_segment_command_t *segDATA;
static boolean_t doconstro = TRUE;

vm_offset_t end_kern, etext, sdata, edata;

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end  : PA of last physical page
 */
vm_offset_t     first_avail;
vm_offset_t     static_memory_end;
pmap_paddr_t    avail_start, avail_end;

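/*
 * Note: 0x40000000 is 1GB. mem_size is clamped to this in arm_vm_init(),
 * bounding how much physical memory the 32-bit kernel maps statically.
 */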
#define MEM_SIZE_MAX 0x40000000

extern vm_offset_t ExceptionVectorsBase; /* the code we want to load there */

/* The translation tables have to be 16KB aligned */
#define round_x_table(x) \
	(((pmap_paddr_t)(x) + (ARM_PGBYTES<<2) - 1) & ~((ARM_PGBYTES<<2) - 1))
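/*
 * Worked example (assuming the usual 4KB ARM_PGBYTES): ARM_PGBYTES << 2 is
 * 0x4000, so round_x_table() rounds up to the next 16KB boundary, e.g.
 * round_x_table(0x80004001) == 0x80008000, while 16KB-aligned inputs come
 * back unchanged.
 */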

vm_map_address_t
phystokv(pmap_paddr_t pa)
{
	return pa - gPhysBase + gVirtBase;
}
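/*
 * Illustration: the kernel's static region is mapped at a fixed linear
 * offset, so PA->VA conversion is pure arithmetic. With (example values
 * only) gPhysBase == gVirtBase == 0x80000000, phystokv() is the identity;
 * both values actually come from boot_args in arm_vm_init().
 */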

static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va,
    int pte_prot_APX, int pte_prot_XN)
{
	if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over an ARM_TT_L1_PT_SIZE boundary */
		va &= (~ARM_TT_L1_PT_OFFMASK);
		tt_entry_t *tte = &cpu_tte[ttenum(va)];
		tt_entry_t tmplate = *tte;
		pmap_paddr_t pa;
		pt_entry_t *ppte, ptmp;
		unsigned int i;

		pa = va - gVirtBase + gPhysBase;

		if (pa >= avail_end) {
			return;
		}

		assert(_end >= va);

		if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
			/* pick up the existing page table. */
			ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
		} else {
			/* TTE must be reincarnated COARSE. */
			ppte = (pt_entry_t *)phystokv(avail_start);
			pmap_paddr_t l2table = avail_start;
			avail_start += ARM_PGBYTES;
			bzero(ppte, ARM_PGBYTES);

			for (i = 0; i < 4; ++i) {
				tte[i] = pa_to_tte(l2table + (i * 0x400)) | ARM_TTE_TYPE_TABLE;
			}
		}

		vm_offset_t len = _end - va;
		if ((pa + len) > avail_end) {
			_end -= (pa + len - avail_end);
		}
		assert((start - gVirtBase + gPhysBase) >= gPhysBase);

		/* Apply the desired protections to the specified page range */
		for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) {
			if (start <= va && va < _end) {
				ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE;
				ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
				ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
				if (pte_prot_XN) {
					ptmp = ptmp | ARM_PTE_NX;
				}

				ppte[i] = ptmp;
			}

			va += ARM_PGBYTES;
			pa += ARM_PGBYTES;
		}
	}
}
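/*
 * A sketch of how arm_vm_page_granular_helper() (above) and
 * arm_vm_page_granular_prot() (below) carve things up, assuming this file's
 * 32-bit ARM configuration (4KB ARM_PGBYTES, ARM_TT_L1_PT_SIZE covering 4MB
 * via four 1MB L1 entries):
 *
 * The helper handles an edge that is not 4MB aligned. If the covering L1
 * entries are still 1MB block mappings, it takes one 4KB page from
 * avail_start and splits it into four 1KB coarse page tables (0x400 bytes
 * apart), one per L1 entry; it then writes PTEs, with the requested
 * protections, only for the pages inside [start, _end). Adjacent calls fill
 * in the rest of the region's 1024 PTE slots.
 *
 * arm_vm_page_granular_prot() covers an arbitrary range: the unaligned head
 * and tail go through the helper at page granularity, while the 4MB-aligned
 * middle has its 1MB block entries rewritten in place, unless
 * force_page_granule demands page tables throughout.
 */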

static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size,
    int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule)
{
	vm_offset_t _end = start + size;
	vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK;
	vm_offset_t align_end = _end & ~ARM_TT_L1_PT_OFFMASK;

	arm_vm_page_granular_helper(start, _end, start, pte_prot_APX, pte_prot_XN);

	while (align_start < align_end) {
		if (force_page_granule) {
			arm_vm_page_granular_helper(align_start, align_end, align_start + 1,
			    pte_prot_APX, pte_prot_XN);
		} else {
			tt_entry_t *tte = &cpu_tte[ttenum(align_start)];
			for (int i = 0; i < 4; ++i) {
				tt_entry_t tmplate = tte[i];

				tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX);
				tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK);
				if (tte_prot_XN) {
					tmplate = tmplate | ARM_TTE_BLOCK_NX;
				}

				tte[i] = tmplate;
			}
		}
		align_start += ARM_TT_L1_PT_SIZE;
	}

	arm_vm_page_granular_helper(start, _end, _end, pte_prot_APX, pte_prot_XN);
}
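/*
 * Convenience wrappers for the protection combinations used below:
 *   RNX  = kernel read-only,  no-execute    ROX = kernel read-only,  executable
 *   RWNX = kernel read-write, no-execute    RWX = kernel read-write, executable
 * AP_RONA/AP_RWNA grant no user access; the two XN arguments apply
 * execute-never at section (TTE) and page (PTE) granularity respectively.
 */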

static inline void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RONA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RONA, 0, force_page_granule);
}

static inline void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 1, AP_RWNA, 1, force_page_granule);
}

static inline void
arm_vm_page_granular_RWX(vm_offset_t start, unsigned long size, int force_page_granule)
{
	arm_vm_page_granular_prot(start, size, 0, AP_RWNA, 0, force_page_granule);
}

void
arm_vm_prot_init(boot_args * args)
{
#if __ARM_PTE_PHYSMAP__
	boolean_t force_coarse_physmap = TRUE;
#else
	boolean_t force_coarse_physmap = FALSE;
#endif
	/*
	 * Enforce W^X protections on segments that have been identified so far. This will be
	 * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions()
	 */

	/*
	 * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors)
	 * and storing an address into "error_buffer" (see arm_init.c) !?!
	 * These protections are tightened in arm_vm_prot_finalize()
	 */
	arm_vm_page_granular_RWX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	if (doconstro) {
		/*
		 * We map __DATA with 3 calls, so that the __const section can have its
		 * protections changed independently of the rest of the __DATA segment.
		 */
		arm_vm_page_granular_RWNX(segDATAB, sectCONSTB - segDATAB, FALSE);
		arm_vm_page_granular_RNX(sectCONSTB, sectSizeCONST, FALSE);
		arm_vm_page_granular_RWNX(sectCONSTB + sectSizeCONST, (segDATAB + segSizeDATA) - (sectCONSTB + sectSizeCONST), FALSE);
	} else {
		/* If we aren't protecting const, just map DATA as a single blob. */
		arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, FALSE);
	}
	arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, TRUE);
	arm_vm_page_granular_RNX((vm_offset_t)&fiqstack_high_guard, PAGE_MAX_SIZE, TRUE);

	arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap);
	arm_vm_page_granular_RNX(segKLDDATAB, segSizeKLDDATA, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, force_coarse_physmap);
	arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this
	if (segLASTDATACONSTB) {
		arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, FALSE); // __LASTDATA_CONST may be empty, but we cannot assume this
	}
	arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions
	arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT,
	    end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap);                          // PreLinkInfoDictionary
	arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches
	arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte

	/*
	 * FIXME: Any page table pages that arm_vm_page_granular_* created with ROX entries in the range
	 * phystokv(args->topOfKernelData) to phystokv(prot_avail_start) should themselves be
	 * write protected in the static mapping of that range.
	 * [Page table pages whose page table entries grant execute (X) privileges should themselves be
	 * marked read-only. This aims to thwart attacks that replace the X entries with vectors to evil code
	 * (relying on some thread of execution to eventually arrive at what previously was a trusted routine).]
	 */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 8, ARM_PGBYTES, FALSE); /* Excess physMem over 1MB */
	arm_vm_page_granular_RWX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* refined in finalize */

	/* Map the remainder of xnu owned memory. */
	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10,
	    static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap);                       /* rest of physmem */

	/*
	 * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000.
	 * Recall that start.s handcrafted a page table page for EVB mapping
	 */
	pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9);
	pt_entry_t *ppte = (pt_entry_t *)phystokv(p);
	pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE);

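	/*
	 * Index of the PTE mapping HIGH_EXC_VECTORS (0xFFFF0000) within the page
	 * of PTEs covering the surrounding region: assuming ARM_TT_L1_PT_OFFMASK
	 * is 0x3FFFFF and ARM_TT_L2_SHIFT is 12, this evaluates to
	 * (0xFFFF0000 & 0x3FFFFF) >> 12 == 0x3F0, i.e. entry 1008 of 1024.
	 */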
	int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT;
	pt_entry_t ptmp = ppte[idx];

	ptmp = (ptmp & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA);

	ppte[idx] = ptmp;
}

void
arm_vm_prot_finalize(boot_args * args)
{
	cpu_stack_alloc(&BootCpuData);
	ml_static_mfree(segBOOTDATAB, segSizeBOOTDATA);
	/*
	 * Naively we could have:
	 * arm_vm_page_granular_ROX(segTEXTB, segSizeTEXT, FALSE);
	 * but, at present, that would miss a 1MB boundary at the beginning of the segment and
	 * so would force a (wasteful) coarse page (e.g. when gVirtBase is 0x80000000, segTEXTB is 0x80001000).
	 */
	arm_vm_page_granular_ROX(gVirtBase, segSizeTEXT + (segTEXTB - gVirtBase), FALSE);

	arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 9, ARM_PGBYTES, FALSE); /* commpage, EVB */

	flush_mmu_tlb();
}

/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
	vm_offset_t paddr;
	size_t length;
} MemoryMapFileInfo;


void
arm_vm_init(uint64_t memory_size, boot_args * args)
{
	vm_map_address_t va, off, off_end;
	tt_entry_t       *tte, *tte_limit;
	pmap_paddr_t     boot_ttep;
	tt_entry_t       *boot_tte;
	uint32_t         mem_segments;
	kernel_section_t *sectDCONST;

	/*
	 * Get the virtual and physical memory base from boot_args.
	 */
	gVirtBase = args->virtBase;
	gPhysBase = args->physBase;
	gPhysSize = args->memSize;
	mem_size = args->memSize;
	mem_actual = args->memSizeActual ? args->memSizeActual : mem_size;
	if (mem_size > MEM_SIZE_MAX) {
		mem_size = MEM_SIZE_MAX;
	}
	if ((memory_size != 0) && (mem_size > memory_size)) {
		mem_size = memory_size;
		max_mem_actual = memory_size;
	} else {
		max_mem_actual = mem_actual;
	}

	static_memory_end = gVirtBase + mem_size;

	/* Calculate the number of ~256MB segments of memory */
	mem_segments = (mem_size + 0x0FFFFFFF) >> 28;

	/*
	 * Copy the boot mmu tt to create the system mmu tt.
	 * The system mmu tt starts after the boot mmu tt.
	 * Determine the translation table base virtual address:
	 * aligned at the end of the executable.
	 */
	boot_ttep = args->topOfKernelData;
	boot_tte = (tt_entry_t *) phystokv(boot_ttep);

	cpu_ttep = boot_ttep + ARM_PGBYTES * 4;
	cpu_tte = (tt_entry_t *) phystokv(cpu_ttep);

	bcopy(boot_tte, cpu_tte, ARM_PGBYTES * 4);

	/*
	 * Clear out any V==P mappings that may have been established in e.g. start.s
	 */
	tte = &cpu_tte[ttenum(gPhysBase)];
	tte_limit = &cpu_tte[ttenum(gPhysBase + gPhysSize)];

	/* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */
	if (gPhysBase < gVirtBase) {
		if (gPhysBase + gPhysSize > gVirtBase) {
			tte_limit = &cpu_tte[ttenum(gVirtBase)];
		}
	} else {
		if (gPhysBase < gVirtBase + gPhysSize) {
			tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)];
		}
	}

	while (tte < tte_limit) {
		*tte = ARM_TTE_TYPE_FAULT;
		tte++;
	}

	/* Skip 6 pages (four L1 pages + two L2 pages) */
	avail_start = cpu_ttep + ARM_PGBYTES * 6;
	avail_end = gPhysBase + mem_size;
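	/*
	 * Sketch of the physical pages starting at topOfKernelData, as inferred
	 * from the protections applied in arm_vm_prot_init():
	 *   pages 0-3: boot_tte, the 16KB L1 table built by start.s
	 *   pages 4-7: cpu_tte, the 16KB L1 table copied above
	 *   page  8:   an L2 page table (excess physical memory over 1MB)
	 *   page  9:   the L2 page table handcrafted in start.s for the
	 *              exception vectors mapping at HIGH_EXC_VECTORS
	 *   page 10+:  avail_start, handed to the pmap as free memory
	 */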

	/*
	 * Now retrieve addresses for end, edata, and etext
	 * from the Mach-O headers of the currently running 32-bit kernel.
	 */
	segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
	segLOWESTTEXT = segTEXTB;
	segLOWEST = segLOWESTTEXT;
	segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
	segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
	segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
	segKLDDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLDDATA", &segSizeKLDDATA);
	segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
	segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST);
	segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
	segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
	segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);

	segEXTRADATA = 0;
	segSizeEXTRADATA = 0;

	DTEntry memory_map;
	MemoryMapFileInfo const *trustCacheRange;
	unsigned int trustCacheRangeSize;
	int err;

	err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	assert(err == kSuccess);

	err = SecureDTGetProperty(memory_map, "TrustCache", (const void**)&trustCacheRange, &trustCacheRangeSize);
	if (err == kSuccess) {
		assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

		segEXTRADATA = phystokv(trustCacheRange->paddr);
		segSizeEXTRADATA = trustCacheRange->length;
	}

	etext = (vm_offset_t) segTEXTB + segSizeTEXT;
	sdata = (vm_offset_t) segDATAB;
	edata = (vm_offset_t) segDATAB + segSizeDATA;
	end_kern = round_page(getlastaddr());   /* Force end to next page */

	/*
	 * Special handling for the __DATA,__const *section*.
	 * A page of padding named lastkerneldataconst sits at the end of __DATA,__const,
	 * so we can safely truncate the size. __DATA,__const should already be aligned,
	 * but just in case we will round that to a page, too.
	 */
	segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA");
	sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const");
	sectCONSTB = sectDCONST->addr;
	sectSizeCONST = sectDCONST->size;

	if (doconstro) {
		extern vm_offset_t _lastkerneldataconst;
		extern vm_size_t _lastkerneldataconst_padsize;
		vm_offset_t sdataconst = sectCONSTB;

		/* this should already be page-aligned, but round it so we can protect it */
		sectCONSTB = round_page(sectCONSTB);

		/* make sure lastkerneldataconst really is last and has the right size */
		if ((_lastkerneldataconst == sdataconst + sectSizeCONST - _lastkerneldataconst_padsize) &&
		    (_lastkerneldataconst_padsize >= PAGE_SIZE)) {
			sectSizeCONST = trunc_page(sectSizeCONST);
		} else {
			/* otherwise, see if the next section is page-aligned, then protect up to it */
			kernel_section_t *next_sect = nextsect(segDATA, sectDCONST);

			if (next_sect && ((next_sect->addr & PAGE_MASK) == 0)) {
				sectSizeCONST = next_sect->addr - sectCONSTB;
			} else {
				/* lastly, just truncate so that we protect at least something */
				sectSizeCONST = trunc_page(sectSizeCONST);
			}
		}

		/* sanity check */
		if ((sectSizeCONST == 0) || (sectCONSTB < sdata) || (sectCONSTB + sectSizeCONST) >= edata) {
			doconstro = FALSE;
		}
	}

	vm_set_page_size();

	vm_prelink_stext = segPRELINKTEXTB;
	vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
	vm_prelink_sinfo = segPRELINKINFOB;
	vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
	vm_slinkedit = segLINKB;
	vm_elinkedit = segLINKB + segSizeLINK;

	sane_size = mem_size - (avail_start - gPhysBase);
	max_mem = mem_size;
	vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS;
	vm_kernel_stext = segTEXTB;
	vm_kernel_etext = segTEXTB + segSizeTEXT;
	vm_kernel_base = gVirtBase;
	vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
	vm_kext_base = segPRELINKTEXTB;
	vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
	vm_kernel_slid_base = segTEXTB;
	vm_kernel_slid_top = vm_kext_top;

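	/*
	 * (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000 rounds the end of
	 * the 1GB static window up to a 4MB boundary (0xFFC00000 == ~0x3FFFFF),
	 * giving the first dynamically managed virtual address. Illustration
	 * only: with gVirtBase at 0x80000000 this is 0xC0000000. The same
	 * expression seeds va in the preallocation loop below.
	 */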
	pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000);

	arm_vm_prot_init(args);

	vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST));

	/*
	 * To avoid recursing while trying to init the vm_page and object mechanisms,
	 * pre-initialize kernel pmap page table pages to cover this address range:
	 *    2MB + FrameBuffer size + 3MB for each 256MB segment
	 */
	off_end = (2 + (mem_segments * 3)) << 20;
	off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes);
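	/*
	 * Worked example: a 512MB device has mem_segments == 2, so off_end is
	 * (2 + 6)MB == 8MB plus the rounded framebuffer size. Each iteration
	 * below consumes one 4KB page from avail_start and installs it as four
	 * 1KB coarse tables, pre-mapping 4MB of the kernel_pmap VA space.
	 */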

	for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) {
		pt_entry_t   *ptp;
		pmap_paddr_t ptp_phys;

		ptp = (pt_entry_t *) phystokv(avail_start);
		ptp_phys = (pmap_paddr_t)avail_start;
		avail_start += ARM_PGBYTES;
		bzero(ptp, ARM_PGBYTES);
		pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE);
		tte = &cpu_tte[ttenum(va + off)];
		*tte     = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;
		*(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;
		*(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;
		*(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;
	}

	set_mmu_ttb(cpu_ttep);
	set_mmu_ttb_alternate(cpu_ttep);
	flush_mmu_tlb();
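	/*
	 * With __ARM_USER_PROTECT__, cache the live hardware translation table
	 * bases in the boot thread's machine state (the MRC p15, 0, Rt, c2, c0, 0
	 * form reads TTBR0; c2, c0, 1 reads TTBR1).
	 */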
#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val;
		thread_t thread = current_thread();

		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
	}
#endif
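	/* Round avail_start up to a page boundary before publishing it as first_avail. */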
	avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;

	first_avail = avail_start;
	patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
}