/*
 * Copyright (c) 2003-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */


#include <mach/i386/vm_param.h>

#include <string.h>
#include <stdint.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/pms.h>
#include <kern/cpu_data.h>
#include <kern/processor.h>
#include <sys/kdebug.h>
#include <console/serial_protos.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <machine/pal_routines.h>
#include <i386/fpu.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#include <i386/machine_routines.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/ucode.h>
#include <i386/postcode.h>
#include <i386/Diagnostics.h>
#include <i386/pmCPU.h>
#include <i386/tsc.h>
#include <i386/locks.h> /* LcksOpts */
#include <i386/acpi.h>
#if DEBUG
#include <machine/pal_routines.h>
#endif
extern void xcpm_bootstrap(void);
#if DEVELOPMENT || DEBUG
#include <i386/trap.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#include <san/kasan.h>

#if DEBUG || DEVELOPMENT
#define DBG(x, ...) kprintf(x, ##__VA_ARGS__)
#define dyldLogFunc(x, ...) kprintf(x, ##__VA_ARGS__)
#else
#define DBG(x ...)
#endif

#include <libkern/kernel_mach_header.h>
#include <mach/dyld_kernel_fixups.h>


int debug_task;

int early_boot = 1;

bool serial_console_enabled = false;

static boot_args *kernelBootArgs;

extern int disableConsoleOutput;
extern const char version[];
extern const char version_variant[];
extern int nx_enabled;

/*
 * Set initial values so that ml_phys_* routines can use the booter's ID mapping
 * to touch physical space before the kernel's physical aperture exists.
 */
uint64_t physmap_base = 0;
uint64_t physmap_max = 4 * GB;
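
/*
 * Illustrative sketch (not part of this file): once physmap_base/physmap_max
 * are set, a physical-to-virtual translation through the physmap window is
 * assumed to be a simple offset, along the lines of:
 *
 *	static inline void *
 *	example_phys_to_virt(uint64_t pa)	// hypothetical helper
 *	{
 *		assert(physmap_base + pa < physmap_max);
 *		return (void *)(uintptr_t)(physmap_base + pa);
 *	}
 *
 * The kernel's actual accessor is PHYSMAP_PTOV(), defined elsewhere.
 */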

pd_entry_t *KPTphys;
pd_entry_t *IdlePTD;
pdpt_entry_t *IdlePDPT;
pml4_entry_t *IdlePML4;

int kernPhysPML4Index;
int kernPhysPML4EntryCount;

/*
 * These are 4K mapping page table pages from KPTphys[] that we wound
 * up not using. They get ml_static_mfree()'d once the VM is initialized.
 */
ppnum_t released_PT_ppn = 0;
uint32_t released_PT_cnt = 0;

#if DEVELOPMENT || DEBUG
int panic_on_cacheline_mismatch = -1;
char panic_on_trap_procname[64];
uint32_t panic_on_trap_mask;
#endif
lbr_modes_t last_branch_enabled_modes;
int insn_copyin_count;
#if DEVELOPMENT || DEBUG
#define DEFAULT_INSN_COPYIN_COUNT x86_INSTRUCTION_STATE_MAX_INSN_BYTES
#else
#define DEFAULT_INSN_COPYIN_COUNT 192
#endif

char *physfree;
void idt64_remap(void);

/*
 * Note: ALLOCPAGES() can only be used safely within Idle_PTs_init()
 * due to the mutation of physfree.
 */
static void *
ALLOCPAGES(int npages)
{
	uintptr_t tmp = (uintptr_t)physfree;
	bzero(physfree, npages * PAGE_SIZE);
	physfree += npages * PAGE_SIZE;
	tmp += VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK;
	return (void *)tmp;
}
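
/*
 * Illustrative usage (example only): ALLOCPAGES() hands back the next npages
 * of zeroed memory starting at physfree and returns the corresponding high
 * kernel virtual address, so early page-table pages are carved out like:
 */
#if 0	/* example only, not compiled */
	pt_entry_t *example_pt = ALLOCPAGES(1);	/* one zeroed 4K table page */
#endif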

static void
fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count)
{
	int i;
	for (i = 0; i < count; i++) {
		base[index] = src | prot | INTEL_PTE_VALID;
		src += PAGE_SIZE;
		index++;
	}
}
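
/*
 * Illustrative call (example values only): map the four physical pages
 * starting at physical address 0 into entries 0..3 of a table, writable:
 */
#if 0	/* example only, not compiled */
	fillkpt(example_pt, INTEL_PTE_WRITE, 0 /* phys */, 0 /* index */, 4);
#endif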

extern pmap_paddr_t first_avail;

int break_kprintf = 0;

uint64_t
x86_64_pre_sleep(void)
{
	IdlePML4[0] = IdlePML4[KERNEL_PML4_INDEX];
	uint64_t oldcr3 = get_cr3_raw();
	set_cr3_raw((uint32_t) (uintptr_t)ID_MAP_VTOP(IdlePML4));
	return oldcr3;
}

void
x86_64_post_sleep(uint64_t new_cr3)
{
	IdlePML4[0] = 0;
	set_cr3_raw((uint32_t) new_cr3);
}




// Set up the physical mapping - NPHYSMAP GB of memory mapped at a high address.
// NPHYSMAP is determined by the maximum supported RAM size plus 4GB to account
// for the PCI hole (which is less than 4GB).

static int
physmap_init_L2(uint64_t *physStart, pt_entry_t **l2ptep)
{
	unsigned i;
	pt_entry_t *physmapL2 = ALLOCPAGES(1);

	if (physmapL2 == NULL) {
		DBG("physmap_init_L2 page alloc failed when initting L2 for physAddr 0x%llx.\n", *physStart);
		*l2ptep = NULL;
		return -1;
	}

	for (i = 0; i < NPDPG; i++) {
		physmapL2[i] = *physStart
		    | INTEL_PTE_PS
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;

		*physStart += NBPD;
	}
	*l2ptep = physmapL2;
	return 0;
}
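
/*
 * Worked coverage check (assuming the usual constants NPDPG == 512 and
 * NBPD == 2MB): one L2 table page written above maps
 *   512 entries * 2MB per entry = 1GB
 * of physical space, which is why the caller advances one PDPT slot per call.
 */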

static int
physmap_init_L3(int startIndex, uint64_t highest_phys, uint64_t *physStart, pt_entry_t **l3ptep)
{
	unsigned i;
	int ret;
	pt_entry_t *l2pte;
	pt_entry_t *physmapL3 = ALLOCPAGES(1);  /* ALLOCPAGES bzeroes the memory */

	if (physmapL3 == NULL) {
		DBG("physmap_init_L3 page alloc failed when initting L3 for physAddr 0x%llx.\n", *physStart);
		*l3ptep = NULL;
		return -1;
	}

	for (i = startIndex; i < NPDPTPG && *physStart < highest_phys; i++) {
		if ((ret = physmap_init_L2(physStart, &l2pte)) < 0) {
			return ret;
		}

		physmapL3[i] = ((uintptr_t)ID_MAP_VTOP(l2pte))
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;
	}

	*l3ptep = physmapL3;

	return 0;
}

static void
physmap_init(uint8_t phys_random_L3, uint64_t *new_physmap_base, uint64_t *new_physmap_max)
{
	pt_entry_t *l3pte;
	int pml4_index, i;
	int L3_start_index;
	uint64_t physAddr = 0;
	uint64_t highest_physaddr;
	unsigned pdpte_count;

#if DEVELOPMENT || DEBUG
	if (kernelBootArgs->PhysicalMemorySize > K64_MAXMEM) {
		panic("Installed physical memory exceeds configured maximum.");
	}
#endif

	/*
	 * Add 4GB to the loader-provided physical memory size to account for MMIO space.
	 * XXX in a perfect world, we'd scan PCI buses and count the max memory requested in BARs by
	 * XXX all enumerated devices, then add more for hot-pluggable devices.
	 */
	highest_physaddr = kernelBootArgs->PhysicalMemorySize + 4 * GB;

	/*
	 * Calculate the number of PML4 entries we'll need. The total number of entries is
	 *   pdpte_count = (((highest_physaddr) >> PDPTSHIFT) + entropy_value +
	 *                  ((highest_physaddr & PDPTMASK) == 0 ? 0 : 1))
	 *   pml4e_count = pdpte_count >> (PML4SHIFT - PDPTSHIFT)
	 */
	assert(highest_physaddr < (UINT64_MAX - PDPTMASK));
	pdpte_count = (unsigned) (((highest_physaddr + PDPTMASK) >> PDPTSHIFT) + phys_random_L3);
	kernPhysPML4EntryCount = (pdpte_count + ((1U << (PML4SHIFT - PDPTSHIFT)) - 1)) >> (PML4SHIFT - PDPTSHIFT);
	if (kernPhysPML4EntryCount == 0) {
		kernPhysPML4EntryCount = 1;
	}
	if (kernPhysPML4EntryCount > KERNEL_PHYSMAP_PML4_COUNT_MAX) {
#if DEVELOPMENT || DEBUG
		panic("physmap too large");
#else
		kprintf("[pmap] Limiting physmap to %d PML4s (was %d)\n", KERNEL_PHYSMAP_PML4_COUNT_MAX,
		    kernPhysPML4EntryCount);
		kernPhysPML4EntryCount = KERNEL_PHYSMAP_PML4_COUNT_MAX;
#endif
	}
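
	/*
	 * Worked example (illustrative numbers, assuming 1GB PDPT entries and
	 * 512GB PML4 entries): with 32GB of RAM reported by the loader,
	 *   highest_physaddr = 32GB + 4GB = 36GB
	 *   pdpte_count      = ceil(36GB / 1GB) + phys_random_L3 = 36 + r, r in 0..255
	 *   pml4e_count      = ceil((36 + r) / 512) = 1
	 * so small machines typically consume a single PML4 slot for the physmap.
	 */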

	kernPhysPML4Index = KERNEL_KEXTS_INDEX - kernPhysPML4EntryCount; /* utb: KERNEL_PHYSMAP_PML4_INDEX */

	/*
	 * XXX: Make sure that the addresses returned for physmapL3 and physmapL2 plus their extents
	 * are in the system-available memory range
	 */


	/* We assume NX support. Mark all levels of the PHYSMAP NX
	 * to avoid granting executability via a single bit flip.
	 */
#if DEVELOPMENT || DEBUG
	uint32_t reg[4];
	do_cpuid(0x80000000, reg);
	if (reg[eax] >= 0x80000001) {
		do_cpuid(0x80000001, reg);
		assert(reg[edx] & CPUID_EXTFEATURE_XD);
	}
#endif /* DEVELOPMENT || DEBUG */

	L3_start_index = phys_random_L3;

	for (pml4_index = kernPhysPML4Index;
	    pml4_index < (kernPhysPML4Index + kernPhysPML4EntryCount) && physAddr < highest_physaddr;
	    pml4_index++) {
		if (physmap_init_L3(L3_start_index, highest_physaddr, &physAddr, &l3pte) < 0) {
			panic("Physmap page table initialization failed");
			/* NOTREACHED */
		}

		L3_start_index = 0;

		IdlePML4[pml4_index] = ((uintptr_t)ID_MAP_VTOP(l3pte))
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;
	}

	*new_physmap_base = KVADDR(kernPhysPML4Index, phys_random_L3, 0, 0);
	/*
	 * physAddr contains the last-mapped physical address, so that's what we
	 * add to physmap_base to derive the ending VA for the physmap.
	 */
	*new_physmap_max = *new_physmap_base + physAddr;

	DBG("Physical address map base: 0x%qx\n", *new_physmap_base);
	for (i = kernPhysPML4Index; i < (kernPhysPML4Index + kernPhysPML4EntryCount); i++) {
		DBG("Physical map idlepml4[%d]: 0x%llx\n", i, IdlePML4[i]);
	}
}

void doublemap_init(uint8_t);

static void
Idle_PTs_init(void)
{
	uint64_t rand64;
	uint64_t new_physmap_base, new_physmap_max;

	/* Allocate the "idle" kernel page tables: */
	KPTphys  = ALLOCPAGES(NKPT);            /* level 1 */
	IdlePTD  = ALLOCPAGES(NPGPTD);          /* level 2 */
	IdlePDPT = ALLOCPAGES(1);               /* level 3 */
	IdlePML4 = ALLOCPAGES(1);               /* level 4 */

	// Fill the lowest level with everything up to physfree
	fillkpt(KPTphys,
	    INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));

	/* IdlePTD */
	fillkpt(IdlePTD,
	    INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);

	// IdlePDPT entries
	fillkpt(IdlePDPT,
	    INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);

	// IdlePML4 single entry for kernel space.
	fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
	    INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);
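
	/*
	 * Resulting bootstrap hierarchy (descriptive note, added for clarity):
	 * one PML4 slot -> IdlePDPT -> IdlePTD -> KPTphys, where KPTphys holds
	 * the 4K mappings that cover the kernel image up to physfree.
	 */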

	postcode(VSTART_PHYSMAP_INIT);

	/*
	 * early_random() cannot be called more than one time before the cpu's
	 * gsbase is initialized, so use the full 64-bit value to extract the
	 * two 8-bit entropy values needed for address randomization.
	 */
	rand64 = early_random();
	physmap_init(rand64 & 0xFF, &new_physmap_base, &new_physmap_max);
	doublemap_init((rand64 >> 8) & 0xFF);
	idt64_remap();

	postcode(VSTART_SET_CR3);

	/*
	 * Switch to the page tables. We set physmap_base and physmap_max just
	 * before switching to the new page tables to avoid someone calling
	 * kprintf() or otherwise using physical memory in between.
	 * This is needed because kprintf() writes to physical memory using
	 * ml_phys_read_data and PHYSMAP_PTOV, which requires physmap_base to be
	 * set correctly.
	 */
	physmap_base = new_physmap_base;
	physmap_max = new_physmap_max;
	set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
}

/*
 * Release any still unused, preallocated boot kernel page tables.
 * start..end is the VA range currently unused.
 */
void
Idle_PTs_release(vm_offset_t start, vm_offset_t end)
{
	uint32_t i;
	uint32_t index_start;
	uint32_t index_limit;
	ppnum_t pn_first;
	ppnum_t pn;
	uint32_t cnt;

	/*
	 * Align start to the next large page boundary
	 */
	start = ((start + I386_LPGMASK) & ~I386_LPGMASK);
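	/*
	 * Worked example (illustrative, assuming 2MB large pages so
	 * I386_LPGMASK == 0x1FFFFF): a start of 0x...c0301000 rounds up to
	 * 0x...c0400000, while an already-aligned 0x...c0400000 is unchanged.
	 */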

	/*
	 * convert start into an index in KPTphys[]
	 */
	index_start = (uint32_t)((start - KERNEL_BASE) >> PAGE_SHIFT);

	/*
	 * Find the ending index in KPTphys[]
	 */
	index_limit = (uint32_t)((end - KERNEL_BASE) >> PAGE_SHIFT);

	if (index_limit > NKPT * PTE_PER_PAGE) {
		index_limit = NKPT * PTE_PER_PAGE;
	}

	/*
	 * Make sure all the 4K page tables are empty.
	 * If not, panic a development/debug kernel.
	 * On a production kernel, since this would stop us from booting,
	 * just abort the operation.
	 */
	for (i = index_start; i < index_limit; ++i) {
		assert(KPTphys[i] == 0);
		if (KPTphys[i] != 0) {
			return;
		}
	}

	/*
	 * Now figure out the indices into the 2nd level page tables, IdlePTD[].
	 */
	index_start >>= PTPGSHIFT;
	index_limit >>= PTPGSHIFT;
	if (index_limit > NPGPTD * PTE_PER_PAGE) {
		index_limit = NPGPTD * PTE_PER_PAGE;
	}

	if (index_limit <= index_start) {
		return;
	}


	/*
	 * Now check the pages referenced from Level 2 tables.
	 * They should be contiguous; assert-fail on development/debug if not.
	 * In production, just fail the removal to allow the system to boot.
	 */
	pn_first = 0;
	cnt = 0;
	for (i = index_start; i < index_limit; ++i) {
		assert(IdlePTD[i] != 0);
		if (IdlePTD[i] == 0) {
			return;
		}

		pn = (ppnum_t)((PG_FRAME & IdlePTD[i]) >> PTSHIFT);
		if (cnt == 0) {
			pn_first = pn;
		} else {
			assert(pn == pn_first + cnt);
			if (pn != pn_first + cnt) {
				return;
			}
		}
		++cnt;
	}

	/*
	 * Good to go, clear the level 2 entries and invalidate the TLB
	 */
	for (i = index_start; i < index_limit; ++i) {
		IdlePTD[i] = 0;
	}
	set_cr3_raw(get_cr3_raw());

	/*
	 * Remember these PFNs to be released later in pmap_lowmem_finalize()
	 */
	released_PT_ppn = pn_first;
	released_PT_cnt = cnt;
#if DEVELOPMENT || DEBUG
	printf("Idle_PTs_release %d pages from PFN 0x%x\n", released_PT_cnt, released_PT_ppn);
#endif
}

extern void vstart_trap_handler;

#define BOOT_TRAP_VECTOR(t)                             \
	[t] = {                                         \
		(uintptr_t) &vstart_trap_handler,       \
		KERNEL64_CS,                            \
		0,                                      \
		ACC_P|ACC_PL_K|ACC_INTR_GATE,           \
		0                                       \
	},

/* Recursive macro to iterate 0..31 */
#define L0(x, n)  x(n)
#define L1(x, n)  L0(x,n-1)  L0(x,n)
#define L2(x, n)  L1(x,n-2)  L1(x,n)
#define L3(x, n)  L2(x,n-4)  L2(x,n)
#define L4(x, n)  L3(x,n-8)  L3(x,n)
#define L5(x, n)  L4(x,n-16) L4(x,n)
#define FOR_0_TO_31(x) L5(x,31)
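
/*
 * Expansion sketch (for clarity): each level doubles the number of
 * invocations while halving the stride, e.g.
 *   FOR_0_TO_31(x) = L5(x,31) = L4(x,15) L4(x,31)
 *                  = L3(x,7) L3(x,15) L3(x,23) L3(x,31)
 *                  = ... = x(0) x(1) x(2) ... x(31)
 * so BOOT_TRAP_VECTOR is instantiated once for each of vectors 0..31.
 */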

/*
 * Bootstrap IDT. Active only during early startup.
 * Only the trap vectors are defined since interrupts are masked.
 * All traps point to a common handler.
 */
struct fake_descriptor64 master_boot_idt64[IDTSZ]
__attribute__((section("__HIB,__desc")))
__attribute__((aligned(PAGE_SIZE))) = {
	FOR_0_TO_31(BOOT_TRAP_VECTOR)
};

static void
vstart_idt_init(boolean_t master)
{
	x86_64_desc_register_t vstart_idt = {
		sizeof(master_boot_idt64),
		master_boot_idt64
	};

	if (master) {
		fix_desc64(master_boot_idt64, 32);
	}
	lidt((void *)&vstart_idt);
}

extern void *collection_base_pointers[KCNumKinds];

kern_return_t
i386_slide_individual_kext(kernel_mach_header_t *mh, uintptr_t slide)
{
	int ret = kernel_collection_slide(mh, (const void **) (void *)collection_base_pointers);
	if (ret != 0) {
		printf("Sliding pageable kc was stopped\n");
		return KERN_FAILURE;
	}

	kernel_collection_adjust_fileset_entry_addrs(mh, slide);
	return KERN_SUCCESS;
}

kern_return_t
i386_slide_kext_collection_mh_addrs(kernel_mach_header_t *mh, uintptr_t slide, bool adjust_mach_headers)
{
	int ret = kernel_collection_slide(mh, (const void **) (void *)collection_base_pointers);
	if (ret != KERN_SUCCESS) {
		printf("Kernel Collection slide was stopped with value %d\n", ret);
		return KERN_FAILURE;
	}

	kernel_collection_adjust_mh_addrs(mh, slide, adjust_mach_headers,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL);

	return KERN_SUCCESS;
}

static void
i386_slide_and_rebase_image(uintptr_t kstart_addr)
{
	extern uintptr_t kc_highest_nonlinkedit_vmaddr;
	kernel_mach_header_t *k_mh, *kc_mh = NULL;
	kernel_segment_command_t *seg;
	uintptr_t slide;

	k_mh = &_mh_execute_header;
	/*
	 * If we're not booting an MH_FILESET, we don't need to slide
	 * anything because EFI has done that for us. When booting an
	 * MH_FILESET, EFI will slide the kernel proper, but not the kexts.
	 * Below, we infer the slide by comparing the slid address of the
	 * kernel's mach-o header and the unslid vmaddr of the first segment
	 * of the mach-o (which is assumed to always point to the mach-o
	 * header).
	 */
	if (!kernel_mach_header_is_in_fileset(k_mh)) {
		DBG("[MH] kcgen-style KC\n");
		return;
	}

	/*
	 * The kernel is part of an MH_FILESET kernel collection: determine slide
	 * based on the first segment's mach-o vmaddr.
	 */
	seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
	assert(seg->cmd == LC_SEGMENT_KERNEL);
	slide = (uintptr_t)k_mh - seg->vmaddr;
	DBG("[MH] Sliding new-style KC: %llu\n", (unsigned long long)slide);
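	/*
	 * Worked example (hypothetical addresses): if the first segment's
	 * unslid vmaddr is 0xffffff8000200000 and the header actually sits at
	 * 0xffffff8012200000, the KASLR slide is 0x12000000.
	 */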

	/*
	 * The kernel collection mach-o header should be the start address
	 * passed to us by EFI.
	 */
	kc_mh = (kernel_mach_header_t *)(kstart_addr);
	assert(kc_mh->filetype == MH_FILESET);

	PE_set_kc_header(KCKindPrimary, kc_mh, slide);

	/*
	 * rebase/slide all the kexts in the collection
	 * (EFI should have already rebased the kernel)
	 */
	kernel_collection_slide(kc_mh, (const void **) (void *)collection_base_pointers);


	/*
	 * Now adjust the vmaddr fields of all mach-o headers
	 * and symbols in this MH_FILESET
	 */
	kernel_collection_adjust_mh_addrs(kc_mh, slide, false,
	    NULL, NULL, NULL, NULL, NULL, NULL, &kc_highest_nonlinkedit_vmaddr);
}

/*
 * vstart() is called in the natural mode (64-bit for K64, 32-bit for K32)
 * on a set of bootstrap pagetables which use large, 2MB pages to map
 * all of physical memory in both. See idle_pt.c for details.
 *
 * In K64 this identity mapping is mirrored in the top and bottom 512GB
 * slots of PML4.
 *
 * The bootstrap processor is called with argument boot_args_start pointing to
 * the boot-args block. The kernel's (4K page) page tables are allocated and
 * initialized before switching to these.
 *
 * Non-bootstrap processors are called with argument boot_args_start NULL.
 * These processors switch immediately to the existing kernel page tables.
 */
__attribute__((noreturn))
void
vstart(vm_offset_t boot_args_start)
{
	boolean_t is_boot_cpu = !(boot_args_start == 0);
	int cpu = 0;
	uint32_t lphysfree;
#if DEBUG
	uint64_t gsbase;
#endif


	postcode(VSTART_ENTRY);

	/*
	 * Set-up temporary trap handlers during page-table set-up.
	 */

	if (is_boot_cpu) {
		vstart_idt_init(TRUE);
		postcode(VSTART_IDT_INIT);

		/*
		 * Ensure that any %gs-relative access results in an immediate fault
		 * until gsbase is properly initialized below
		 */
		wrmsr64(MSR_IA32_GS_BASE, EARLY_GSBASE_MAGIC);

		/*
		 * Get startup parameters.
		 */
		kernelBootArgs = (boot_args *)boot_args_start;
		lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
		physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
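		/*
		 * e.g. (illustrative): with 4K pages, lphysfree == 0x1234567
		 * rounds up to physfree == 0x1235000.
		 */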

		pal_serial_init();

		DBG("revision 0x%x\n", kernelBootArgs->Revision);
		DBG("version 0x%x\n", kernelBootArgs->Version);
		DBG("command line %s\n", kernelBootArgs->CommandLine);
		DBG("memory map 0x%x\n", kernelBootArgs->MemoryMap);
		DBG("memory map sz 0x%x\n", kernelBootArgs->MemoryMapSize);
		DBG("kaddr 0x%x\n", kernelBootArgs->kaddr);
		DBG("ksize 0x%x\n", kernelBootArgs->ksize);
		DBG("physfree %p\n", physfree);
		DBG("bootargs: %p, &ksize: %p &kaddr: %p\n",
		    kernelBootArgs,
		    &kernelBootArgs->ksize,
		    &kernelBootArgs->kaddr);
		DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize);
		DBG("KC_hdrs_vaddr %p\n", (void *)kernelBootArgs->KC_hdrs_vaddr);

		if (kernelBootArgs->Version >= 2 && kernelBootArgs->Revision >= 1 &&
		    kernelBootArgs->KC_hdrs_vaddr != 0) {
			/*
			 * slide the header addresses in all mach-o segments and sections, and
			 * perform any new-style chained-fixup sliding for kexts, as necessary.
			 * Note that efiboot has already loaded the kernel and all LC_SEGMENT_64s
			 * that correspond to the kexts present in the primary KC, into slid addresses.
			 */
			i386_slide_and_rebase_image((uintptr_t)ml_static_ptovirt(kernelBootArgs->KC_hdrs_vaddr));
		}

		/*
		 * Setup boot args given the physical start address.
		 * Note: PE_init_platform needs to be called before Idle_PTs_init
		 * because access to the DeviceTree is required to read the
		 * random seed before generating a random physical map slide.
		 */
		kernelBootArgs = (boot_args *)
		    ml_static_ptovirt(boot_args_start);
		DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
		    (unsigned long)boot_args_start, kernelBootArgs);

#if KASAN
		kasan_reserve_memory(kernelBootArgs);
#endif

		PE_init_platform(FALSE, kernelBootArgs);
		postcode(PE_INIT_PLATFORM_D);

		Idle_PTs_init();
		postcode(VSTART_IDLE_PTS_INIT);

#if KASAN
		/* Init kasan and map whatever was stolen from physfree */
		kasan_init();
		kasan_notify_stolen((uintptr_t)ml_static_ptovirt((vm_offset_t)physfree));
#endif

#if MONOTONIC
		mt_early_init();
#endif /* MONOTONIC */

		first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);

		cpu_data_alloc(TRUE);

		cpu_desc_init(cpu_datap(0));
		postcode(VSTART_CPU_DESC_INIT);
		cpu_desc_load(cpu_datap(0));

		postcode(VSTART_CPU_MODE_INIT);
		cpu_syscall_init(cpu_datap(0)); /* cpu_syscall_init() will be
		                                 * invoked on the APs
		                                 * via i386_init_slave()
		                                 */
	} else {
		/* Slave CPUs should use the basic IDT until i386_init_slave() */
		vstart_idt_init(FALSE);

		/* Switch to kernel's page tables (from the Boot PTs) */
		set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));

		/* Find our logical cpu number */
		cpu = lapic_to_cpu[lapic_safe_apicid()];
#if DEBUG
		gsbase = rdmsr64(MSR_IA32_GS_BASE);
#endif
		cpu_desc_load(cpu_datap(cpu));
#if DEBUG
		DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, (unsigned long long)gsbase);
#endif

		/*
		 * Before we can discover our local APIC ID, we need to potentially
		 * initialize X2APIC, if it's enabled and firmware started us with
		 * the APIC in legacy mode.
		 */
		lapic_init_slave();
	}

	early_boot = 0;
	postcode(VSTART_EXIT);
	x86_init_wrapper(is_boot_cpu ? (uintptr_t) i386_init
	    : (uintptr_t) i386_init_slave,
	    cpu_datap(cpu)->cpu_int_stack_top);
}

void
pstate_trace(void)
{
}

/*
 * Cpu initialization. Running virtual, but without MACH VM
 * set up.
 */
void
i386_init(void)
{
	unsigned int maxmem;
	uint64_t maxmemtouse;
	unsigned int cpus = 0;
	boolean_t fidn;
	boolean_t IA32e = TRUE;

	postcode(I386_INIT_ENTRY);

	pal_i386_init();
	tsc_init();
	rtclock_early_init();   /* mach_absolute_time() now functional */

	kernel_debug_string_early("i386_init");
	pstate_trace();

#if CONFIG_MCA
	/* Initialize machine-check handling */
	mca_cpu_init();
#endif

	master_cpu = 0;

	kernel_debug_string_early("kernel_startup_bootstrap");
	kernel_startup_bootstrap();

	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

	cpu_init();

	postcode(CPU_INIT_D);

	/* setup debugging output if one has been chosen */
	kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF);
	kprintf("kprintf initialized\n");

	if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof(dgWork.dgFlags))) {
		dgWork.dgFlags = 0;
	}

	if (PE_parse_boot_argn("insn_capcnt", &insn_copyin_count, sizeof(insn_copyin_count))) {
		/*
		 * Enforce max and min values (allowing 0 to disable copying completely)
		 * for the instruction copyin count
		 */
		if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES ||
		    (insn_copyin_count != 0 && insn_copyin_count < 64)) {
			insn_copyin_count = DEFAULT_INSN_COPYIN_COUNT;
		}
	} else {
		insn_copyin_count = DEFAULT_INSN_COPYIN_COUNT;
	}

#if DEVELOPMENT || DEBUG
	if (!PE_parse_boot_argn("panic_clmismatch", &panic_on_cacheline_mismatch,
	    sizeof(panic_on_cacheline_mismatch))) {
		panic_on_cacheline_mismatch = 0;
	}

	if (!PE_parse_boot_argn("panic_on_trap_procname", &panic_on_trap_procname[0],
	    sizeof(panic_on_trap_procname))) {
		panic_on_trap_procname[0] = 0;
	}

	if (!PE_parse_boot_argn("panic_on_trap_mask", &panic_on_trap_mask,
	    sizeof(panic_on_trap_mask))) {
		if (panic_on_trap_procname[0] != 0) {
			panic_on_trap_mask = DEFAULT_PANIC_ON_TRAP_MASK;
		} else {
			panic_on_trap_mask = 0;
		}
	}
#endif
	/* LBR support is disabled by default; allow that to be overridden via boot-arg: */
	if (!PE_parse_boot_argn("lbr_support", &last_branch_enabled_modes,
	    sizeof(last_branch_enabled_modes))) {
		/* Disable LBR support by default due to its high context switch overhead */
		last_branch_enabled_modes = LBR_ENABLED_NONE;
	}

	serialmode = 0;
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* We want a serial keyboard and/or console */
		kprintf("Serial mode specified: %08X\n", serialmode);
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		disable_iolog_serial_output = (serialmode & SERIALMODE_NO_IOLOG) != 0;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
					"WARNING: Forcing uart driver to output synchronously. "
					"printf()s/IOLogs will impact kernel performance.\n"
					"You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
	}
	if (serialmode & SERIALMODE_OUTPUT) {
		serial_console_enabled = true;
		(void)switch_to_serial_console();
		disableConsoleOutput = FALSE; /* Allow printfs to happen */
	}

	/* setup console output */
	kernel_debug_string_early("PE_init_printf");
	PE_init_printf(FALSE);

	kprintf("version_variant = %s\n", version_variant);
	kprintf("version = %s\n", version);

	if (!PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		maxmemtouse = 0;
	} else {
		maxmemtouse = ((uint64_t)maxmem) * MB;
	}

	max_cpus_from_firmware = acpi_count_enabled_logical_processors();

	if (PE_parse_boot_argn("cpus", &cpus, sizeof(cpus))) {
		if ((0 < cpus) && (cpus < max_ncpus)) {
			max_ncpus = cpus;
		}
	}

	/*
	 * debug support for > 4G systems
	 */
	PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof(vm_himemory_mode));
	if (!vm_himemory_mode) {
		kprintf("himemory_mode disabled\n");
	}

	if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof(fidn))) {
		force_immediate_debugger_NMI = FALSE;
	} else {
		force_immediate_debugger_NMI = fidn;
	}

#if DEBUG
	nanoseconds_to_absolutetime(URGENCY_NOTIFICATION_ASSERT_NS, &urgency_notification_assert_abstime_threshold);
#endif
	PE_parse_boot_argn("urgency_notification_abstime",
	    &urgency_notification_assert_abstime_threshold,
	    sizeof(urgency_notification_assert_abstime_threshold));

	if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
		nx_enabled = 0;
	}

	/*
	 * VM initialization, after this we're using page tables...
	 * The maximum number of cpus must be set beforehand.
	 */
	kernel_debug_string_early("i386_vm_init");
	i386_vm_init(maxmemtouse, IA32e, kernelBootArgs);

	/* create the console for verbose or pretty mode */
	/* Note: doing this prior to tsc_init() allows for graceful panic! */
	PE_init_platform(TRUE, kernelBootArgs);
	PE_create_console();

	kernel_debug_string_early("power_management_init");
	power_management_init();
	xcpm_bootstrap();

#if MONOTONIC
	mt_cpu_up(cpu_datap(0));
#endif /* MONOTONIC */

	processor_bootstrap();
	thread_t thread = thread_bootstrap();
	machine_set_current_thread(thread);

	pstate_trace();
	kernel_debug_string_early("machine_startup");
	machine_startup();
	pstate_trace();
}

static void __dead2
do_init_slave(boolean_t fast_restart)
{
	void *init_param = FULL_SLAVE_INIT;

	postcode(I386_INIT_SLAVE);

	if (!fast_restart) {
		/* Ensure that caching and write-through are enabled */
		set_cr0(get_cr0() & ~(CR0_NW | CR0_CD));

		DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		    get_cpu_number(), get_cpu_phys_number());

		assert(!ml_get_interrupts_enabled());

		cpu_syscall_init(current_cpu_datap());
		pmap_cpu_init();

#if CONFIG_MCA
		mca_cpu_init();
#endif

		LAPIC_INIT();
		/*
		 * Note that the true argument here does not necessarily mean we're
		 * here from a resume (this code path is also executed on boot).
		 * The implementation of lapic_configure checks to see if the
		 * state variable has been initialized, as it would be before
		 * sleep. If it has not been, it's construed as an indicator of
		 * first boot.
		 */
		lapic_configure(true);
		LAPIC_DUMP();
		LAPIC_CPU_MAP_DUMP();

		init_fpu();

#if CONFIG_MTRR
		mtrr_update_cpu();
#endif
		/* update CPU microcode and apply CPU workarounds */
		ucode_update_wake_and_apply_cpu_was();

		/* Enable LBRs on non-boot CPUs */
		i386_lbr_init(cpuid_info(), false);
	} else {
		init_param = FAST_SLAVE_INIT;
	}

#if CONFIG_VMX
	/* resume VT operation */
	vmx_resume(FALSE);
#endif

#if CONFIG_MTRR
	if (!fast_restart) {
		pat_init();
	}
#endif

	cpu_thread_init();      /* not strictly necessary */

	cpu_init();     /* Sets cpu_running which starter cpu waits for */


#if MONOTONIC
	mt_cpu_up(current_cpu_datap());
#endif /* MONOTONIC */

#if KPERF
	/*
	 * We can only directly invoke kptimer_curcpu_up() when there is already an
	 * active thread (that is, that this CPU has already been started at some point),
	 * otherwise the ktrace calls within the kptimer operations will try to deref
	 * the current thread and will instead cause a system reset.
	 * If this is the first time the CPU is being started, we don't need to call
	 * kptimer_curcpu_up().
	 */
	if (current_processor()->active_thread != THREAD_NULL) {
		kptimer_curcpu_up();
	}
#endif /* KPERF */

	slave_main(init_param);

	panic("do_init_slave() returned from slave_main()");
}

/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode. We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	do_init_slave(FALSE);
}

/*
 * i386_init_slave_fast() is called from pmCPUHalt.
 * We're running on the idle thread and need to fix up
 * some accounting and get it so that the scheduler sees this
 * CPU again.
 */
void
i386_init_slave_fast(void)
{
	do_init_slave(TRUE);
}

/* TODO: Evaluate global PTEs for the double-mapped translations */

uint64_t dblmap_base, dblmap_max;
kernel_segment_command_t *hdescseg;

pt_entry_t *dblmapL3;
unsigned int dblallocs;
uint64_t dblmap_dist;
extern uint64_t idt64_hndl_table0[];


void
doublemap_init(uint8_t randL3)
{
	dblmapL3 = ALLOCPAGES(1);       // for 512 1GiB entries
	dblallocs++;

	struct {
		pt_entry_t entries[PTE_PER_PAGE];
	} * dblmapL2 = ALLOCPAGES(1);   // for 512 2MiB entries
	dblallocs++;

	dblmapL3[randL3] = ((uintptr_t)ID_MAP_VTOP(&dblmapL2[0]))
	    | INTEL_PTE_VALID
	    | INTEL_PTE_WRITE;

	hdescseg = getsegbynamefromheader(&_mh_execute_header, "__HIB");

	vm_offset_t hdescb = hdescseg->vmaddr;
	unsigned long hdescsz = hdescseg->vmsize;
	unsigned long hdescszr = round_page_64(hdescsz);
	vm_offset_t hdescc = hdescb, hdesce = hdescb + hdescszr;

	kernel_section_t *thdescsect = getsectbynamefromheader(&_mh_execute_header, "__HIB", "__text");
	vm_offset_t thdescb = thdescsect->addr;
	unsigned long thdescsz = thdescsect->size;
	unsigned long thdescszr = round_page_64(thdescsz);
	vm_offset_t thdesce = thdescb + thdescszr;

	assert((hdescb & 0xFFF) == 0);
	/* Mirror HIB translations into the double-mapped pagetable subtree */
	for (int i = 0; hdescc < hdesce; i++) {
		struct {
			pt_entry_t entries[PTE_PER_PAGE];
		} * dblmapL1 = ALLOCPAGES(1);
		dblallocs++;
		dblmapL2[0].entries[i] = ((uintptr_t)ID_MAP_VTOP(&dblmapL1[0])) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF;
		int hdescn = (int) ((hdesce - hdescc) / PAGE_SIZE);
		for (int j = 0; j < MIN(PTE_PER_PAGE, hdescn); j++) {
			uint64_t template = INTEL_PTE_VALID;
			if ((hdescc >= thdescb) && (hdescc < thdesce)) {
				/* executable */
			} else {
				template |= INTEL_PTE_WRITE | INTEL_PTE_NX; /* Writeable, NX */
			}
			dblmapL1[0].entries[j] = ((uintptr_t)ID_MAP_VTOP(hdescc)) | template;
			hdescc += PAGE_SIZE;
		}
	}

	IdlePML4[KERNEL_DBLMAP_PML4_INDEX] = ((uintptr_t)ID_MAP_VTOP(dblmapL3)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF;

	dblmap_base = KVADDR(KERNEL_DBLMAP_PML4_INDEX, randL3, 0, 0);
	dblmap_max = dblmap_base + hdescszr;
	/* Calculate the double-map distance, which accounts for the current
	 * KASLR slide
	 */

	dblmap_dist = dblmap_base - hdescb;
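	/*
	 * Illustrative note (assumption about the DBLMAP() macro, which is
	 * defined elsewhere): double-mapped aliases are expected to be formed
	 * by adding this distance, roughly
	 *   DBLMAP(va) == (va) + dblmap_dist
	 * so the HIB-segment entry points below land in the doublemapped window.
	 */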
	idt64_hndl_table0[1] = DBLMAP(idt64_hndl_table0[1]); /* 64-bit exit trampoline */
	idt64_hndl_table0[3] = DBLMAP(idt64_hndl_table0[3]); /* 32-bit exit trampoline */
	idt64_hndl_table0[6] = (uint64_t)(uintptr_t)&kernel_stack_mask;

	extern cpu_data_t cpshadows[], scdatas[];
	uintptr_t cd1 = (uintptr_t) &cpshadows[0];
	uintptr_t cd2 = (uintptr_t) &scdatas[0];
	/* Record the displacement from the kernel's per-CPU data pointer, eventually
	 * programmed into GSBASE, to the "shadows" in the doublemapped
	 * region. These are not aliases, but separate physical allocations
	 * containing data required in the doublemapped trampolines.
	 */
	idt64_hndl_table0[2] = dblmap_dist + cd1 - cd2;

	DBG("Double map base: 0x%qx\n", dblmap_base);
	DBG("double map idlepml4[%d]: 0x%llx\n", KERNEL_DBLMAP_PML4_INDEX, IdlePML4[KERNEL_DBLMAP_PML4_INDEX]);
	assert(LDTSZ > LDTSZ_MIN);
}

vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t);

#include <i386/pmap_internal.h>

/*
 * Use of this routine is expected to be synchronized by callers.
 * Creates non-executable aliases.
 */
vm_offset_t
dyn_dblmap(vm_offset_t cva, vm_offset_t sz)
{
	vm_offset_t ava = dblmap_max;

	assert((sz & PAGE_MASK) == 0);
	assert(cva != 0);

	pmap_alias(ava, cva, cva + sz, VM_PROT_READ | VM_PROT_WRITE, PMAP_EXPAND_OPTIONS_ALIASMAP);
	dblmap_max += sz;
	return ava - cva;
}
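
/*
 * Illustrative usage (example only): the return value is the delta from the
 * original VA to its alias, so callers are expected to do something like:
 */
#if 0	/* example only, not compiled */
	vm_offset_t delta = dyn_dblmap(cva, sz);
	vm_offset_t alias = cva + delta;	/* RW, NX alias of [cva, cva+sz) */
#endif
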
/* Adjust offsets interior to the bootstrap interrupt descriptor table to redirect
 * control to the double-mapped interrupt vectors. The IDTR proper will be
 * programmed via cpu_desc_load()
 */
void
idt64_remap(void)
{
	for (int i = 0; i < IDTSZ; i++) {
		master_idt64[i].offset64 = DBLMAP(master_idt64[i].offset64);
	}
}