/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/percpu.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/machine/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <i386/bit_routines.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/pmap.h>
#include <i386/postcode.h>
#include <i386/pmap_internal.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#include <kern/misc_protos.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
#include <san/kasan.h>

#define K_INTR_GATE (ACC_P|ACC_PL_K|ACC_INTR_GATE)
#define U_INTR_GATE (ACC_P|ACC_PL_U|ACC_INTR_GATE)

// Declare macros that will declare the externs
#define TRAP(n, name)           extern void *name ;
#define TRAP_ERR(n, name)       extern void *name ;
#define TRAP_SPC(n, name)       extern void *name ;
#define TRAP_IST1(n, name)      extern void *name ;
#define TRAP_IST2(n, name)      extern void *name ;
#define INTERRUPT(n)            extern void *_intr_ ## n ;
#define USER_TRAP(n, name)      extern void *name ;
#define USER_TRAP_SPC(n, name)  extern void *name ;

// Include the table to declare the externs
#include "../x86_64/idt_table.h"

// Undef the macros, then redefine them so we can declare the table
#undef TRAP
#undef TRAP_ERR
#undef TRAP_SPC
#undef TRAP_IST1
#undef TRAP_IST2
#undef INTERRUPT
#undef USER_TRAP
#undef USER_TRAP_SPC

#define TRAP(n, name)                   \
	[n] = {                         \
	        (uintptr_t)&name,       \
	        KERNEL64_CS,            \
	        0,                      \
	        K_INTR_GATE,            \
	        0                       \
	},

#define TRAP_ERR TRAP
#define TRAP_SPC TRAP

#define TRAP_IST1(n, name) \
	[n] = {                         \
	        (uintptr_t)&name,       \
	        KERNEL64_CS,            \
	        1,                      \
	        K_INTR_GATE,            \
	        0                       \
	},

#define TRAP_IST2(n, name) \
	[n] = {                         \
	        (uintptr_t)&name,       \
	        KERNEL64_CS,            \
	        2,                      \
	        K_INTR_GATE,            \
	        0                       \
	},

#define INTERRUPT(n) \
	[n] = {                         \
	        (uintptr_t)&_intr_ ## n,\
	        KERNEL64_CS,            \
	        0,                      \
	        K_INTR_GATE,            \
	        0                       \
	},

#define USER_TRAP(n, name) \
	[n] = {                         \
	        (uintptr_t)&name,       \
	        KERNEL64_CS,            \
	        0,                      \
	        U_INTR_GATE,            \
	        0                       \
	},

#define USER_TRAP_SPC USER_TRAP

// Declare the table using the macros we just set up
struct fake_descriptor64 master_idt64[IDTSZ]
__attribute__ ((section("__HIB,__desc")))
__attribute__ ((aligned(PAGE_SIZE))) = {
#include "../x86_64/idt_table.h"
};
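
/*
 * To illustrate the two-pass inclusion above: a hypothetical idt_table.h
 * entry such as TRAP_IST1(0x08, hyp_df_handler) would, on this second
 * inclusion, expand to the designated initializer sketched below (the
 * vector number and handler name are illustrative, not actual table
 * contents):
 *
 *	[0x08] = {
 *	        (uintptr_t)&hyp_df_handler,     // offset64: handler address
 *	        KERNEL64_CS,                    // lim_or_seg: code selector
 *	        1,                              // size_or_IST: use IST1 stack
 *	        K_INTR_GATE,                    // access: kernel interrupt gate
 *	        0                               // reserved
 *	},
 */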

/*
 * First CPU's interrupt stack.
 */
extern uint32_t         low_intstack[];         /* bottom */
extern uint32_t         low_eintstack[];        /* top */

/*
 * Per-cpu data area pointers.
 */
cpu_data_t cpshadows[MAX_CPUS] __attribute__((aligned(64))) __attribute__((section("__HIB, __desc")));
cpu_data_t scdatas[MAX_CPUS] __attribute__((aligned(64))) = {
	[0].cpu_this = &scdatas[0],
	[0].cpu_nanotime = &pal_rtc_nanotime_info,
	[0].cpu_int_stack_top = (vm_offset_t) low_eintstack,
	[0].cd_shadow = &cpshadows[0]
};
cpu_data_t *cpu_data_master = &scdatas[0];

cpu_data_t      *cpu_data_ptr[MAX_CPUS] = {[0] = &scdatas[0] };

SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;

decl_simple_lock_data(, ncpus_lock);     /* protects real_ncpus */
unsigned int    real_ncpus = 1;
unsigned int    max_ncpus = MAX_CPUS;
unsigned int    max_cpus_from_firmware = 0;

extern void hi64_sysenter(void);
extern void hi64_syscall(void);

typedef struct {
	struct real_descriptor pcldts[LDTSZ];
} cldt_t;

cpu_desc_table64_t scdtables[MAX_CPUS] __attribute__((aligned(64))) __attribute__((section("__HIB, __desc")));
cpu_fault_stack_t scfstks[MAX_CPUS] __attribute__((aligned(64))) __attribute__((section("__HIB, __desc")));

cldt_t *dyn_ldts;

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */

/*
 * Allocate and initialize the per-processor descriptor tables.
 */

/*
 * This is the expanded, 64-bit variant of the kernel LDT descriptor.
 * When switching to 64-bit mode this replaces the KERNEL_LDT entry
 * and the following empty slot. This enables the LDT to be referenced
 * in the uber-space remapping window on the kernel.
 */
struct fake_descriptor64 kernel_ldt_desc64 = {
	.offset64 = 0,
	.lim_or_seg = LDTSZ_MIN * sizeof(struct fake_descriptor) - 1,
	.size_or_IST = 0,
	.access = ACC_P | ACC_PL_K | ACC_LDT,
	.reserved = 0
};

/*
 * This is the expanded, 64-bit variant of the kernel TSS descriptor.
 * It follows the pattern of the KERNEL_LDT.
 */
struct fake_descriptor64 kernel_tss_desc64 = {
	.offset64 = 0,
	.lim_or_seg = sizeof(struct x86_64_tss) - 1,
	.size_or_IST = 0,
	.access = ACC_P | ACC_PL_K | ACC_TSS,
	.reserved = 0
};
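
/*
 * (Architecturally, system descriptors such as these LDT and TSS entries
 * are 16 bytes wide in long mode, twice the size of a legacy 8-byte
 * descriptor; that is why each expanded entry consumes its nominal GDT
 * slot plus the empty slot that follows it, as noted above.)
 */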

/*
 * Convert a descriptor from fake to real format.
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
void
fix_desc(void *d, int num_desc)
{
	uint8_t *desc = (uint8_t*) d;

	do {
		if ((desc[7] & 0x14) == 0x04) { /* gate */
			uint32_t offset;
			uint16_t selector;
			uint8_t wordcount;
			uint8_t acc;

			offset = *((uint32_t*)(desc));
			selector = *((uint16_t*)(desc + 4));
			wordcount = desc[6] >> 4;
			acc = desc[7];

			*((uint16_t*)desc) = offset & 0xFFFF;
			*((uint16_t*)(desc + 2)) = selector;
			desc[4] = wordcount;
			desc[5] = acc;
			*((uint16_t*)(desc + 6)) = offset >> 16;
		} else { /* descriptor */
			uint32_t base;
			uint16_t limit;
			uint8_t acc1, acc2;

			base = *((uint32_t*)(desc));
			limit = *((uint16_t*)(desc + 4));
			acc2 = desc[6];
			acc1 = desc[7];

			*((uint16_t*)(desc)) = limit;
			*((uint16_t*)(desc + 2)) = base & 0xFFFF;
			desc[4] = (base >> 16) & 0xFF;
			desc[5] = acc1;
			desc[6] = acc2;
			desc[7] = base >> 24;
		}
		desc += 8;
	} while (--num_desc);
}
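
/*
 * A worked example of the gate path above (a sketch; the values are
 * illustrative, not taken from any real table). Given a fake gate with
 * offset 0x00123456, selector 0x0008, word count 0 and access byte 0x8E
 * (K_INTR_GATE, for which 0x8E & 0x14 == 0x04), the in-place shuffle is:
 *
 *	fake:  56 34 12 00  08 00  00  8E
 *	real:  56 34  08 00  00  8E  12 00
 *
 * i.e. offset 15..0, selector, word count, access, offset 31..16.
 */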

void
fix_desc64(void *descp, int count)
{
	struct fake_descriptor64        *fakep;
	union {
		struct real_gate64              gate;
		struct real_descriptor64        desc;
	}                               real;
	int                             i;

	fakep = (struct fake_descriptor64 *) descp;

	for (i = 0; i < count; i++, fakep++) {
		/*
		 * Construct the real descriptor locally.
		 */

		bzero((void *) &real, sizeof(real));

		switch (fakep->access & ACC_TYPE) {
		case 0:
			break;
		case ACC_CALL_GATE:
		case ACC_INTR_GATE:
		case ACC_TRAP_GATE:
			real.gate.offset_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
			real.gate.selector16 = fakep->lim_or_seg & 0xFFFF;
			real.gate.IST = fakep->size_or_IST & 0x7;
			real.gate.access8 = fakep->access;
			real.gate.offset_high16 = (uint16_t)((fakep->offset64 >> 16) & 0xFFFF);
			real.gate.offset_top32 = (uint32_t)(fakep->offset64 >> 32);
			break;
		default:        /* Otherwise */
			real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF;
			real.desc.base_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
			real.desc.base_med8 = (uint8_t)((fakep->offset64 >> 16) & 0xFF);
			real.desc.access8 = fakep->access;
			real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xFF;
			real.desc.granularity4 = fakep->size_or_IST;
			real.desc.base_high8 = (uint8_t)((fakep->offset64 >> 24) & 0xFF);
			real.desc.base_top32 = (uint32_t)(fakep->offset64 >> 32);
		}

		/*
		 * Now copy back over the fake structure.
		 */
		bcopy((void *) &real, (void *) fakep, sizeof(real));
	}
}
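
/*
 * (Both members of the union above, and struct fake_descriptor64 itself,
 * occupy 16 bytes in the 64-bit layouts, so the bcopy of sizeof(real)
 * bytes back over the fake entry is size-preserving and the conversion
 * can safely run in place.)
 */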

extern unsigned mldtsz;
void
cpu_desc_init(cpu_data_t *cdp)
{
	cpu_desc_index_t        *cdi = &cdp->cpu_desc_index;

	if (cdp == cpu_data_master) {
		/*
		 * Populate the double-mapped 'u' and base 'b' fields in the
		 * KTSS with I/G/LDT and sysenter stack data.
		 */
		cdi->cdi_ktssu = (void *)DBLMAP(&master_ktss64);
		cdi->cdi_ktssb = (void *)&master_ktss64;
		cdi->cdi_sstku = (vm_offset_t) DBLMAP(&master_sstk.top);
		cdi->cdi_sstkb = (vm_offset_t) &master_sstk.top;

		cdi->cdi_gdtu.ptr = (void *)DBLMAP((uintptr_t) &master_gdt);
		cdi->cdi_gdtb.ptr = (void *)&master_gdt;
		cdi->cdi_idtu.ptr  = (void *)DBLMAP((uintptr_t) &master_idt64);
		cdi->cdi_idtb.ptr  = (void *)((uintptr_t) &master_idt64);
		cdi->cdi_ldtu  = (struct real_descriptor *)DBLMAP((uintptr_t)&master_ldt[0]);
		cdi->cdi_ldtb  = &master_ldt[0];

		/* Replace the expanded LDT and TSS slots in the GDT */
		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
		    kernel_ldt_desc64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] =
		    kernel_ldt_desc64;
		kernel_tss_desc64.offset64 = (uintptr_t) DBLMAP(&master_ktss64);
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
		    kernel_tss_desc64;

		/* Fix up the expanded descriptors for 64-bit. */
		fix_desc64((void *) &master_idt64, IDTSZ);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(USER_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);

		/*
		 * Set the NMI/fault stacks as IST2/IST1 in the 64-bit TSS
		 */
		master_ktss64.ist2 = (uintptr_t) low_eintstack;
		master_ktss64.ist1 = (uintptr_t) low_eintstack - sizeof(x86_64_intr_stack_frame_t);
	} else if (cdi->cdi_ktssu == NULL) {    /* Skipping re-init on wake */
		cpu_desc_table64_t      *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;

		cdi->cdi_idtu.ptr  = (void *)DBLMAP((uintptr_t) &master_idt64);

		cdi->cdi_ktssu = (void *)DBLMAP(&cdt->ktss);
		cdi->cdi_ktssb = (void *)(&cdt->ktss);
		cdi->cdi_sstku = (vm_offset_t)DBLMAP(&cdt->sstk.top);
		cdi->cdi_sstkb = (vm_offset_t)(&cdt->sstk.top);
		cdi->cdi_ldtu  = (void *)LDTALIAS(cdp->cpu_ldtp);
		cdi->cdi_ldtb  = (void *)(cdp->cpu_ldtp);

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, mldtsz);
		bcopy((char *)&master_ktss64, (char *)&cdt->ktss, sizeof(struct x86_64_tss));
		cdi->cdi_gdtu.ptr  = (void *)DBLMAP(cdt->gdt);
		cdi->cdi_gdtb.ptr  = (void *)(cdt->gdt);
		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 * Note the reuse of the global 'kernel_ldt_desc64', which is
		 * not concurrency-safe. Higher-level synchronization is
		 * expected.
		 */
		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
		    kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
		    kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);

		kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktssu;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
		    kernel_tss_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		/* Set (zeroed) fault stack as IST1, NMI intr stack as IST2 */
		uint8_t *cfstk = &scfstks[cdp->cpu_number].fstk[0];
		cdt->fstkp = cfstk;
		bzero((void *) cfstk, FSTK_SZ);
		cdt->ktss.ist2 = DBLMAP((uint64_t)cdt->fstkp + FSTK_SZ);
		cdt->ktss.ist1 = cdt->ktss.ist2 - sizeof(x86_64_intr_stack_frame_t);
	}

	/* Require that the top of the sysenter stack is 16-byte aligned */
	if ((cdi->cdi_sstku % 16) != 0) {
		panic("cpu_desc_init() sysenter stack not 16-byte aligned");
	}
}
void
cpu_desc_load(cpu_data_t *cdp)
{
	cpu_desc_index_t        *cdi = &cdp->cpu_desc_index;

	postcode(CPU_DESC_LOAD_ENTRY);

	/* Stuff the kernel per-cpu data area address into the MSRs */
	postcode(CPU_DESC_LOAD_GS_BASE);
	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
	postcode(CPU_DESC_LOAD_KERNEL_GS_BASE);
	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);

	/*
	 * Ensure the TSS segment's busy bit is clear. This is required
	 * for the case of reloading descriptors at wake to avoid
	 * their complete re-initialization.
	 */
	gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY;

	/* Load the GDT, LDT, IDT and TSS */
	cdi->cdi_gdtb.size = sizeof(struct real_descriptor) * GDTSZ - 1;
	cdi->cdi_gdtu.size = cdi->cdi_gdtb.size;
	cdi->cdi_idtb.size = 0x1000 + cdp->cpu_number;
	cdi->cdi_idtu.size = cdi->cdi_idtb.size;

	postcode(CPU_DESC_LOAD_GDT);
	lgdt((uintptr_t *) &cdi->cdi_gdtu);
	postcode(CPU_DESC_LOAD_IDT);
	lidt((uintptr_t *) &cdi->cdi_idtu);
	postcode(CPU_DESC_LOAD_LDT);
	lldt(KERNEL_LDT);
	postcode(CPU_DESC_LOAD_TSS);
	set_tr(KERNEL_TSS);

	postcode(CPU_DESC_LOAD_EXIT);
}
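
/*
 * For reference, the {size, ptr} pairs handed to lgdt/lidt above follow
 * the architectural pseudo-descriptor layout for 64-bit mode: a 16-bit
 * limit followed by a 64-bit linear base. A minimal sketch of that shape
 * (field and type names here are illustrative, not the kernel's actual
 * declarations):
 */
#if 0
typedef struct __attribute__((packed)) {
	uint16_t limit;  /* size of the table in bytes, minus one */
	uint64_t base;   /* linear address of the first descriptor */
} pseudo_descriptor64_t;
#endif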

/*
 * Set MSRs for sysenter/sysexit and syscall/sysret for 64-bit.
 */
void
cpu_syscall_init(cpu_data_t *cdp)
{
#pragma unused(cdp)

	wrmsr64(MSR_IA32_SYSENTER_CS, SYSENTER_CS);
	wrmsr64(MSR_IA32_SYSENTER_EIP, DBLMAP((uintptr_t) hi64_sysenter));
	wrmsr64(MSR_IA32_SYSENTER_ESP, current_cpu_datap()->cpu_desc_index.cdi_sstku);
	/* Enable syscall/sysret */
	wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_SCE);

	/*
	 * MSRs for 64-bit syscall/sysret.
	 * Note USER_CS because sysret uses this + 16 when returning to
	 * 64-bit code.
	 */
	wrmsr64(MSR_IA32_LSTAR, DBLMAP((uintptr_t) hi64_syscall));
	wrmsr64(MSR_IA32_STAR, (((uint64_t)USER_CS) << 48) | (((uint64_t)KERNEL64_CS) << 32));
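
	/*
	 * (Architecturally, SYSCALL loads CS from STAR[47:32] and SS from
	 * STAR[47:32] + 8, while SYSRET to 64-bit code loads CS from
	 * STAR[63:48] + 16 and SS from STAR[63:48] + 8; hence the two
	 * selectors packed above.)
	 */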
	/*
	 * Emulate eflags cleared by sysenter but note that
	 * we also clear the trace trap to avoid the complications
	 * of single-stepping into a syscall. The nested task bit
	 * is also cleared to avoid a spurious "task switch"
	 * should we choose to return via an IRET.
	 */
	wrmsr64(MSR_IA32_FMASK, EFL_DF | EFL_IF | EFL_TF | EFL_NT);
}
extern vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t);
uint64_t ldt_alias_offset;

__startup_func
static void
cpu_data_startup_init(void)
{
	int flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_PERMANENT |
	    KMA_ZERO | KMA_KOBJECT | KMA_NOFAIL;
	uint32_t cpus = max_cpus_from_firmware;
	vm_size_t size = percpu_section_size() * (cpus - 1);

	percpu_base.size = percpu_section_size();
	if (cpus == 0) {
		panic("percpu: max_cpus_from_firmware not yet initialized");
	}
	if (cpus == 1) {
		percpu_base.start = VM_MAX_KERNEL_ADDRESS;
		return;
	}

	kmem_alloc(kernel_map, &percpu_base.start,
	    round_page(size) + ptoa(2), flags, VM_KERN_MEMORY_CPU);

	percpu_base.start += PAGE_SIZE - percpu_section_start();
	percpu_base.end    = percpu_base.start + size - 1;
}
STARTUP(PERCPU, STARTUP_RANK_FIRST, cpu_data_startup_init);
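
/*
 * A sketch of the resulting layout, assuming N CPUs (the guard pages come
 * from KMA_GUARD_FIRST/KMA_GUARD_LAST, and the boot CPU keeps the static
 * percpu section, so only N-1 dynamic slabs are needed):
 *
 *	[guard page][cpu 1 slab][cpu 2 slab]...[cpu N-1 slab][guard page]
 *
 * cpu_data_alloc() below then derives each secondary CPU's base as
 * percpu_base.start + (cnum - 1) * percpu_section_size().
 */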

cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t      *cdp;

	if (is_boot_cpu) {
		assert(real_ncpus == 1);
		cdp = cpu_datap(0);
		if (cdp->cpu_processor == NULL) {
			simple_lock_init(&ncpus_lock, 0);
			cdp->cpu_processor = PERCPU_GET_MASTER(processor);
		}
		return cdp;
	}

	boolean_t do_ldt_alloc = FALSE;
	simple_lock(&ncpus_lock, LCK_GRP_NULL);
	int cnum = real_ncpus;
	real_ncpus++;
	if (dyn_ldts == NULL) {
		do_ldt_alloc = TRUE;
	}
	simple_unlock(&ncpus_lock);

	/*
	 * Allocate per-cpu data:
	 */

	cdp = &scdatas[cnum];
	bzero((void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;
	cdp->cpu_number = cnum;
	cdp->cd_shadow = &cpshadows[cnum];
	cdp->cpu_pcpu_base = percpu_base.start + (cnum - 1) * percpu_section_size();
	cdp->cpu_processor = PERCPU_GET_WITH_BASE(cdp->cpu_pcpu_base, processor);

	/*
	 * Allocate interrupt stack:
	 */
	kmem_alloc(kernel_map, (vm_offset_t *)&cdp->cpu_int_stack_top,
	    INTSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KOBJECT, VM_KERN_MEMORY_CPU);
	cdp->cpu_int_stack_top += INTSTACK_SIZE + PAGE_SIZE;

	/*
	 * Allocate descriptor table:
	 */

	cdp->cpu_desc_tablep = (struct cpu_desc_table *) &scdtables[cnum];
	/*
	 * Allocate LDT
	 */
	if (do_ldt_alloc) {
		boolean_t do_ldt_free = FALSE;
		vm_offset_t sldtoffset = 0;
		/*
		 * Allocate LDT
		 */
		vm_offset_t ldtalloc = 0, ldtallocsz = round_page_64(MAX_CPUS * sizeof(struct real_descriptor) * LDTSZ);

		kmem_alloc(kernel_map, (vm_offset_t *)&ldtalloc,
		    ldtallocsz, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_CPU);

		simple_lock(&ncpus_lock, LCK_GRP_NULL);
		if (dyn_ldts == NULL) {
			dyn_ldts = (cldt_t *)ldtalloc;
		} else {
			do_ldt_free = TRUE;
		}
		simple_unlock(&ncpus_lock);

		if (do_ldt_free) {
			kmem_free(kernel_map, ldtalloc, ldtallocsz);
		} else {
			/*
			 * CPU registration and startup are expected to execute
			 * serially, as invoked by the platform driver.
			 * Create a trampoline alias of the LDT region.
			 */
			sldtoffset = dyn_dblmap(ldtalloc, ldtallocsz);
			ldt_alias_offset = sldtoffset;
		}
	}
	cdp->cpu_ldtp = &dyn_ldts[cnum].pcldts[0];

#if CONFIG_MCA
	/* Machine-check shadow register allocation. */
	mca_cpu_alloc(cdp);
#endif

	/*
	 * Before this cpu has been assigned a real thread context,
	 * we give it a fake, unique, non-zero thread id which the locking
	 * primitives use as their lock value.
	 * Note that this does not apply to the boot processor, cpu 0, which
	 * transitions to a thread context well before other processors are
	 * started.
	 */
	cdp->cpu_active_thread = (thread_t) (uintptr_t) cdp->cpu_number;
	cdp->cpu_NMI_acknowledged = TRUE;
	cdp->cpu_nanotime = &pal_rtc_nanotime_info;

	kprintf("cpu_data_alloc(%d) %p desc_table: %p "
	    "ldt: %p "
	    "int_stack: 0x%lx-0x%lx\n",
	    cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp,
	    (long)(cdp->cpu_int_stack_top - INTSTACK_SIZE), (long)(cdp->cpu_int_stack_top));
	cpu_data_ptr[cnum] = cdp;

	return cdp;
}

boolean_t
valid_user_data_selector(uint16_t selector)
{
	sel_t       sel = selector_to_sel(selector);

	if (selector == 0) {
		return TRUE;
	}

	if (sel.ti == SEL_LDT) {
		return TRUE;
	} else if (sel.index < GDTSZ) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) {
			return TRUE;
		}
	}
	return FALSE;
}

boolean_t
valid_user_code_selector(uint16_t selector)
{
	sel_t       sel = selector_to_sel(selector);

	if (selector == 0) {
		return FALSE;
	}

	if (sel.ti == SEL_LDT) {
		if (sel.rpl == USER_PRIV) {
			return TRUE;
		}
	} else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) {
			return TRUE;
		}
		/*
		 * Explicitly validate the system code selectors
		 * even if not instantaneously privileged,
		 * since they are dynamically re-privileged
		 * at context switch.
		 */
		if ((selector == USER_CS) || (selector == USER64_CS)) {
			return TRUE;
		}
	}

	return FALSE;
}

boolean_t
valid_user_stack_selector(uint16_t selector)
{
	sel_t       sel = selector_to_sel(selector);

	if (selector == 0) {
		return FALSE;
	}

	if (sel.ti == SEL_LDT) {
		if (sel.rpl == USER_PRIV) {
			return TRUE;
		}
	} else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) {
			return TRUE;
		}
	}

	return FALSE;
}

boolean_t
valid_user_segment_selectors(uint16_t cs,
    uint16_t ss,
    uint16_t ds,
    uint16_t es,
    uint16_t fs,
    uint16_t gs)
{
	return valid_user_code_selector(cs) &&
	       valid_user_stack_selector(ss) &&
	       valid_user_data_selector(ds) &&
	       valid_user_data_selector(es) &&
	       valid_user_data_selector(fs) &&
	       valid_user_data_selector(gs);
}
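
/*
 * For reference, the fields tested above follow the architectural x86
 * segment-selector layout, which sel_t presumably mirrors (the bit
 * positions themselves are architectural fact):
 *
 *	bits 1..0	RPL (requested privilege level; USER_PRIV == 3)
 *	bit  2		TI  (table indicator: 0 = GDT, 1 = LDT)
 *	bits 15..3	index into the GDT or LDT
 */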

/*
 * Allocate a new interrupt stack for the boot processor from the
 * heap rather than continue to use the statically allocated space.
 * Also switch to a dynamically allocated cpu data area.
 */
void
cpu_data_realloc(void)
{
	vm_offset_t     istk;
	cpu_data_t      *cdp;
	boolean_t       istate;

	kmem_alloc(kernel_map, &istk,
	    INTSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KOBJECT, VM_KERN_MEMORY_CPU);

	istk += INTSTACK_SIZE + PAGE_SIZE;

	cdp = &scdatas[0];

	/* Copy old contents into new area and make fix-ups */
	assert(cpu_number() == 0);
	bcopy((void *) cpu_data_ptr[0], (void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;
	cdp->cpu_int_stack_top = istk;
	timer_call_queue_init(&cdp->rtclock_timer.queue);
	cdp->cpu_desc_tablep = (struct cpu_desc_table *) &scdtables[0];
	cpu_desc_table64_t      *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;

	uint8_t *cfstk = &scfstks[cdp->cpu_number].fstk[0];
	cdt->fstkp = cfstk;
	cfstk += FSTK_SZ;

	/*
	 * With interrupts disabled, commit the new areas.
	 */
	istate = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr[0] = cdp;
	master_ktss64.ist2 = DBLMAP((uintptr_t) cfstk);
	master_ktss64.ist1 = DBLMAP((uintptr_t) cfstk - sizeof(x86_64_intr_stack_frame_t));
	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
	(void) ml_set_interrupts_enabled(istate);

	kprintf("Reallocated master cpu data: %p,"
	    " interrupt stack: %p, fault stack: %p\n",
	    (void *) cdp, (void *) istk, (void *) cfstk);
}
813