xref: /xnu-8020.101.4/osfmk/i386/pal_routines.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * file: pal_routines.c
31  *       Platform Abstraction Layer routines for bare-metal i386 and x86_64
32  */
33 
34 
35 #include <kern/kern_types.h>
36 #include <mach/mach_types.h>
37 #include <kern/thread.h>
38 #include <kern/simple_lock.h>
39 
40 #include <sys/kdebug.h>
41 #include <machine/pal_routines.h>
42 #include <i386/serial_io.h>
43 #include <i386/lapic.h>
44 #include <i386/proc_reg.h>
45 #include <i386/misc_protos.h>
46 #include <i386/machine_routines.h>
47 #include <i386/pmap.h>
48 
49 //#define PAL_DEBUG 1
50 #ifdef PAL_DEBUG
51 #define DBG(x...)       kprintf("PAL_DBG: " x)
52 #else
53 #define DBG(x...)
54 #endif /* PAL_DEBUG */
55 
56 extern void *gPEEFIRuntimeServices;
57 extern void *gPEEFISystemTable;
58 
59 /* nanotime conversion information */
60 pal_rtc_nanotime_t pal_rtc_nanotime_info = {0, 0, 0, 0, 1, 0};
61 
62 /* APIC kext may use this to access xnu internal state */
63 struct pal_apic_table *apic_table = NULL;
64 
65 decl_simple_lock_data(static, pal_efi_lock);
66 #ifdef __x86_64__
67 static pml4_entry_t IDPML4[PTE_PER_PAGE] __attribute__ ((aligned(4096)));
68 uint64_t        pal_efi_saved_cr0;
69 uint64_t        pal_efi_saved_cr3;
70 #endif
71 
72 
73 /* Serial routines */
int
pal_serial_init(void)
{
	/*
	 * Initialize the serial console hardware; thin pass-through to the
	 * i386 serial driver (serial_io.h). Returns serial_init()'s result.
	 */
	return serial_init();
}
79 
void
pal_serial_putc_nocr(char c)
{
	/* Emit the character raw: no carriage return is appended after '\n'. */
	serial_putc(c);
}
85 
/*
 * Write one character to the serial console, appending a carriage
 * return after each newline so terminals see CR/LF line endings.
 */
void
pal_serial_putc(char c)
{
	serial_putc(c);

	if ('\n' == c) {
		serial_putc('\r');
	}
}
94 
int
pal_serial_getc(void)
{
	/* Read one character from the serial console via the i386 driver. */
	return serial_getc();
}
100 
101 
102 /* Generic routines */
void
pal_i386_init(void)
{
	/* One-time PAL setup: initialize the lock serializing EFI thunk calls. */
	simple_lock_init(&pal_efi_lock, 0);
}
108 
void
pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2,
    pal_cr_t *cr3, pal_cr_t *cr4 )
{
	/* Snapshot the four x86 control registers into the caller's slots. */
	*cr0 = get_cr0();
	*cr2 = get_cr2();
	*cr3 = get_cr3_raw();
	*cr4 = get_cr4();
}
118 
119 
120 /*
121  * define functions below here to ensure we have symbols for these,
122  * even though they're not used on this platform.
123  */
#undef pal_dbg_page_fault
void
pal_dbg_page_fault( thread_t thread __unused,
    user_addr_t vaddr __unused,
    kern_return_t kr __unused )
{
	/* Intentionally empty: bare metal exports the symbol but does nothing. */
}
131 
#undef pal_dbg_set_task_name
void
pal_dbg_set_task_name( task_t task __unused )
{
	/* Intentionally empty: bare metal exports the symbol but does nothing. */
}
137 
#undef pal_set_signal_delivery
void
pal_set_signal_delivery(thread_t thread __unused)
{
	/* Intentionally empty: bare metal exports the symbol but does nothing. */
}
143 
144 /* EFI thunks */
145 extern void
146 _pal_efi_call_in_64bit_mode_asm(uint64_t func,
147     struct pal_efi_registers *efi_reg,
148     void *stack_contents,
149     size_t stack_contents_size);
150 
151 kern_return_t
pal_efi_call_in_64bit_mode(uint64_t func,struct pal_efi_registers * efi_reg,void * stack_contents,size_t stack_contents_size,uint64_t * efi_status)152 pal_efi_call_in_64bit_mode(uint64_t func,
153     struct pal_efi_registers *efi_reg,
154     void *stack_contents,
155     size_t stack_contents_size,                        /* 16-byte multiple */
156     uint64_t *efi_status)
157 {
158 	DBG("pal_efi_call_in_64bit_mode(0x%016llx, %p, %p, %lu, %p)\n",
159 	    func, efi_reg, stack_contents, stack_contents_size, efi_status);
160 
161 	if (func == 0) {
162 		return KERN_INVALID_ADDRESS;
163 	}
164 
165 	if ((efi_reg == NULL)
166 	    || (stack_contents == NULL)
167 	    || (stack_contents_size % 16 != 0)) {
168 		return KERN_INVALID_ARGUMENT;
169 	}
170 
171 	if (!gPEEFISystemTable || !gPEEFIRuntimeServices) {
172 		return KERN_NOT_SUPPORTED;
173 	}
174 
175 	if (func < VM_MIN_KERNEL_ADDRESS) {
176 		/*
177 		 * EFI Runtime Services must be mapped in our address
178 		 * space at an appropriate location.
179 		 */
180 		return KERN_INVALID_ADDRESS;
181 	}
182 
183 	_pal_efi_call_in_64bit_mode_asm(func,
184 	    efi_reg,
185 	    stack_contents,
186 	    stack_contents_size);
187 
188 	*efi_status = efi_reg->rax;
189 
190 	return KERN_SUCCESS;
191 }
192 
extern void
_pal_efi_call_in_32bit_mode_asm(uint32_t func,
    struct pal_efi_registers *efi_reg,
    void *stack_contents,
    size_t stack_contents_size);

/*
 * Invoke a 32-bit EFI runtime service from the 64-bit kernel.
 *
 *   func                - 32-bit address of the EFI entry point
 *   efi_reg             - register arguments; rax receives the result
 *   stack_contents      - arguments spilled to the stack
 *   stack_contents_size - byte count of stack_contents; 16-byte multiple
 *   efi_status          - out parameter: low 32 bits of rax on return
 *
 * On x86_64 this temporarily installs an identity-style page table,
 * drops to compatibility mode via the asm thunk, then restores the
 * previous CR0/CR3 and segment state.
 *
 * NOTE(review): efi_status is dereferenced at the end without a NULL
 * check, unlike efi_reg/stack_contents — callers must pass a valid
 * pointer.
 */
kern_return_t
pal_efi_call_in_32bit_mode(uint32_t func,
    struct pal_efi_registers *efi_reg,
    void *stack_contents,
    size_t stack_contents_size,                        /* 16-byte multiple */
    uint32_t *efi_status)
{
	DBG("pal_efi_call_in_32bit_mode(0x%08x, %p, %p, %lu, %p)\n",
	    func, efi_reg, stack_contents, stack_contents_size, efi_status);

	if (func == 0) {
		return KERN_INVALID_ADDRESS;
	}

	if ((efi_reg == NULL)
	    || (stack_contents == NULL)
	    || (stack_contents_size % 16 != 0)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Both EFI tables must have been registered by the platform expert. */
	if (!gPEEFISystemTable || !gPEEFIRuntimeServices) {
		return KERN_NOT_SUPPORTED;
	}

	DBG("pal_efi_call_in_32bit_mode() efi_reg:\n");
	DBG("  rcx: 0x%016llx\n", efi_reg->rcx);
	DBG("  rdx: 0x%016llx\n", efi_reg->rdx);
	DBG("   r8: 0x%016llx\n", efi_reg->r8);
	DBG("   r9: 0x%016llx\n", efi_reg->r9);
	DBG("  rax: 0x%016llx\n", efi_reg->rax);

	DBG("pal_efi_call_in_32bit_mode() stack:\n");
#if PAL_DEBUG
	size_t i;
	for (i = 0; i < stack_contents_size; i += sizeof(uint32_t)) {
		uint32_t *p = (uint32_t *) ((uintptr_t)stack_contents + i);
		DBG("  %p: 0x%08x\n", p, *p);
	}
#endif

#ifdef __x86_64__
	/*
	 * Ensure no interruptions.
	 * Taking a spinlock for serialization is technically unnecessary
	 * because the EFIRuntime kext should serialize.
	 */
	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	simple_lock(&pal_efi_lock, LCK_GRP_NULL);

	/*
	 * Switch to special page tables with the entire high kernel space
	 * double-mapped into the bottom 4GB.
	 *
	 * NB: We assume that all data passed exchanged with RuntimeServices is
	 * located in the 4GB of KVA based at VM_MIN_ADDRESS. In particular, kexts
	 * loaded the basement (below VM_MIN_ADDRESS) cannot pass static data.
	 * Kernel stack and heap space is OK.
	 */
	MARK_CPU_IDLE(cpu_number());
	/* Save CR0/CR3 so the normal address space can be restored below. */
	pal_efi_saved_cr3 = get_cr3_raw();
	pal_efi_saved_cr0 = get_cr0();
	IDPML4[KERNEL_PML4_INDEX] = IdlePML4[KERNEL_PML4_INDEX];
	IDPML4[0]                 = IdlePML4[KERNEL_PML4_INDEX];
	clear_ts();
	set_cr3_raw((uint64_t) ID_MAP_VTOP(IDPML4));

	swapgs();               /* Save kernel's GS base */

	/* Set segment state ready for compatibility mode */
	set_gs(NULL_SEG);
	set_fs(NULL_SEG);
	set_es(KERNEL_DS);
	set_ds(KERNEL_DS);
	set_ss(KERNEL_DS);

	_pal_efi_call_in_32bit_mode_asm(func,
	    efi_reg,
	    stack_contents,
	    stack_contents_size);

	/* Restore NULL segment state */
	set_ss(NULL_SEG);
	set_es(NULL_SEG);
	set_ds(NULL_SEG);

	swapgs();               /* Restore kernel's GS base */

	/* Restore the 64-bit user GS base we just destroyed */
	wrmsr64(MSR_IA32_KERNEL_GS_BASE,
	    current_cpu_datap()->cpu_uber.cu_user_gs_base);

	/* End of mapping games */
	set_cr3_raw(pal_efi_saved_cr3);
	set_cr0(pal_efi_saved_cr0);
	MARK_CPU_ACTIVE(cpu_number());

	simple_unlock(&pal_efi_lock);
	ml_set_interrupts_enabled(istate);
#else
	_pal_efi_call_in_32bit_mode_asm(func,
	    efi_reg,
	    stack_contents,
	    stack_contents_size);
#endif

	/* EFI returns status in rax; truncate to the 32-bit out parameter. */
	*efi_status = (uint32_t)efi_reg->rax;
	DBG("pal_efi_call_in_32bit_mode() efi_status: 0x%x\n", *efi_status);

	return KERN_SUCCESS;
}
309 
310 /* wind-back a syscall instruction */
311 void
pal_syscall_restart(thread_t thread __unused,x86_saved_state_t * state)312 pal_syscall_restart(thread_t thread __unused, x86_saved_state_t *state)
313 {
314 	/* work out which flavour thread it is */
315 	if (is_saved_state32(state)) {
316 		x86_saved_state32_t     *regs32;
317 		regs32 = saved_state32(state);
318 
319 		if (regs32->cs == SYSENTER_CS || regs32->cs == SYSENTER_TF_CS) {
320 			regs32->eip -= 5;
321 		} else {
322 			regs32->eip -= 2;
323 		}
324 	} else {
325 		x86_saved_state64_t     *regs64;
326 
327 		assert( is_saved_state64(state));
328 		regs64 = saved_state64(state);
329 
330 		/* Only one instruction for 64-bit threads */
331 		regs64->isf.rip -= 2;
332 	}
333 }
334 
335 /* Helper function to put the machine to sleep (or shutdown) */
336 
boolean_t
pal_machine_sleep(uint8_t type_a __unused, uint8_t type_b __unused, uint32_t bit_position __unused,
    uint32_t disable_mask __unused, uint32_t enable_mask __unused)
{
	/*
	 * PAL-level sleep is a no-op on bare metal; returns 0 (presumably
	 * "did not sleep" — sleep is handled elsewhere; TODO confirm caller
	 * semantics).
	 */
	return 0;
}
343 
344 
345 /* shouldn't be used on native */
void
pal_get_kern_regs( x86_saved_state_t *state )
{
	/* Must never be reached on native hardware; panic loudly if it is. */
	panic( "pal_get_kern_regs called. state %p", state );
}
351 
void
pal_preemption_assert(void)
{
	/* Intentionally empty: no extra preemption checks on bare metal. */
}
356