/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <debug.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>
#include <kern/ticket_lock.h>

#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

/*
 * Fault recovery.
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START	\
	.align 3		; \
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

#define	RECOVER(addr)		\
	.align	3;		\
	.quad	9f		;\
	.quad	addr		;\
	.text			;\
9:

#define	RECOVER_TABLE_END		\
	.align	3			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.text
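
/*
 * Each RECOVER() invocation emits a 16-byte entry into the recovery
 * section: the address of the instruction that may fault (the local 9:
 * label) followed by the address at which to resume. Viewed from C
 * (struct and field names here are illustrative, not the kernel's):
 *
 *	struct recovery_entry {
 *		uint64_t fault_pc;	// address of the faulting instruction
 *		uint64_t recover_pc;	// where execution resumes after a fault
 *	};
 *
 * A trap handler can scan the entries between recover_table and
 * recover_table_end; if the trapped RIP matches a fault_pc, it redirects
 * execution to the paired recover_pc instead of panicking.
 */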

/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	%edi, %ecx		/* MSR number into %ecx for rdmsr */
	movq	%rdx, %rdi		/* free up %rdx, which rdmsr writes */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	%eax, (%rsi)		/* store low 32 bits */
	movl	%edx, (%rdi)		/* store high 32 bits */
	xorl	%eax, %eax		/* return 0 on success */
	ret

rdmsr_fail:
	movq	$1, %rax		/* return 1 on fault */
	ret
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 */

ENTRY(rdmsr64_carefully)
	movl	%edi, %ecx
	RECOVERY_SECTION
	RECOVER(rdmsr64_carefully_fail)
	rdmsr
	movl	%eax, (%rsi)
	movl	%edx, 4(%rsi)
	xorl	%eax, %eax
	ret
rdmsr64_carefully_fail:
	movl	$1, %eax
	ret
/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 */

ENTRY(wrmsr_carefully)
	movl	%edi, %ecx		/* MSR number into %ecx */
	movl	%esi, %eax		/* low 32 bits of value */
	shr	$32, %rsi
	movl	%esi, %edx		/* high 32 bits of value */
	RECOVERY_SECTION
	RECOVER(wrmsr_fail)
	wrmsr
	xorl	%eax, %eax
	ret
wrmsr_fail:
	movl	$1, %eax
	ret
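
/*
 * Usage sketch for the three helpers above (illustrative C; the MSR
 * number is an arbitrary example): each returns 0 on success and
 * nonzero if the access raised #GP, so callers can probe MSRs that may
 * not exist on the current CPU.
 *
 *	uint32_t lo, hi;
 *	uint64_t val;
 *	if (rdmsr_carefully(0x10, &lo, &hi) == 0) {
 *		val = ((uint64_t)hi << 32) | lo;	// MSR exists
 *	}
 *	if (rdmsr64_carefully(0x10, &val) == 0 &&
 *	    wrmsr_carefully(0x10, val) == 0) {
 *		// read-modify-write completed without faulting
 *	}
 */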

#if DEBUG
#ifndef TERI
#define TERI 1
#endif
#endif

#if TERI
.globl	EXT(thread_exception_return_internal)
#else
.globl	EXT(thread_exception_return)
#endif
.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call EXT(dtrace_thread_bootstrap)
#endif

#if TERI
LEXT(thread_exception_return_internal)
#else
LEXT(thread_exception_return)
#endif
	cli
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)
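
/*
 * Note: on DEBUG kernels TERI is defined, so the entry point above is
 * exported as thread_exception_return_internal. The expectation (an
 * inference from the conditionals above, not code in this file) is that
 * a C-level thread_exception_return() wrapper interposes extra sanity
 * checks before the final return to user mode, e.g.:
 *
 *	void
 *	thread_exception_return(void)
 *	{
 *		// debug-only state assertions would go here
 *		thread_exception_return_internal();
 *	}
 */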

/*
 * Copyin/out from user/kernel address space.
 * rdi:	source address
 * rsi:	destination address
 * rdx:	byte count (in fact, always < 64MB -- see copyio)
 */
Entry(_bcopy)
	xchg	%rdi, %rsi		/* source %rsi, dest %rdi */

	cld				/* count up */
	mov	%rdx, %rcx		/* move by longwords first */
	shr	$3, %rcx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsq				/* move longwords */

	movl	%edx, %ecx		/* now move remaining bytes */
	andl	$7, %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsb

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

_bcopy_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
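
/*
 * The copy loop above, restated as a hedged C sketch (exposition only;
 * the real fault handling comes from the recovery table, not from C):
 *
 *	int
 *	bcopy_sketch(const char *src, char *dst, size_t len)
 *	{
 *		size_t words = len >> 3;	// rep movsq: 8 bytes at a time
 *		size_t tail  = len & 7;		// rep movsb: 0-7 byte remainder
 *		while (words--) { memcpy(dst, src, 8); src += 8; dst += 8; }
 *		while (tail--)  { *dst++ = *src++; }
 *		return 0;			// a fault would yield EFAULT
 *	}
 */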

Entry(pmap_safe_read)
	RECOVERY_SECTION
	RECOVER(_pmap_safe_read_fail)
	movq	(%rdi), %rcx
	mov	%rcx, (%rsi)
	mov	$1, %eax
	ret
_pmap_safe_read_fail:
	xor	%eax, %eax
	ret
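
/*
 * Note the inverted convention: unlike the errno-returning copy routines
 * in this file, pmap_safe_read returns 1 on success and 0 if the 8-byte
 * load faulted. A usage sketch (the prototype is assumed from the
 * register usage above, rdi = source, rsi = destination):
 *
 *	uint64_t val;
 *	if (pmap_safe_read(src, &val)) {
 *		// read 8 bytes without risking a panic on a bad mapping
 *	}
 */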

/*
 * 2-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 */
Entry(_bcopy2)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	(%rdi), %cx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	%cx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

/*
 * 4-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 */
Entry(_bcopy4)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	(%rdi), %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	mov	%ecx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

/*
 * 8-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 */
Entry(_bcopy8)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	(%rdi), %rcx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	mov	%rcx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */
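
/*
 * The three fixed-size helpers above exist so that ml_copy_phys() can
 * perform small physical copies as single loads and stores. A plausible
 * dispatch (a sketch, not quoted from ml_copy_phys):
 *
 *	switch (bytes) {
 *	case 2:		err = _bcopy2(src, dst);	break;
 *	case 4:		err = _bcopy4(src, dst);	break;
 *	case 8:		err = _bcopy8(src, dst);	break;
 *	default:	err = _bcopy(src, dst, bytes);	break;
 *	}
 */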

/*
 * Copyin string from user/kern address space.
 * rdi:	source address
 * rsi:	destination address
 * rdx:	max byte count
 * rcx:	actual byte count (OUT)
 */
Entry(_bcopystr)
	pushq	%rdi
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	xorl	%eax,%eax		/* set to 0 here so that high 24 bits */
					/* are 0 for the testl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(_bcopystr_fail)		/* copy bytes... */
	movb	(%rsi),%al
	incq	%rsi
	testq	%rdi,%rdi		/* if kernel address is NULL, */
	jz	3f			/* skip storing the byte */
	movb	%al,(%rdi)		/* copy the byte */
	incq	%rdi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 already in %eax */
	decq	%rdx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full, copy another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full, no \0: ENAMETOOLONG */
4:
	cmpq	$0,%rcx			/* get OUT len ptr */
	jz	_bcopystr_ret		/* if null, just return */
	subq	(%rsp),%rsi
	movq	%rsi,(%rcx)		/* else set OUT arg to xfer len */
_bcopystr_ret:
	popq	%rdi			/* restore registers */
	ret				/* and return */

_bcopystr_fail:
	popq	%rdi			/* restore registers */
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
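
/*
 * _bcopystr semantics in C (illustrative): copy at most maxlen bytes
 * including the terminating NUL; a NULL destination merely measures the
 * string; the transferred length (including the NUL, when found) is
 * reported through the OUT pointer when it is non-NULL.
 *
 *	int
 *	bcopystr_sketch(const char *src, char *dst, size_t maxlen, size_t *lencopied)
 *	{
 *		size_t n = 0;
 *		while (maxlen--) {
 *			char c = src[n++];		// may fault -> EFAULT
 *			if (dst != NULL)
 *				dst[n - 1] = c;
 *			if (c == '\0') {
 *				if (lencopied) *lencopied = n;
 *				return 0;
 *			}
 *		}
 *		if (lencopied) *lencopied = n;		// no NUL within maxlen
 *		return ENAMETOOLONG;
 *	}
 */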

#if CONFIG_DTRACE

/*
 * Copyin 8 bit aligned byte as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 */
Entry(dtrace_nofault_copy8)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	RECOVERY_SECTION
	RECOVER(L_copyin_atomic8_fail)	/* Set up recovery handler for next instruction */
	movb	(%rdi), %al		/* Load byte from user */
	movb	%al, (%rsi)		/* Store to kernel */
	xorl	%eax, %eax		/* Return success */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyin_atomic8_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

/*
 * Copyin 16 bit aligned word as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 */
Entry(dtrace_nofault_copy16)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	RECOVERY_SECTION
	RECOVER(L_copyin_atomic16_fail)	/* Set up recovery handler for next instruction */
	movw	(%rdi), %ax		/* Load word from user */
	movw	%ax, (%rsi)		/* Store to kernel */
	xorl	%eax, %eax		/* Return success */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyin_atomic16_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

#endif /* CONFIG_DTRACE */

/*
 * Copyin 32 bit aligned word as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 */
#if CONFIG_DTRACE
Entry(dtrace_nofault_copy32)
#endif
Entry(_copyin_atomic32)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	RECOVERY_SECTION
	RECOVER(L_copyin_atomic32_fail)	/* Set up recovery handler for next instruction */
	movl	(%rdi), %eax		/* Load long from user */
	movl	%eax, (%rsi)		/* Store to kernel */
	xorl	%eax, %eax		/* Return success */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyin_atomic32_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

/*
 * Copyin 64 bit aligned word as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 */
#if CONFIG_DTRACE
Entry(dtrace_nofault_copy64)
#endif
Entry(_copyin_atomic64)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	RECOVERY_SECTION
	RECOVER(L_copyin_atomic64_fail)	/* Set up recovery handler for next instruction */
	movq	(%rdi), %rax		/* Load quad from user */
	movq	%rax, (%rsi)		/* Store to kernel */
	xorl	%eax, %eax		/* Return success */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyin_atomic64_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

/*
 * Copyout 32 bit aligned word as a single transaction
 * rdi: source address (kernel)
 * rsi: destination address (user)
 */
Entry(_copyout_atomic32)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	movl	(%rdi), %eax		/* Load long from kernel */
	RECOVERY_SECTION
	RECOVER(L_copyout_atomic32_fail)	/* Set up recovery handler for next instruction */
	movl	%eax, (%rsi)		/* Store long to user */
	xorl	%eax, %eax		/* Return success */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyout_atomic32_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

/*
 * Copyout 64 bit aligned word as a single transaction
 * rdi: source address (kernel)
 * rsi: destination address (user)
 */
Entry(_copyout_atomic64)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	movq	(%rdi), %rax		/* Load quad from kernel */
	RECOVERY_SECTION
	RECOVER(L_copyout_atomic64_fail)	/* Set up recovery handler for next instruction */
	movq	%rax, (%rsi)		/* Store quad to user */
	xorl	%eax, %eax		/* Return success */
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyout_atomic64_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */
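
/*
 * All four routines above perform the user-side access as exactly one
 * naturally-aligned load or store, so a concurrent observer can never
 * see a torn value. A hedged usage sketch (prototypes assumed from the
 * register conventions documented above):
 *
 *	uint64_t v;
 *	if (_copyin_atomic64(uaddr, &v) == 0) {
 *		// v holds an atomic snapshot of the user word
 *	}
 *	if (_copyout_atomic64(&v, uaddr) != 0) {
 *		// EFAULT: user address was unmapped or protected
 *	}
 */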

/*
 * hw_lck_ticket_t
 * hw_lck_ticket_reserve_orig_allow_invalid(hw_lck_ticket_t *lck)
 *
 * rdi: lock address
 */
Entry(hw_lck_ticket_reserve_orig_allow_invalid)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp

	RECOVERY_SECTION
	RECOVER(3f)			/* Set up recovery handler for next instruction */
	movl	(%rdi), %eax		/* Load lock value */
1:
	btl	$HW_LCK_TICKET_LOCK_VALID_BIT, %eax	/* test the valid bit */
	jae	3f			/* bail out if the lock is invalid */

	leal	HW_LCK_TICKET_LOCK_INC_WORD(%rax), %edx
	RECOVERY_SECTION
	RECOVER(3f)			/* Set up recovery handler for next instruction */
	lock	cmpxchgl %edx, (%rdi)
	jne	1b

	popq	%rbp			/* Restore registers */
	retq				/* Return */

3: /* invalid */
	xorl	%eax, %eax
	popq	%rbp			/* Restore registers */
	retq				/* Return */
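
/*
 * The reservation loop above in hedged C (constants as used in the
 * assembly; compare_exchange stands in for the lock-prefixed cmpxchg
 * and, like the loads, is covered by the recovery table, so a faulting
 * or invalid lock yields 0):
 *
 *	uint32_t old = *(volatile uint32_t *)lck;		// may fault -> 0
 *	for (;;) {
 *		if (!(old & (1u << HW_LCK_TICKET_LOCK_VALID_BIT)))
 *			return 0;				// lock torn down
 *		uint32_t newval = old + HW_LCK_TICKET_LOCK_INC_WORD; // take a ticket
 *		if (compare_exchange(lck, &old, newval))	// may fault -> 0
 *			return old;	// original value encodes the reservation
 *	}
 */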

/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END


/*
 * Vector here on any exception at startup prior to switching to
 * the kernel's idle page-tables and installing the kernel master IDT.
 */
Entry(vstart_trap_handler)
	POSTCODE(BOOT_TRAP_HLT)
	hlt
