/* xref: /xnu-11417.140.69/osfmk/x86_64/cswitch.s (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4) */
1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  [email protected]
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59
60#include <i386/asm.h>
61#include <i386/proc_reg.h>
62#include <assym.s>
63
/*
 * void Load_context(
 *                   thread_t thread)    // %rdi
 *
 * Loads the first thread context to run on a CPU,
 * i.e. without switching from (or saving the state of) a previous
 * thread.  Used when a processor starts running with no prior
 * thread context to preserve.
 *
 * returns 'old' thread in %rax (which is always NULL)
 *
 * NOTE(review): IKS_SIZE and the KSS_* offsets come from assym.s and
 * presumably describe the x86_kernel_state save area located at the
 * top of the kernel stack — confirm against osfmk/i386 definitions.
 */
Entry(Load_context)
	movq    %rdi, %rdx                      /* move thread arg to rdx */

	movq    %rdx,%gs:CPU_ACTIVE_THREAD      /* new thread is active */
	movq    TH_KERNEL_STACK(%rdx),%rdx      /* get its kernel stack */
	lea     -IKS_SIZE(%rdx),%rcx            /* rcx = stack base - IKS_SIZE ... */
	add     EXT(kernel_stack_size)(%rip),%rcx /* ... + stack size: point to stack top (saved-state area) */

	movq    %rdx,%gs:CPU_ACTIVE_STACK       /* set current stack */
	movq    %rcx,%gs:CPU_KERNEL_STACK       /* set stack top */

	/* Restore the thread's saved kernel context (callee-saved set + SP). */
	movq    KSS_RSP(%rcx),%rsp              /* switch stacks */
	movq    KSS_RBX(%rcx),%rbx              /* restore registers */
	movq    KSS_RBP(%rcx),%rbp
	movq    KSS_R12(%rcx),%r12
	movq    KSS_R13(%rcx),%r13
	movq    KSS_R14(%rcx),%r14
	movq    KSS_R15(%rcx),%r15

	xorl    %eax, %eax                      /* set return value to zero (no old thread) */

	jmp    *KSS_RIP(%rcx)                   /* resume at saved PC; "old" thread (NULL) in %rax */
95
/*
 * thread_t Switch_context(
 *		thread_t old,				// %rdi
 *		thread_continue_t continuation,		// %rsi
 *		thread_t new)				// %rdx
 *
 * Saves the kernel context (callee-saved registers, SP and return PC)
 * of 'old' into the save area at the top of its kernel stack — unless
 * a continuation is supplied, in which case 'old' will restart at the
 * continuation and its register state need not be preserved — then
 * loads and resumes the saved context of 'new'.
 *
 * returns 'old' thread in %rax
 */
Entry(Switch_context)
	popq	%rax				/* pop return PC */

	/* Test for a continuation and skip all state saving if so... */
	cmpq	$0, %rsi
	jne 	5f
	movq	%gs:CPU_KERNEL_STACK,%rcx	/* get old kernel stack top */
	movq	%rbx,KSS_RBX(%rcx)		/* save registers (SysV callee-saved set) */
	movq	%rbp,KSS_RBP(%rcx)
	movq	%r12,KSS_R12(%rcx)
	movq	%r13,KSS_R13(%rcx)
	movq	%r14,KSS_R14(%rcx)
	movq	%r15,KSS_R15(%rcx)
	movq	%rax,KSS_RIP(%rcx)		/* save return PC (popped above) */
	movq	%rsp,KSS_RSP(%rcx)		/* save SP */
5:
	movq	%rdi,%rax			/* return old thread */
	/* new thread in %rdx */
	movq    %rdx,%gs:CPU_ACTIVE_THREAD      /* new thread is active */
	movq	TH_KERNEL_STACK(%rdx),%rdx	/* get its kernel stack */
	lea	-IKS_SIZE(%rdx),%rcx		/* rcx = stack base - IKS_SIZE ... */
	add	EXT(kernel_stack_size)(%rip),%rcx /* ... + stack size: point to stack top (saved-state area) */

	movq	%rdx,%gs:CPU_ACTIVE_STACK	/* set current stack */
	movq	%rcx,%gs:CPU_KERNEL_STACK	/* set stack top */

	/* Restore the new thread's saved kernel context. */
	movq	KSS_RSP(%rcx),%rsp		/* switch stacks */
	movq	KSS_RBX(%rcx),%rbx		/* restore registers */
	movq	KSS_RBP(%rcx),%rbp
	movq	KSS_R12(%rcx),%r12
	movq	KSS_R13(%rcx),%r13
	movq	KSS_R14(%rcx),%r14
	movq	KSS_R15(%rcx),%r15
	jmp	*KSS_RIP(%rcx)			/* resume at saved PC; return old thread in %rax */
138
/*
 * machine_stack_attach sets this as the RIP of newly-attached stacks
 * %rbx is the C routine to call
 * %rax is the parameter to pass to the C routine
 *
 * This stub is needed to convert the return value of the old thread from Switch_context
 * in %rax into a parameter to thread_continue passed in %rdi, because using the
 * same register for the first argument and first retval makes too much sense for the SysV ABI.
 */
Entry(Thread_continue)
	movq	%rax, %rdi			/* this is the old thread from Switch_context; becomes arg0 */
	call	*%rbx				/* call real continuation */
	int3					/* (should never return) — trap hard if it does */
152
/*
 * thread_t Shutdown_context(
 *		thread_t thread,		// %rdi
 *		void (*routine)(processor_t),	// %rsi
 *		processor_t processor)		// %rdx
 *
 * saves the kernel context of the thread (callee-saved registers,
 * return PC and SP) into the save area at the top of its kernel stack,
 * detaches that stack from the CPU by storing it back into the thread,
 * switches to the interrupt stack,
 * then runs routine on the interrupt stack.
 *
 * routine must not return — execution ends in int3 if it does.
 */
Entry(Shutdown_context)
	movq	%gs:CPU_KERNEL_STACK,%rcx	/* get old kernel stack top */
	movq	%rbx,KSS_RBX(%rcx)		/* save registers (SysV callee-saved set) */
	movq	%rbp,KSS_RBP(%rcx)
	movq	%r12,KSS_R12(%rcx)
	movq	%r13,KSS_R13(%rcx)
	movq	%r14,KSS_R14(%rcx)
	movq	%r15,KSS_R15(%rcx)
	popq	%r8				/* extract return PC */
	movq	%r8,KSS_RIP(%rcx)		/* save return PC */
	movq	%rsp,KSS_RSP(%rcx)		/* save SP */

	movq	%gs:CPU_ACTIVE_STACK,%rcx	/* get old kernel stack */
	movq	%rdi,%rax			/* get old thread */
	movq	%rcx,TH_KERNEL_STACK(%rax)	/* save old stack back into the thread */

	movq	%gs:CPU_INT_STACK_TOP,%rsp 	/* switch to interrupt stack */

	/* Record the interrupt stack as the active stack:
	 * CPU_ACTIVE_STACK = int stack top - kernel_stack_size (its base). */
	movq	%rsp, %gs:CPU_ACTIVE_STACK
	movq	EXT(kernel_stack_size)(%rip),%rcx /* point to stack top */
	subq	%rcx, %gs:CPU_ACTIVE_STACK

	pushq   %r8                             /* set up a call frame on new stack */
	pushq   %rbp				/* fake caller frame so backtraces link through */
	movq    %rsp, %rbp

	movq	%rdx,%rdi			/* processor arg to routine */
	call	*%rsi				/* call routine to run */
	int3					/* (should never return) — trap hard if it does */
192
193