xref: /xnu-10002.1.13/osfmk/kern/hv_support_kext.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2013 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/ast.h>
30 #include <kern/locks.h>
31 #include <kern/task.h>
32 #include <kern/thread.h>
33 #include <libkern/OSAtomic.h>
34 #include <vm/vm_pageout.h>
35 #include <mach/sdt.h>
36 #include <sys/kdebug.h>
37 
38 #if defined(__x86_64__) && CONFIG_VMX
39 #include <i386/vmx/vmx_cpu.h>
40 #endif
41 
42 #include <kern/hv_support.h>
43 
/* nonzero when the CPU supports hardware virtualization (set at boot) */
int hv_support_available = 0;

/* presumably a boot-arg/tunable read elsewhere to disable hv — not
 * referenced in this file; confirm at callers */
int hv_disable = 0;

/* callbacks for tasks/threads with associated hv objects; installed by
 * hv_set_callbacks() and cleared by hv_release_callbacks() */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,               /* thread is being dispatched for execution */
	.preempt = NULL,                /* thread is being preempted */
	.suspend = NULL,                /* system is being suspended */
	.thread_destroy = NULL, /* thread is being destroyed */
	.task_destroy = NULL,   /* task is being destroyed */
	.volatile_state = NULL, /* thread state is becoming volatile */
	.resume = NULL,         /* system is being resumed */
	.memory_pressure = NULL,/* (unused) */
};

/* trap tables for hv_*_trap syscalls; traps/trap_count are published in a
 * specific order (see hv_set_traps/hv_release_traps) so the lock-free
 * readers in HV_TRAP_DISPATCH never see a count without a table */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

/* nonzero while a client's callbacks are registered; written only under
 * hv_support_lck_mtx */
static int hv_callbacks_enabled = 0;
/* serializes registration/release of traps and callbacks */
static LCK_GRP_DECLARE(hv_support_lck_grp, "hv_support");
static LCK_MTX_DECLARE(hv_support_lck_mtx, &hv_support_lck_grp);
75 
/* hv_support boot initialization: probe for hardware virtualization
 * support; on non-VMX configurations hv_support_available stays 0 */
void
hv_support_init(void)
{
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif
}
84 
/* returns nonzero if hv_support is available on this machine */
int
hv_get_support(void)
{
	return hv_support_available;
}
91 
/* associate an hv object with the current task; ownership/lifetime of
 * `target` is the client's (cleanup presumably via the task_destroy
 * callback — confirm at callers) */
void
hv_set_task_target(void *target)
{
	current_task()->hv_task_target = target;
}
98 
/* associate an hv object with the current thread; ownership/lifetime of
 * `target` is the client's (cleanup presumably via the thread_destroy
 * callback — confirm at callers) */
void
hv_set_thread_target(void *target)
{
	current_thread()->hv_thread_target = target;
}
105 
/* get hv object associated with the current task (NULL if none set) */
void*
hv_get_task_target(void)
{
	return current_task()->hv_task_target;
}
112 
/* get hv object associated with the current thread (NULL if none set) */
void*
hv_get_thread_target(void)
{
	return current_thread()->hv_thread_target;
}
119 
120 /* test if a given thread state may be volatile between dispatch
121  *  and preemption */
122 int
hv_get_volatile_state(hv_volatile_state_t state)123 hv_get_volatile_state(hv_volatile_state_t state)
124 {
125 	int is_volatile = 0;
126 
127 #if (defined(__x86_64__))
128 	if (state == HV_DEBUG_STATE) {
129 		is_volatile = (current_thread()->machine.ids != NULL);
130 	}
131 #endif
132 
133 	return is_volatile;
134 }
135 
136 /* register a list of trap handlers for the hv_*_trap syscalls */
137 kern_return_t
hv_set_traps(hv_trap_type_t trap_type,const hv_trap_t * traps,unsigned trap_count)138 hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
139     unsigned trap_count)
140 {
141 	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
142 	kern_return_t kr = KERN_FAILURE;
143 
144 	lck_mtx_lock(&hv_support_lck_mtx);
145 	if (trap_table->trap_count == 0) {
146 		trap_table->traps = traps;
147 		OSMemoryBarrier();
148 		trap_table->trap_count = trap_count;
149 		kr = KERN_SUCCESS;
150 	}
151 	lck_mtx_unlock(&hv_support_lck_mtx);
152 
153 	return kr;
154 }
155 
/* release hv_*_trap traps: mirror image of hv_set_traps() — the count is
 * zeroed before the pointer so lock-free readers in HV_TRAP_DISPATCH
 * either see count == 0 (and fail cleanly) or a still-valid traps table;
 * the barrier keeps the two stores from being reordered */
void
hv_release_traps(hv_trap_type_t trap_type)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(&hv_support_lck_mtx);
	trap_table->trap_count = 0;
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(&hv_support_lck_mtx);
}
168 
169 /* register callbacks for certain task/thread events for tasks/threads with
170  *  associated hv objects */
171 kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks)172 hv_set_callbacks(hv_callbacks_t callbacks)
173 {
174 	kern_return_t kr = KERN_FAILURE;
175 
176 	lck_mtx_lock(&hv_support_lck_mtx);
177 	if (hv_callbacks_enabled == 0) {
178 		hv_callbacks = callbacks;
179 		hv_callbacks_enabled = 1;
180 		kr = KERN_SUCCESS;
181 	}
182 	lck_mtx_unlock(&hv_support_lck_mtx);
183 
184 	return kr;
185 }
186 
187 /* release callbacks for task/thread events */
188 void
hv_release_callbacks(void)189 hv_release_callbacks(void)
190 {
191 	lck_mtx_lock(&hv_support_lck_mtx);
192 	hv_callbacks = (hv_callbacks_t) {
193 		.dispatch = NULL,
194 		.preempt = NULL,
195 		.suspend = NULL,
196 		.thread_destroy = NULL,
197 		.task_destroy = NULL,
198 		.volatile_state = NULL,
199 		.resume = NULL,
200 	};
201 
202 	hv_callbacks_enabled = 0;
203 	lck_mtx_unlock(&hv_support_lck_mtx);
204 }
205 
206 /* system suspend notification */
207 void
hv_suspend(void)208 hv_suspend(void)
209 {
210 	if (hv_callbacks_enabled) {
211 		hv_callbacks.suspend();
212 	}
213 }
214 
215 /* system resume notification */
216 void
hv_resume(void)217 hv_resume(void)
218 {
219 	if (hv_callbacks_enabled && hv_callbacks.resume) {
220 		hv_callbacks.resume();
221 	}
222 }
223 
/* dispatch hv_task_trap/hv_thread_trap syscalls to trap handlers,
 *  fail for invalid index or absence of trap handlers, trap handler is
 *  responsible for validating targets.
 *  An out-of-range index — including trap_count == 0, i.e. no table
 *  registered — yields KERN_INVALID_ARGUMENT. The lock-free read here
 *  relies on hv_set_traps()/hv_release_traps() ordering their stores
 *  (table pointer vs. count) with OSMemoryBarrier(). */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable(index < hv_trap_table[type].trap_count)) ? \
	        hv_trap_table[type].traps[index](target, argument) \
	                : KERN_INVALID_ARGUMENT)
231 
232 kern_return_t
hv_task_trap(uint64_t index,uint64_t arg)233 hv_task_trap(uint64_t index, uint64_t arg)
234 {
235 	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_X86_TRAP_TASK) | DBG_FUNC_START, index, arg);
236 	DTRACE_HV2(task__trap__begin, uint64_t, index, uint64_t, arg);
237 
238 	kern_return_t ret = HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
239 
240 	DTRACE_HV2(task__trap__end, uint64_t, index, uint64_t, ret);
241 	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_X86_TRAP_TASK) | DBG_FUNC_END, index, ret);
242 
243 	return ret;
244 }
245 
246 kern_return_t
hv_thread_trap(uint64_t index,uint64_t arg)247 hv_thread_trap(uint64_t index, uint64_t arg)
248 {
249 	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_X86_TRAP_THREAD) | DBG_FUNC_START, index, arg);
250 	DTRACE_HV2(thread__trap__begin, uint64_t, index, uint64_t, arg);
251 
252 	kern_return_t ret = HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
253 
254 	DTRACE_HV2(thread__trap__end, uint64_t, index, uint64_t, ret);
255 	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_X86_TRAP_THREAD) | DBG_FUNC_END, index, ret);
256 
257 	return ret;
258 }
259 
/* returns TRUE if any AST is pending on the current CPU */
boolean_t
hv_ast_pending(void)
{
	return current_cpu_datap()->cpu_pending_ast != 0;
}
265