/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

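/*
 * Handling of ASTs (Asynchronous System Traps): servicing pending ASTs on
 * return to user space and at kernel preemption points, and manipulating
 * the per-processor pending-AST flags.
 */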
#include <kern/ast.h>
#include <kern/counter.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kperf/kperf_kpc.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
    /*
     * We've been scheduled again after a userspace preemption,
     * try again to return to userspace.
     */
    thread_exception_return();
}

/*
 * Create a dedicated frame to clarify that this thread has been preempted
 * while running in kernel space.
 */
static void __attribute__((noinline, disable_tail_calls))
thread_preempted_in_kernel(ast_t urgent_reason)
{
    thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

    assert(ml_get_interrupts_enabled() == FALSE);
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    thread_t thread = current_thread();

    /* Idle threads handle preemption themselves */
    if ((thread->state & TH_IDLE)) {
        ast_off(AST_PREEMPTION);
        return;
    }

    /*
     * It's possible for this to be called after AST_URGENT
     * has already been handled, due to races in enable_preemption
     */
    if (ast_peek(AST_URGENT) != AST_URGENT) {
        return;
    }

    /*
     * Don't preempt if the thread is already preparing to block.
     * TODO: the thread can cheese this with clear_wait()
     */
    if (waitq_wait_possible(thread) == FALSE) {
        /* Consume AST_URGENT or the interrupt will call us again */
        ast_consume(AST_URGENT);
        return;
    }

    /* TODO: Should we csw_check again to notice if conditions have changed? */

    ast_t urgent_reason = ast_consume(AST_PREEMPTION);

    assert(urgent_reason & AST_PREEMPT);

    /* We've decided to try context switching */
    thread_preempted_in_kernel(urgent_reason);
}

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    thread_t thread = current_thread();
    task_t task = get_threadtask(thread);

    /* We are about to return to userspace, there must not be a pending wait */
    assert(waitq_wait_possible(thread));
    assert((thread->state & TH_IDLE) == 0);

    /* TODO: Add more 'return to userspace' assertions here */

    /*
     * If this thread was urgently preempted in userspace,
     * take the preemption before processing the ASTs.
     * The trap handler will call us again if we have more ASTs, so it's
     * safe to block in a continuation here.
     */
    if (ast_peek(AST_URGENT) == AST_URGENT) {
        ast_t urgent_reason = ast_consume(AST_PREEMPTION);

        assert(urgent_reason & AST_PREEMPT);

        /* TODO: Should we csw_check again to notice if conditions have changed? */

        thread_block_reason(thread_preempted, NULL, urgent_reason);
        /* NOTREACHED */
    }

    /*
     * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
     * on a different processor. Only the ast bit on the thread will be set.
     *
     * Force a propagate for concurrent updates without an IPI.
     */
    ast_propagate(thread);

    /*
     * Consume all non-preemption processor ASTs matching reasons
     * because we're handling them here.
     *
     * If one of the AST handlers blocks in a continuation,
     * we'll reinstate the unserviced thread-level AST flags
     * from the thread to the processor on context switch.
     * If one of the AST handlers sets another AST,
     * the trap handler will call ast_taken_user again.
     *
     * We expect the AST handlers not to thread_exception_return
     * without an ast_propagate or context switch to reinstate
     * the per-processor ASTs.
     *
     * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
     */
    ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

    ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
    if (reasons & AST_DTRACE) {
        dtrace_ast();
    }
#endif

#ifdef MACH_BSD
    if (reasons & AST_BSD) {
        thread_ast_clear(thread, AST_BSD);
        bsd_ast(thread);
    }
#endif

#if CONFIG_MACF
    if (reasons & AST_MACF) {
        thread_ast_clear(thread, AST_MACF);
        mac_thread_userret(thread);
    }
#endif

#if CONFIG_ARCADE
    if (reasons & AST_ARCADE) {
        thread_ast_clear(thread, AST_ARCADE);
        arcade_ast(thread);
    }
#endif

    if (reasons & AST_APC) {
        thread_ast_clear(thread, AST_APC);
        thread_apc_ast(thread);
    }

    if (reasons & AST_GUARD) {
        thread_ast_clear(thread, AST_GUARD);
        guard_ast(thread);
    }

    if (reasons & AST_LEDGER) {
        thread_ast_clear(thread, AST_LEDGER);
        ledger_ast(thread);
    }

    if (reasons & AST_KPERF) {
        thread_ast_clear(thread, AST_KPERF);
        kperf_kpc_thread_ast(thread);
    }

    if (reasons & AST_RESET_PCS) {
        thread_ast_clear(thread, AST_RESET_PCS);
        thread_reset_pcs_ast(task, thread);
    }

    if (reasons & AST_KEVENT) {
        thread_ast_clear(thread, AST_KEVENT);
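        /* Atomically take the pending kevent bits and clear them on the thread */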
        uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
        if (bits) {
            kevent_ast(thread, bits);
        }
    }

    if (reasons & AST_PROC_RESOURCE) {
        thread_ast_clear(thread, AST_PROC_RESOURCE);
        task_port_space_ast(task);
#if MACH_BSD
        proc_filedesc_ast(task);
#endif /* MACH_BSD */
    }

#if CONFIG_TELEMETRY
    if (reasons & AST_TELEMETRY_ALL) {
        ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
        thread_ast_clear(thread, AST_TELEMETRY_ALL);
        telemetry_ast(thread, telemetry_reasons);
    }
#endif

#if MACH_ASSERT
    if (reasons & AST_DEBUG_ASSERT) {
        thread_ast_clear(thread, AST_DEBUG_ASSERT);
        thread_debug_return_to_user_ast(thread);
    }
#endif

    spl_t s = splsched();

#if CONFIG_SCHED_SFI
    /*
     * SFI is currently a per-processor AST, not a per-thread AST
     * TODO: SFI should be a per-thread AST
     */
    if (ast_consume(AST_SFI) == AST_SFI) {
        sfi_ast(thread);
    }
#endif

    /* We are about to return to userspace, there must not be a pending wait */
    assert(waitq_wait_possible(thread));

    /*
     * We've handled all per-thread ASTs, time to handle non-urgent preemption.
     *
     * We delay reading the preemption bits until now in case the thread
     * blocks while handling per-thread ASTs.
     *
     * If one of the AST handlers had managed to set a new AST bit,
     * thread_exception_return will call ast_taken again.
     */
    ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

    if (preemption_reasons & AST_PREEMPT) {
        /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

        thread_lock(thread);
        preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
        thread_unlock(thread);

#if CONFIG_SCHED_SFI
        /* csw_check might tell us that SFI is needed */
        if (preemption_reasons & AST_SFI) {
            sfi_ast(thread);
        }
#endif

        if (preemption_reasons & AST_PREEMPT) {
            /* switching to a continuation implicitly re-enables interrupts */
            thread_block_reason(thread_preempted, NULL, preemption_reasons);
            /* NOTREACHED */
        }

        /*
         * We previously had a pending AST_PREEMPT, but csw_check
         * decided that it should no longer be set, and to keep
         * executing the current thread instead.
         * Clear the pending preemption timer as we no longer
         * have a pending AST_PREEMPT to time out.
         *
         * TODO: just do the thread block if we see AST_PREEMPT
         * to avoid taking the pset lock twice.
         * To do that thread block needs to be smarter
         * about not context switching when it's not necessary
         * e.g. the first-timeslice check for queue has priority
         */
        clear_pending_nonurgent_preemption(current_processor());
    }

    splx(s);

    /*
     * Here's a good place to put assertions of things which must be true
     * upon return to userspace.
     */
    assert(thread->kern_promotion_schedpri == 0);
    if (thread->rwlock_count > 0) {
        panic("rwlock_count is %d for thread %p, possibly it still holds a rwlock", thread->rwlock_count, thread);
    }
    assert(thread->priority_floor_count == 0);

    assert3u(0, ==, thread->sched_flags &
        (TH_SFLAG_WAITQ_PROMOTED |
        TH_SFLAG_RW_PROMOTED |
        TH_SFLAG_EXEC_PROMOTED |
        TH_SFLAG_FLOOR_PROMOTED |
        TH_SFLAG_PROMOTED |
        TH_SFLAG_DEPRESS));
}

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    reasons &= *pending_ast;
    *pending_ast &= ~reasons;

    return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
    ast_t *pending_ast = ast_pending();

    reasons &= *pending_ast;

    return reasons;
}

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
    ast_t *pending_ast = ast_pending();

    *pending_ast = (*pending_ast & ~AST_PER_THREAD) | thread_ast_get(thread);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
    ast_on(thread_ast_get(thread));
}

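/*
 * Request AST_DTRACE on the current processor,
 * so dtrace_ast() runs before the next return to user mode
 */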
void
ast_dtrace_on(void)
{
    ast_on(AST_DTRACE);
}