/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counter.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <kperf/kperf.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <vm/vm_map_xnu.h> // for vm_map_enter_large_telemetry_ast
#include <stdatomic.h>

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

static inline __attribute__((always_inline)) void handle_user_asts_interrupts_enabled(ast_t reasons, thread_t thread, task_t task);
static inline __attribute__((always_inline)) void assert_thread_return_to_user(thread_t thread);

static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption,
	 * try again to return to userspace.
	 */
	thread_exception_return();
}

/*
 * Create a dedicated frame to clarify that this thread has been preempted
 * while running in kernel space.
 */
static void __attribute__((noinline, disable_tail_calls))
thread_preempted_in_kernel(ast_t urgent_reason)
{
	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	/* We've decided to try context switching */
	thread_preempted_in_kernel(urgent_reason);
}

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();
	task_t   task   = get_threadtask(thread);

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

	handle_user_asts_interrupts_enabled(reasons, thread, task);

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 *      TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}

		/*
		 * We previously had a pending AST_PREEMPT, but csw_check
		 * decided that it should no longer be set, and to keep
		 * executing the current thread instead.
		 * Clear the pending preemption timer as we no longer
		 * have a pending AST_PREEMPT to time out.
		 *
		 * TODO: just do the thread block if we see AST_PREEMPT
		 * to avoid taking the pset lock twice.
		 * To do that thread block needs to be smarter
		 * about not context switching when it's not necessary
		 * e.g. the first-timeslice check for queue has priority
		 */
		clear_pending_nonurgent_preemption(current_processor());
	}

	splx(s);

	/*
	 * Here's a good place to put assertions of things which must be true
	 * upon return to userspace.
	 */
	assert_thread_return_to_user(thread);
}

static inline void
handle_user_asts_interrupts_enabled(ast_t reasons, thread_t thread, task_t task)
{
#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
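	/* Pending BSD-layer work (for example, signal delivery) is handled by bsd_ast(). */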
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
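	/* Give MAC policies their return-to-userspace hook (the MACF AST hook noted in the includes). */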
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

#if CONFIG_ARCADE
	if (reasons & AST_ARCADE) {
		thread_ast_clear(thread, AST_ARCADE);
		arcade_ast(thread);
	}
#endif

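	/* Run any pending asynchronous procedure call, e.g. a deferred thread suspension. */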
	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

#if HAS_MTE
	if (reasons & AST_SYNTHESIZE_MACH) {
		extern void mte_synthesize_async_tag_check_fault(thread_t thread, vm_map_t map);
		thread_ast_clear(thread, AST_SYNTHESIZE_MACH);
		mte_synthesize_async_tag_check_fault(thread, get_threadtask(thread)->map);
	}
#endif /* HAS_MTE */

#if CONFIG_LARGE_SIZE_TELEMETRY
	if (reasons & AST_LARGE_ENTER_TELEMETRY) {
		thread_ast_clear(thread, AST_LARGE_ENTER_TELEMETRY);
		vm_map_enter_large_telemetry_ast();
	}
#endif /* CONFIG_LARGE_SIZE_TELEMETRY */

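	/* Deliver a pending Mach exception before the thread returns to userspace. */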
	if (reasons & AST_MACH_EXCEPTION) {
		thread_ast_clear(thread, AST_MACH_EXCEPTION);
		mach_exception_ast(thread);
	}

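	/* Let the ledger code act on any resource-accounting limit this thread has hit. */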
	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

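	/* Take pending kperf (and, when CONFIG_CPU_COUNTERS is set, kpc) thread samples. */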
	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
#if CONFIG_CPU_COUNTERS
		kpc_thread_ast_handler(thread);
#endif /* CONFIG_CPU_COUNTERS */
		kperf_thread_ast_handler(thread);
		thread->kperf_ast = 0;
	}

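	/* Reset user PCs that sit inside the task's restartable ranges (see <kern/restartable.h>). */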
	if (reasons & AST_RESET_PCS) {
		thread_ast_clear(thread, AST_RESET_PCS);
		thread_reset_pcs_ast(task, thread);
	}

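	/* The atomic exchange claims the pending kevent bits so updates from other CPUs are not lost. */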
	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) {
			kevent_ast(thread, bits);
		}
	}

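	/* Report port-space (and, under MACH_BSD, file-descriptor) resource conditions for the task. */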
	if (reasons & AST_PROC_RESOURCE) {
		thread_ast_clear(thread, AST_PROC_RESOURCE);
		task_port_space_ast(task);
#if MACH_BSD
		proc_filedesc_ast(task);
#endif /* MACH_BSD */
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

#if MACH_ASSERT
	if (reasons & AST_DEBUG_ASSERT) {
		thread_ast_clear(thread, AST_DEBUG_ASSERT);
		thread_debug_return_to_user_ast(thread);
	}
#endif
}

static inline void
assert_thread_return_to_user(thread_t thread)
{
	assert(thread->kern_promotion_schedpri == 0);
	if (thread->rwlock_count > 0) {
		panic("rwlock_count is %d for thread %p, possibly it still holds a rwlock", thread->rwlock_count, thread);
	}
	assert(thread->priority_floor_count == 0);

	assert3u(0, ==, thread->sched_flags &
	    (TH_SFLAG_WAITQ_PROMOTED |
	    TH_SFLAG_RW_PROMOTED |
	    TH_SFLAG_EXEC_PROMOTED |
	    TH_SFLAG_FLOOR_PROMOTED |
	    TH_SFLAG_DEPRESS));

#if CONFIG_EXCLAVES
	assert3u(thread->options & TH_OPT_AOE, ==, 0);
#endif /* CONFIG_EXCLAVES */
}

#define ASYNC_THREAD_ASTS_HANDLED (AST_MACH_EXCEPTION | AST_DTRACE | AST_TELEMETRY_ALL | AST_KPERF | AST_DEBUG_ASSERT)

/*
 * Check if ASTs need to be handled for threads that do work on other threads (currently
 * aio threads).
 * Called and returns with interrupts enabled
 */
void
ast_check_async_thread(void)
{
	thread_t thread = current_thread();
	task_t   task   = get_threadtask(thread);

	assert(ml_get_interrupts_enabled() == TRUE);

	for (;;) {
		spl_t s = splsched();
		ast_t reasons = ast_consume(ASYNC_THREAD_ASTS_HANDLED);
		splx(s);

		if (!(reasons & ASYNC_THREAD_ASTS_HANDLED)) {
			break;
		}

		handle_user_asts_interrupts_enabled(reasons & ASYNC_THREAD_ASTS_HANDLED, thread,
		    task);

		assert_thread_return_to_user(thread);
	}
}

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}
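
/*
 * Illustrative only (not part of this file's interface): callers typically
 * use these helpers at splsched, peeking before deciding whether to consume,
 * in the style of ast_taken_kernel() and ast_check_async_thread() above:
 *
 *	spl_t s = splsched();
 *	if (ast_peek(AST_URGENT) == AST_URGENT) {
 *		ast_t pending = ast_consume(AST_PREEMPTION);
 *		// ... act only on the reasons that were actually pending ...
 *	}
 *	splx(s);
 */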

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = (*pending_ast & ~AST_PER_THREAD) | thread_ast_get(thread);
}

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread_ast_get(thread));
}

void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}