/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <kern/ast.h>
#include <kern/counter.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <kperf/kperf.h>
#include <mach/policy.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
#include <stdatomic.h>

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif


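/*
 * This file implements the machine-independent side of AST ("asynchronous
 * system trap") handling: the per-processor pending-AST bookkeeping
 * (ast_on, ast_off, ast_consume, ast_peek, ast_context, ast_propagate)
 * and the two entry points driven by the machine-dependent trap code,
 * ast_taken_kernel() and ast_taken_user().
 */
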
static void __attribute__((noinline, noreturn, disable_tail_calls))
thread_preempted(__unused void* parameter, __unused wait_result_t result)
{
	/*
	 * We've been scheduled again after a userspace preemption;
	 * try again to return to userspace.
	 */
	thread_exception_return();
}

/*
 * Create a dedicated frame to clarify that this thread has been preempted
 * while running in kernel space.
 */
static void __attribute__((noinline, disable_tail_calls))
thread_preempted_in_kernel(ast_t urgent_reason)
{
	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}

/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	/* We've decided to try context switching */
	thread_preempted_in_kernel(urgent_reason);
}
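
/*
 * Illustrative sketch only (not part of xnu): roughly how an interrupt-return
 * path might decide to call ast_taken_kernel().  The function name
 * example_kernel_irq_exit and the exact check are hypothetical; the real
 * callers live in machine-dependent trap/interrupt code.
 */
#if 0
static void
example_kernel_irq_exit(void)
{
	/* ast_taken_kernel() expects interrupts to be disabled */
	assert(ml_get_interrupts_enabled() == FALSE);

	/*
	 * Only urgent kernel preemption is handled here; everything else
	 * waits for the next return to user mode.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_taken_kernel();     /* returns to us, interrupts still off */
	}
}
#endif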

/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();
	task_t   task   = get_threadtask(thread);

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

#if CONFIG_ARCADE
	if (reasons & AST_ARCADE) {
		thread_ast_clear(thread, AST_ARCADE);
		arcade_ast(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}


	if (reasons & AST_MACH_EXCEPTION) {
		thread_ast_clear(thread, AST_MACH_EXCEPTION);
		mach_exception_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
#if CONFIG_CPU_COUNTERS
		kpc_thread_ast_handler(thread);
#endif /* CONFIG_CPU_COUNTERS */
		kperf_thread_ast_handler(thread);
		thread->kperf_ast = 0;
	}

	if (reasons & AST_RESET_PCS) {
		thread_ast_clear(thread, AST_RESET_PCS);
		thread_reset_pcs_ast(task, thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) {
			kevent_ast(thread, bits);
		}
	}

	if (reasons & AST_PROC_RESOURCE) {
		thread_ast_clear(thread, AST_PROC_RESOURCE);
		task_port_space_ast(task);
#if MACH_BSD
		proc_filedesc_ast(task);
#endif /* MACH_BSD */
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

#if MACH_ASSERT
	if (reasons & AST_DEBUG_ASSERT) {
		thread_ast_clear(thread, AST_DEBUG_ASSERT);
		thread_debug_return_to_user_ast(thread);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 *      TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers managed to set a new AST bit,
	 * thread_exception_return will call ast_taken_user again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}

		/*
		 * We previously had a pending AST_PREEMPT, but csw_check
		 * decided that it should no longer be set and that the
		 * current thread should keep executing.
		 * Clear the pending preemption timer, as we no longer
		 * have a pending AST_PREEMPT to time out.
		 *
		 * TODO: just do the thread block if we see AST_PREEMPT
		 * to avoid taking the pset lock twice.
		 * To do that, thread block needs to be smarter
		 * about not context switching when it's not necessary,
		 * e.g. for the first-timeslice check when the run queue
		 * holds a higher-priority thread.
		 */
		clear_pending_nonurgent_preemption(current_processor());
	}

	splx(s);

	/*
	 * Here's a good place to put assertions of things which must be true
	 * upon return to userspace.
	 */
	assert(thread->kern_promotion_schedpri == 0);
	if (thread->rwlock_count > 0) {
		panic("rwlock_count is %d for thread %p, possibly it still holds a rwlock", thread->rwlock_count, thread);
	}
	assert(thread->priority_floor_count == 0);

	assert3u(0, ==, thread->sched_flags &
	    (TH_SFLAG_WAITQ_PROMOTED |
	    TH_SFLAG_RW_PROMOTED |
	    TH_SFLAG_EXEC_PROMOTED |
	    TH_SFLAG_FLOOR_PROMOTED |
	    TH_SFLAG_DEPRESS));

#if CONFIG_EXCLAVES
	assert3u(thread->options & TH_OPT_AOE, ==, 0);
#endif /* CONFIG_EXCLAVES */
}
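
/*
 * Illustrative sketch only (not part of xnu): a hypothetical return-to-user
 * path showing the contract ast_taken_user() documents above -- entered with
 * interrupts disabled, it either returns with interrupts enabled or blocks in
 * a continuation and re-enters via thread_exception_return().  The helper
 * name example_return_to_user and the AST_ALL check are assumptions for this
 * example; the real callers are in machine-dependent trap code.
 */
#if 0
static void
example_return_to_user(void)
{
	ml_set_interrupts_enabled(FALSE);

	if (ast_peek(AST_ALL) != AST_NONE) {
		ast_taken_user();       /* returns with interrupts enabled */
	} else {
		ml_set_interrupts_enabled(TRUE);
	}

	/* ...restore user state and return to userspace... */
}
#endif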

/*
 * Set AST flags on current processor
 * Called at splsched
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast |= reasons;
}

/*
 * Clear AST flags on current processor
 * Called at splsched
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast &= ~reasons;
}

/*
 * Consume the requested subset of the AST flags set on the processor
 * Return the bits that were set
 * Called at splsched
 */
ast_t
ast_consume(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;
	*pending_ast &= ~reasons;

	return reasons;
}

/*
 * Read the requested subset of the AST flags set on the processor
 * Return the bits that were set, don't modify the processor
 * Called at splsched
 */
ast_t
ast_peek(ast_t reasons)
{
	ast_t *pending_ast = ast_pending();

	reasons &= *pending_ast;

	return reasons;
}
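
/*
 * Example only: the difference between ast_peek() and ast_consume().
 * ast_peek() leaves the pending bits untouched; ast_consume() clears the
 * requested bits and returns the ones that were set.  This is the pattern
 * ast_taken_kernel() uses above: peek first, consume once it has decided
 * to act.  The helper name example_peek_vs_consume is hypothetical.
 */
#if 0
static void
example_peek_vs_consume(void)
{
	spl_t s = splsched();

	if (ast_peek(AST_URGENT) == AST_URGENT) {
		/* AST_URGENT is pending, and it is still pending after this check */
	}

	if (ast_consume(AST_URGENT) == AST_URGENT) {
		/* AST_URGENT was pending and has now been cleared on this processor */
	}

	splx(s);
}
#endif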

/*
 * Re-set current processor's per-thread AST flags to those set on thread
 * Called at splsched
 */
void
ast_context(thread_t thread)
{
	ast_t *pending_ast = ast_pending();

	*pending_ast = (*pending_ast & ~AST_PER_THREAD) | thread_ast_get(thread);
}
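
/*
 * Worked example (illustrative): if the processor had (AST_PREEMPT | AST_BSD)
 * pending and the incoming thread has only AST_KEVENT set, ast_context()
 * leaves (AST_PREEMPT | AST_KEVENT) pending -- per-thread reasons belonging
 * to the outgoing thread are dropped in favor of the new thread's, while
 * bits outside AST_PER_THREAD, such as the preemption request, are preserved.
 * (This assumes AST_BSD and AST_KEVENT are in the AST_PER_THREAD mask and
 * AST_PREEMPT is not, per the definitions in osfmk/kern/ast.h.)
 */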

/*
 * Propagate ASTs set on a thread to the current processor
 * Called at splsched
 */
void
ast_propagate(thread_t thread)
{
	ast_on(thread_ast_get(thread));
}

void
ast_dtrace_on(void)
{
	ast_on(AST_DTRACE);
}
472