/*
 * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * kern/ast.h: Definitions for Asynchronous System Traps.
 */

#ifndef _KERN_AST_H_
#define _KERN_AST_H_


#include <kern/assert.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

/*
 * A processor detects an AST when it is about to return from an
 * interrupt context, and calls ast_taken_kernel or ast_taken_user
 * depending on whether it was returning from userspace or kernelspace.
 *
 * Machine-dependent code is responsible for maintaining
 * a set of reasons for an AST.
 */
/* Bitmask of AST_* reason flags (defined below). */
typedef uint32_t ast_t;

/* Generation counter used by the ast_generation_get()/ast_generation_wait()
 * "ack" scheme (one slot per CPU). */
typedef unsigned long ast_gen_t;

/*
 * When returning from interrupt/trap context to kernel mode,
 * if AST_URGENT is set, then ast_taken_kernel is called, for
 * instance to effect preemption of a kernel thread by a realtime
 * thread.
 *
 * This is also done when re-enabling preemption or re-enabling
 * interrupts, since an AST may have been set while preemption
 * was disabled, and it should take effect as soon as possible.
 *
 * When returning from interrupt/trap/syscall context to user
 * mode, any and all ASTs that are pending should be handled by
 * calling ast_taken_user.
 *
 * If a thread context switches, only ASTs not in AST_PER_THREAD
 * remain active. The per-thread ASTs are stored in the thread_t
 * and re-enabled when the thread context switches back.
 *
 * Typically the preemption ASTs are set as a result of threads
 * becoming runnable, threads changing priority, or quantum
 * expiration. If a thread becomes runnable and is chosen
 * to run on another processor, cause_ast_check() may be called
 * to IPI that processor and request csw_check() be run there.
 */

/*
 * Bits for reasons
 * TODO: Split the context switch and return-to-user AST namespaces
 * NOTE: Some of these are exported as the 'reason' code in scheduler tracepoints
 */
#define AST_PREEMPT             0x01            /* scheduler preemption requested */
#define AST_QUANTUM             0x02            /* quantum expiration (see overview above) */
#define AST_URGENT              0x04            /* must be handled even when returning to kernel mode */
#define AST_HANDOFF             0x08
#define AST_YIELD               0x10
#define AST_APC                 0x20            /* migration APC hook */
#define AST_LEDGER              0x40
#define AST_BSD                 0x80
#define AST_KPERF               0x100           /* kernel profiling */
#define AST_MACF                0x200           /* MACF user ret pending */
#define AST_RESET_PCS           0x400           /* restartable ranges */
#define AST_ARCADE              0x800           /* arcade subscription support */
#define AST_GUARD               0x1000
#define AST_TELEMETRY_USER      0x2000          /* telemetry sample requested on interrupt from userspace */
#define AST_TELEMETRY_KERNEL    0x4000          /* telemetry sample requested on interrupt from kernel */
#define AST_TELEMETRY_PMI       0x8000          /* telemetry sample requested on PMI */
#define AST_SFI                 0x10000         /* Evaluate if SFI wait is needed before return to userspace */
#define AST_DTRACE              0x20000
#define AST_TELEMETRY_IO        0x40000         /* telemetry sample requested for I/O */
#define AST_KEVENT              0x80000
#define AST_REBALANCE           0x100000        /* thread context switched due to rebalancing */
#define AST_UNQUIESCE           0x200000        /* catch unquiesced processor before returning to userspace */
#define AST_PROC_RESOURCE       0x400000        /* port space and/or file descriptor table has reached its limits */
#define AST_DEBUG_ASSERT        0x800000        /* check debug assertion */
#define AST_TELEMETRY_MACF      0x1000000       /* telemetry sample requested by MAC framework */

#define AST_NONE                0x00
#define AST_ALL                 (~AST_NONE)     /* every reason bit */

/* All ASTs that drive a scheduling decision.  NOTE: the reference to
 * AST_PREEMPTION before its definition is fine — macro expansion is lazy. */
#define AST_SCHEDULING  (AST_PREEMPTION | AST_YIELD | AST_HANDOFF)
#define AST_PREEMPTION  (AST_PREEMPT | AST_QUANTUM | AST_URGENT)

/* Union of every telemetry-sample request flag. */
#define AST_TELEMETRY_ALL (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL | \
        AST_TELEMETRY_PMI | AST_TELEMETRY_IO | AST_TELEMETRY_MACF)

/* Per-thread ASTs follow the thread at context-switch time. */
#define AST_PER_THREAD (AST_APC | AST_BSD | AST_MACF | AST_RESET_PCS | \
        AST_ARCADE | AST_LEDGER | AST_GUARD | AST_TELEMETRY_ALL | AST_KEVENT | AST_PROC_RESOURCE | AST_DEBUG_ASSERT)

/* Handle AST_URGENT detected while in the kernel */
extern void ast_taken_kernel(void);

/* Handle an AST flag set while returning to user mode (may continue via thread_exception_return) */
extern void ast_taken_user(void);

/* Check for pending ASTs */
extern void ast_check(processor_t processor);

/* Pending ast mask for the current processor */
extern ast_t *ast_pending(void);

/* Set AST flags on current processor */
extern void ast_on(ast_t reasons);

/* Clear AST flags on current processor */
extern void ast_off(ast_t reasons);

/* Consume specified AST flags from current processor */
extern ast_t ast_consume(ast_t reasons);

/* Read specified AST flags from current processor */
extern ast_t ast_peek(ast_t reasons);

/* Re-set current processor's per-thread AST flags to those set on thread */
extern void ast_context(thread_t thread);

/* Propagate ASTs set on a thread to the current processor */
extern void ast_propagate(thread_t thread);

/* Prepare for an AST "ack" scheme */
extern void ast_generation_get(processor_t processor, ast_gen_t gens[] /* MAX_CPUS */);

/* Wait for an AST generation "ack" to pass */
extern void ast_generation_wait(ast_gen_t gens[] /* MAX_CPUS */ );

/*
 * Set an AST on a thread with thread_ast_set.
 *
 * You can then propagate it to the current processor with ast_propagate(),
 * or tell another processor to act on it with cause_ast_check().
 *
 * See act_set_ast() for an example.
 */
/* Atomically OR reason bits into the thread's pending-AST set. */
#define thread_ast_set(act, reason)     ((void)os_atomic_or(&(act)->ast, (reason), relaxed))
/* Atomically clear reason bits from the thread's pending-AST set. */
#define thread_ast_clear(act, reason)   ((void)os_atomic_andnot(&(act)->ast, (reason), relaxed))
/* Non-destructively read which of the given reason bits are pending. */
#define thread_ast_peek(act, reason)    (os_atomic_load(&(act)->ast, relaxed) & (reason))

#ifdef MACH_BSD

extern void act_set_astbsd(thread_t);
extern void bsd_ast(thread_t);
extern void proc_filedesc_ast(task_t task);

#endif /* MACH_BSD */

#ifdef CONFIG_DTRACE
extern void ast_dtrace_on(void);
extern void dtrace_ast(void);
#endif /* CONFIG_DTRACE */

/* Sub-reason bits carried alongside AST_KEVENT.
 * These are kept in sync with bsd/kern/ast.h */
#define AST_KEVENT_RETURN_TO_KERNEL             0x0001
#define AST_KEVENT_REDRIVE_THREADREQ            0x0002
#define AST_KEVENT_WORKQ_QUANTUM_EXPIRED        0x0004

extern void kevent_ast(thread_t thread, uint16_t bits);
/* Set/clear AST_KEVENT sub-reason bits on a thread. */
extern void act_set_astkevent(thread_t thread, uint16_t bits);
extern uint16_t act_clear_astkevent(thread_t thread, uint16_t bits);
extern void act_set_ast_reset_pcs(thread_t thread, ast_gen_t array[] /* MAX_CPUS */ );
extern void task_filedesc_ast(task_t task, int current_size, int soft_limit, int hard_limit);
extern void act_set_debug_assert(void);

extern void thread_debug_return_to_user_ast(thread_t thread);
#endif /* _KERN_AST_H_ */