xref: /xnu-11215.41.3/osfmk/i386/mp.h (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 /*
58  */
59 #ifdef  KERNEL_PRIVATE
60 
61 #ifndef _I386_MP_H_
62 #define _I386_MP_H_
63 
64 //#define	MP_DEBUG 1
65 
66 #include <i386/apic.h>
67 #include <i386/mp_events.h>
68 #include <machine/limits.h>
69 
70 #define MAX_CPUS        64              /* 8 * sizeof(cpumask_t) */
71 
72 #ifndef ASSEMBLER
73 #include <stdint.h>
74 #include <sys/cdefs.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/i386/thread_status.h>
78 #include <mach/vm_types.h>
79 #include <kern/simple_lock.h>
80 #include <kern/assert.h>
81 #ifdef XNU_KERNEL_PRIVATE
82 #include <vm/vm_kern_xnu.h>
83 #endif
84 
85 __BEGIN_DECLS
86 
87 extern kern_return_t intel_startCPU(int slot_num);
88 extern kern_return_t intel_startCPU_fast(int slot_num);
89 extern void i386_init_slave(void) __dead2;
90 extern void i386_init_slave_fast(void) __dead2;
91 extern void smp_init(void);
92 
93 extern void cpu_interrupt(int cpu);
94 __END_DECLS
95 
96 extern  unsigned int    real_ncpus;             /* real number of cpus */
97 extern  unsigned int    max_ncpus;              /* max number of cpus */
98 extern  unsigned int    max_cpus_from_firmware; /* actual max cpus, from firmware (ACPI) */
99 decl_simple_lock_data(extern, kdb_lock); /* kdb lock		*/
100 
101 __BEGIN_DECLS
102 
103 extern  void    console_init(void);
104 extern  void    *console_cpu_alloc(boolean_t boot_cpu);
105 extern  void    console_cpu_free(void *console_buf);
106 
107 extern  int     kdb_cpu;                /* current cpu running kdb	*/
108 extern  int     kdb_debug;
109 extern  int     kdb_active[];
110 
111 extern  volatile boolean_t mp_kdp_trap;
112 extern  volatile boolean_t mp_kdp_is_NMI;
113 extern  volatile boolean_t force_immediate_debugger_NMI;
114 extern  volatile boolean_t pmap_tlb_flush_timeout;
115 
116 extern  uint64_t  LastDebuggerEntryAllowance;
117 
118 extern  void      mp_kdp_enter(boolean_t proceed_on_failure, bool is_stackshot);
119 extern  void      mp_kdp_exit(void);
120 extern  boolean_t mp_kdp_all_cpus_halted(void);
121 
122 extern  boolean_t       mp_recent_debugger_activity(void);
123 extern  void    kernel_spin(uint64_t spin_ns);
124 
125 /*
126  * All cpu rendezvous:
127  */
128 extern void mp_rendezvous(
129 	void (*setup_func)(void *),
130 	void (*action_func)(void *),
131 	void (*teardown_func)(void *),
132 	void *arg);
133 extern void mp_rendezvous_no_intrs(
134 	void (*action_func)(void *),
135 	void *arg);
136 extern void mp_rendezvous_break_lock(void);
137 extern void mp_rendezvous_lock(void);
138 extern void mp_rendezvous_unlock(void);
139 
140 /*
141  * All cpu broadcast.
142  * Called from thread context, this blocks until all active cpus have
143  * run action_func:
144  */
145 extern void mp_broadcast(
146 	void (*action_func)(void *),
147 	void *arg);
148 #if MACH_KDP
149 typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
150 
151 extern  long kdp_x86_xcpu_invoke(const uint16_t lcpu,
152     kdp_x86_xcpu_func_t func,
153     void *arg0, void *arg1, uint64_t timeout);
154 typedef enum    {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
155 #endif
156 
157 typedef uint32_t cpu_t;
158 typedef volatile uint64_t cpumask_t;
159 
160 static_assert(sizeof(cpumask_t) * CHAR_BIT >= MAX_CPUS, "cpumask_t bitvector is too small for current MAX_CPUS value");
161 
162 static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)163 cpu_to_cpumask(cpu_t cpu)
164 {
165 	return (cpu < MAX_CPUS) ? (1ULL << cpu) : 0;
166 }
167 #define CPUMASK_ALL             0xffffffffffffffffULL
168 #define CPUMASK_SELF            cpu_to_cpumask((cpu_t)cpu_number())
169 #define CPUMASK_OTHERS          (CPUMASK_ALL & ~CPUMASK_SELF)
170 #define CPUMASK_REAL_OTHERS     (((1ULL << real_ncpus) - 1) & ~CPUMASK_SELF)
171 
/* Initialization routine called at processor registration */
173 extern void mp_cpus_call_cpu_init(int cpu);
174 
175 /*
176  * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
177  * The mask may include the local cpu.
178  * If the mode is:
179  *	- ASYNC:  other cpus make their calls in parallel
180  *      - SYNC:   the calls are performed serially in logical cpu order
181  *      - NOSYNC: the calls are queued
182  * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
183  * called on all specified cpus.
184  * The return value is the number of cpus where the call was made or queued.
185  * The action function is called with interrupts disabled.
186  */
187 extern cpu_t mp_cpus_call(
188 	cpumask_t       cpus,
189 	mp_sync_t       mode,
190 	void            (*action_func)(void *),
191 	void            *arg);
192 extern cpu_t mp_cpus_call1(
193 	cpumask_t       cpus,
194 	mp_sync_t       mode,
195 	void            (*action_func)(void *, void*),
196 	void            *arg0,
197 	void            *arg1,
198 	cpumask_t       *cpus_calledp);
199 
/*
 * Reason a panic NMI was sent to other cpus; recorded in
 * NMI_panic_reason (see NMIPI_panic below).
 */
typedef enum {
	NONE = 0,
	SPINLOCK_TIMEOUT,       /* spinlock acquisition timed out */
	TLB_FLUSH_TIMEOUT,      /* remote TLB flush did not complete in time */
	CROSSCALL_TIMEOUT,      /* cross-cpu call timed out */
	INTERRUPT_WATCHDOG,     /* interrupt watchdog fired */
	PTE_CORRUPTION          /* page-table-entry corruption detected */
} NMI_reason_t;
208 
209 extern NMI_reason_t NMI_panic_reason;
210 
211 extern void NMIPI_panic(cpumask_t cpus, NMI_reason_t reason);
212 extern long NMI_pte_corruption_callback(void *arg0, void *arg1, uint16_t lcpu);
213 
214 /* Interrupt a set of cpus, forcing an exit out of non-root mode */
215 extern void mp_cpus_kick(cpumask_t cpus);
216 /*
217  * Power-management-specific SPI to:
218  *  - register a callout function, and
219  *  - request the callout (if registered) on a given cpu.
220  */
221 extern void PM_interrupt_register(void (*fn)(void));
222 extern void cpu_PM_interrupt(int cpu);
223 
224 __END_DECLS
225 
#if MP_DEBUG
/*
 * MP_DEBUG support: per-cpu ring logs of cross-cpu signal traffic.
 * One entry per signal sent or handled (see DBGLOG below).
 */
typedef struct {
	uint64_t        time;   /* TSC timestamp of the event (rdtsc64) */
	int             cpu;    /* the remote cpu involved in the signal */
	mp_event_t      event;  /* which mp_event_t was signalled */
} cpu_signal_event_t;

#define LOG_NENTRIES    100
typedef struct {
	uint64_t                count[MP_LAST];          /* running total per event type */
	int                     next_entry;              /* ring index of the next slot to fill */
	cpu_signal_event_t      entry[LOG_NENTRIES];     /* fixed-size ring of events */
} cpu_signal_event_log_t;

/* Per-cpu send and handle logs, allocated lazily by DBGLOG_CPU_INIT. */
extern cpu_signal_event_log_t   *cpu_signal[];
extern cpu_signal_event_log_t   *cpu_handle[];
242 
/*
 * Record a cross-cpu signal event in the current cpu's entry of `log'
 * (either cpu_signal, for sends, or cpu_handle, for receipts).
 * `_cpu' is the remote cpu and `_event' the mp_event_t involved.
 * Interrupts are disabled around the update so a nested signal on this
 * cpu cannot tear the log entry.  Entries wrap after LOG_NENTRIES.
 */
#define DBGLOG(log, _cpu, _event) {                                             \
	boolean_t		spl = ml_set_interrupts_enabled(FALSE);         \
	cpu_signal_event_log_t	*logp = log[cpu_number()];                      \
	int			next = logp->next_entry;                        \
	cpu_signal_event_t	*eventp = &logp->entry[next];                   \
                                                                                \
	logp->count[_event]++;                                                  \
                                                                                \
	eventp->time = rdtsc64();                                               \
	eventp->cpu = _cpu;                                                     \
	eventp->event = _event;                                                 \
	if (next == (LOG_NENTRIES - 1))                                         \
	        logp->next_entry = 0;                                           \
	else                                                                    \
	        logp->next_entry++;                                             \
                                                                                \
	(void) ml_set_interrupts_enabled(spl);                                  \
}
261 
/*
 * Allocate (once, at processor registration) the per-cpu signal send
 * and handle logs.  KMA_NOFAIL means the allocations cannot fail, so
 * the NULL guards exist only to avoid re-allocating on a repeat call.
 *
 * BUG FIX: the second guard previously re-tested *sig_logpp, so once
 * the signal log existed the handle log was never allocated, leaving
 * cpu_handle[cpu] NULL and crashing DBGLOG on the handle path.  It
 * must test *hdl_logpp.
 */
#define DBGLOG_CPU_INIT(cpu)    {                                               \
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];                 \
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];                 \
	vm_size_t log_size = round_page(sizeof(cpu_signal_event_log_t));        \
	kma_flags_t log_flags = KMA_NOFAIL | KMA_KOBJECT |                      \
	    KMA_PERMANENT | KMA_ZERO;                                           \
                                                                                \
	if (*sig_logpp == NULL) {                                               \
	        kmem_alloc(kernel_map, (vm_offset_t *)sig_logpp,                \
	            log_size, log_flags, VM_KERN_MEMORY_DIAG);                  \
	}                                                                       \
	if (*hdl_logpp == NULL) {                                               \
	        kmem_alloc(kernel_map, (vm_offset_t *)hdl_logpp,                \
	            log_size, log_flags, VM_KERN_MEMORY_DIAG);                  \
	}                                                                       \
}
278 #else   /* MP_DEBUG */
279 #define DBGLOG(log, _cpu, _event)
280 #define DBGLOG_CPU_INIT(cpu)
281 #endif  /* MP_DEBUG */
282 
283 #endif  /* ASSEMBLER */
284 
285 #ifdef ASSEMBLER
286 #define i_bit(bit, word)        ((long)(*(word)) & (1L << (bit)))
287 #else
/* Test bit `bit' of `word'; result is nonzero iff the bit is set. */
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit)
{
	return word & (1L << bit);
}
294 #define i_bit(bit, word)        i_bit_impl((long)(*(word)), bit)
295 #endif
296 
297 
298 #endif /* _I386_MP_H_ */
299 
300 #endif /* KERNEL_PRIVATE */
301