1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 */
59 #ifdef KERNEL_PRIVATE
60
61 #ifndef _I386_MP_H_
62 #define _I386_MP_H_
63
64 //#define MP_DEBUG 1
65
66 #include <i386/apic.h>
67 #include <i386/mp_events.h>
68 #include <machine/limits.h>
69
70 #define MAX_CPUS 64 /* 8 * sizeof(cpumask_t) */
71
72 #ifndef ASSEMBLER
73 #include <stdint.h>
74 #include <sys/cdefs.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/i386/thread_status.h>
78 #include <mach/vm_types.h>
79 #include <kern/simple_lock.h>
80 #include <kern/assert.h>
81
82 __BEGIN_DECLS
83
84 extern kern_return_t intel_startCPU(int slot_num);
85 extern kern_return_t intel_startCPU_fast(int slot_num);
86 extern void i386_init_slave(void) __dead2;
87 extern void i386_init_slave_fast(void) __dead2;
88 extern void smp_init(void);
89
90 extern void cpu_interrupt(int cpu);
91 __END_DECLS
92
93 extern unsigned int real_ncpus; /* real number of cpus */
94 extern unsigned int max_ncpus; /* max number of cpus */
95 extern unsigned int max_cpus_from_firmware; /* actual max cpus, from firmware (ACPI) */
96 decl_simple_lock_data(extern, kdb_lock); /* kdb lock */
97
98 __BEGIN_DECLS
99
100 extern void console_init(void);
101 extern void *console_cpu_alloc(boolean_t boot_cpu);
102 extern void console_cpu_free(void *console_buf);
103
104 extern int kdb_cpu; /* current cpu running kdb */
105 extern int kdb_debug;
106 extern int kdb_active[];
107
108 extern volatile boolean_t mp_kdp_trap;
109 extern volatile boolean_t mp_kdp_is_NMI;
110 extern volatile boolean_t force_immediate_debugger_NMI;
111 extern volatile boolean_t pmap_tlb_flush_timeout;
112
113 extern uint64_t LastDebuggerEntryAllowance;
114
115 extern void mp_kdp_enter(boolean_t proceed_on_failure);
116 extern void mp_kdp_exit(void);
117 extern boolean_t mp_kdp_all_cpus_halted(void);
118
119 extern boolean_t mp_recent_debugger_activity(void);
120 extern void kernel_spin(uint64_t spin_ns);
121
122 /*
123 * All cpu rendezvous:
124 */
125 extern void mp_rendezvous(
126 void (*setup_func)(void *),
127 void (*action_func)(void *),
128 void (*teardown_func)(void *),
129 void *arg);
130 extern void mp_rendezvous_no_intrs(
131 void (*action_func)(void *),
132 void *arg);
133 extern void mp_rendezvous_break_lock(void);
134 extern void mp_rendezvous_lock(void);
135 extern void mp_rendezvous_unlock(void);
136
137 /*
138 * All cpu broadcast.
139 * Called from thread context, this blocks until all active cpus have
140 * run action_func:
141 */
142 extern void mp_broadcast(
143 void (*action_func)(void *),
144 void *arg);
145 #if MACH_KDP
146 typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
147
148 extern long kdp_x86_xcpu_invoke(const uint16_t lcpu,
149 kdp_x86_xcpu_func_t func,
150 void *arg0, void *arg1, uint64_t timeout);
151 typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
152 #endif
153
154 typedef uint32_t cpu_t;
155 typedef volatile uint64_t cpumask_t;
156
157 static_assert(sizeof(cpumask_t) * CHAR_BIT >= MAX_CPUS, "cpumask_t bitvector is too small for current MAX_CPUS value");
158
159 static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)160 cpu_to_cpumask(cpu_t cpu)
161 {
162 return (cpu < MAX_CPUS) ? (1ULL << cpu) : 0;
163 }
164 #define CPUMASK_ALL 0xffffffffffffffffULL
165 #define CPUMASK_SELF cpu_to_cpumask((cpu_t)cpu_number())
166 #define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF)
167 #define CPUMASK_REAL_OTHERS (((1ULL << real_ncpus) - 1) & ~CPUMASK_SELF)
168
169 /* Initialization routine called at processor registration */
170 extern void mp_cpus_call_cpu_init(int cpu);
171
172 /*
173 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
174 * The mask may include the local cpu.
175 * If the mode is:
176 * - ASYNC: other cpus make their calls in parallel
177 * - SYNC: the calls are performed serially in logical cpu order
178 * - NOSYNC: the calls are queued
179 * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
180 * called on all specified cpus.
181 * The return value is the number of cpus where the call was made or queued.
182 * The action function is called with interrupts disabled.
183 */
184 extern cpu_t mp_cpus_call(
185 cpumask_t cpus,
186 mp_sync_t mode,
187 void (*action_func)(void *),
188 void *arg);
189 extern cpu_t mp_cpus_call1(
190 cpumask_t cpus,
191 mp_sync_t mode,
192 void (*action_func)(void *, void*),
193 void *arg0,
194 void *arg1,
195 cpumask_t *cpus_calledp);
196
197 typedef enum {
198 NONE = 0,
199 SPINLOCK_TIMEOUT,
200 TLB_FLUSH_TIMEOUT,
201 CROSSCALL_TIMEOUT,
202 INTERRUPT_WATCHDOG,
203 PTE_CORRUPTION
204 } NMI_reason_t;
205
206 extern NMI_reason_t NMI_panic_reason;
207
208 extern void NMIPI_panic(cpumask_t cpus, NMI_reason_t reason);
209 extern long NMI_pte_corruption_callback(void *arg0, void *arg1, uint16_t lcpu);
210
211 /* Interrupt a set of cpus, forcing an exit out of non-root mode */
212 extern void mp_cpus_kick(cpumask_t cpus);
213 /*
214 * Power-management-specific SPI to:
215 * - register a callout function, and
216 * - request the callout (if registered) on a given cpu.
217 */
218 extern void PM_interrupt_register(void (*fn)(void));
219 extern void cpu_PM_interrupt(int cpu);
220
221 __END_DECLS
222
223 #if MP_DEBUG
224 typedef struct {
225 uint64_t time;
226 int cpu;
227 mp_event_t event;
228 } cpu_signal_event_t;
229
230 #define LOG_NENTRIES 100
231 typedef struct {
232 uint64_t count[MP_LAST];
233 int next_entry;
234 cpu_signal_event_t entry[LOG_NENTRIES];
235 } cpu_signal_event_log_t;
236
237 extern cpu_signal_event_log_t *cpu_signal[];
238 extern cpu_signal_event_log_t *cpu_handle[];
239
/*
 * Log a cross-cpu signal event into this cpu's ring buffer (selected from
 * the per-cpu log array by cpu_number()).  Interrupts are disabled around
 * the update so a nested signal on the same cpu cannot tear an entry.
 * NOTE: _event is evaluated more than once -- pass side-effect-free
 * arguments only.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe in unbraced if/else bodies.
 */
#define DBGLOG(log, _cpu, _event) do {                                  \
	boolean_t spl = ml_set_interrupts_enabled(FALSE);               \
	cpu_signal_event_log_t *logp = log[cpu_number()];               \
	int next = logp->next_entry;                                    \
	cpu_signal_event_t *eventp = &logp->entry[next];                \
	                                                                \
	logp->count[_event]++;                                          \
	                                                                \
	eventp->time = rdtsc64();                                       \
	eventp->cpu = _cpu;                                             \
	eventp->event = _event;                                         \
	/* advance ring index, wrapping at LOG_NENTRIES */              \
	if (next == (LOG_NENTRIES - 1))                                 \
	        logp->next_entry = 0;                                   \
	else                                                            \
	        logp->next_entry++;                                     \
	                                                                \
	(void) ml_set_interrupts_enabled(spl);                          \
} while (0)
258
/*
 * Allocate (once, permanently, zero-filled) the signal and handler event
 * logs for a cpu at processor registration time.
 * Fix: the second allocation previously re-tested *sig_logpp, so the
 * handler log was only allocated when the signal log was still NULL --
 * it must test *hdl_logpp and allocate the handler log.
 * Wrapped in do { } while (0) for statement safety in if/else bodies.
 */
#define DBGLOG_CPU_INIT(cpu) do {                                       \
	cpu_signal_event_log_t **sig_logpp = &cpu_signal[cpu];          \
	cpu_signal_event_log_t **hdl_logpp = &cpu_handle[cpu];          \
	vm_size_t log_size = round_page(sizeof(cpu_signal_event_log_t)); \
	kma_flags_t log_flags = KMA_NOFAIL | KMA_KOBJECT |              \
	    KMA_PERMANENT | KMA_ZERO;                                   \
	                                                                \
	if (*sig_logpp == NULL) {                                       \
	        kmem_alloc(kernel_map, (vm_offset_t *)sig_logpp,        \
	            log_size, log_flags, VM_KERN_MEMORY_DIAG);          \
	}                                                               \
	if (*hdl_logpp == NULL) {                                       \
	        kmem_alloc(kernel_map, (vm_offset_t *)hdl_logpp,        \
	            log_size, log_flags, VM_KERN_MEMORY_DIAG);          \
	}                                                               \
} while (0)
275 #else /* MP_DEBUG */
276 #define DBGLOG(log, _cpu, _event)
277 #define DBGLOG_CPU_INIT(cpu)
278 #endif /* MP_DEBUG */
279
280 #endif /* ASSEMBLER */
281
282 #ifdef ASSEMBLER
283 #define i_bit(bit, word) ((long)(*(word)) & (1L << (bit)))
284 #else
/*
 * Return the value of bit `bit` within `word` (non-zero iff the bit is
 * set).  Backs the i_bit() macro for C callers.
 * NOTE(review): assumes 0 <= bit < bit-width of long; a larger or
 * negative bit count would be an undefined shift -- callers appear to
 * honor this, but confirm at call sites.
 */
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit)
{
	return word & (1L << bit);
}
291 #define i_bit(bit, word) i_bit_impl((long)(*(word)), bit)
292 #endif
293
294
295 #endif /* _I386_MP_H_ */
296
297 #endif /* KERNEL_PRIVATE */
298