1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #ifndef _I386_MACHINE_ROUTINES_H_
33 #define _I386_MACHINE_ROUTINES_H_
34
35 #include <mach/mach_types.h>
36 #include <mach/boolean.h>
37 #include <kern/kern_types.h>
38 #include <pexpert/pexpert.h>
39
40 #include <sys/cdefs.h>
41 #include <sys/appleapiopts.h>
42
43 #include <stdarg.h>
44
45 __BEGIN_DECLS
46
47 #ifdef XNU_KERNEL_PRIVATE
48
/* are we a 64 bit platform ? */

boolean_t ml_is64bit(void);

/* is this a 64bit thread? */

boolean_t ml_thread_is64bit(thread_t);

/* is this a 64bit saved machine state? (opaque state pointer) */

boolean_t ml_state_is64bit(void *);

/* set state of fpu save area for signal handling */

void ml_fp_setvalid(boolean_t);

/* select the LDT to use on this CPU */
void ml_cpu_set_ldt(int);

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void);

/* Generate a fake interrupt */
void ml_cause_interrupt(void);

/* Install an interrupt handler for the given nub/source */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon);

/* Read the current timebase value */
uint64_t ml_get_timebase(void);
/* Read the timebase for use as an entropy source */
uint64_t ml_get_timebase_entropy(void);
85
86 #if MACH_KERNEL_PRIVATE
87 /**
88 * Issue a barrier that guarantees all prior memory accesses will complete
89 * before any subsequent timebase reads.
90 */
91 static inline void
ml_memory_to_timebase_fence(void)92 ml_memory_to_timebase_fence(void)
93 {
94 /*
95 * No-op on x86. mach_absolute_time() & co. have load and lfence
96 * instructions that already guarantee this ordering.
97 */
98 }
99
100 /**
101 * Issue a barrier that guarantees all prior timebase reads will
102 * be ordered before any subsequent memory accesses.
103 */
104 static inline void
ml_timebase_to_memory_fence(void)105 ml_timebase_to_memory_fence(void)
106 {
107 }
108 #endif /* MACH_KERNEL_PRIVATE */
109
/* Set the threshold used by ml_delay_should_spin() */
void ml_init_delay_spin_threshold(int);

/* Decide whether a delay of the given interval should spin rather than block */
boolean_t ml_delay_should_spin(uint64_t interval);

/* Hook invoked when a delay loop yields */
extern void ml_delay_on_yield(void);

/* Translate a physical address in the static (wired) kernel region to virtual */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t);

/* Release wired static memory back to the VM system */
void ml_static_mfree(
	vm_offset_t,
	vm_size_t);

/* Change protections on a range of static memory */
kern_return_t
ml_static_protect(
	vm_offset_t start,
	vm_size_t size,
	vm_prot_t new_prot);

/* Verify that a range of static memory carries the expected protections */
kern_return_t
ml_static_verify_page_protections(
	uint64_t base, uint64_t size, vm_prot_t prot);

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr);

/* Copy without taking faults; presumably returns bytes copied -- confirm in impl */
vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size);

/* Check that a virtual range can be accessed without faulting */
boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size);

/* Machine topology info: x86 has a single, symmetric cluster type */
typedef enum {
	CLUSTER_TYPE_SMP,       /* symmetric multiprocessing: all CPUs alike */
	MAX_CPU_TYPES,          /* count sentinel, not a real cluster type */
} cluster_type_t;
149
/* Return the size of the given CPU cache level */
uint64_t ml_cpu_cache_size(unsigned int level);

/* Set the maximum number of CPUs */
void ml_set_max_cpus(
	unsigned int max_cpus);

/* CPU bring-up / tear-down notifications and bookkeeping */
extern void ml_cpu_init_completed(void);
extern void ml_cpu_up(void);
extern void ml_cpu_down(void);
extern void ml_cpu_up_update_counts(int cpu_id);
extern void ml_cpu_down_update_counts(int cpu_id);

/* Zero a physical range (non-cached variant, per the _nc suffix) */
void bzero_phys_nc(
	addr64_t phys_address,
	uint32_t length);
/* Timer coalescing tunables */
extern uint32_t interrupt_timer_coalescing_enabled;
extern uint32_t idle_entry_timer_processing_hdeadline_threshold;

/* Timer-coalescing trace points: compile away unless TCOAL_INSTRUMENT is set */
#if TCOAL_INSTRUMENT
#define TCOAL_DEBUG KERNEL_DEBUG_CONSTANT
#else
#define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0)
#endif /* TCOAL_INSTRUMENT */
173
#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE)
/* IO memory map services */

/* Low-level mapping of a physical IO range; flags/prot select caching and access */
extern vm_offset_t io_map(
	vm_map_offset_t phys_addr,
	vm_size_t size,
	unsigned int flags,
	vm_prot_t prot,
	bool unmappable);

/* Map memory map IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size);

/* Map IO space write-combined (per the _wcomb suffix) */
vm_offset_t ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size);

/* Map IO space such that the mapping can later be removed */
vm_offset_t ml_io_map_unmappable(
	vm_offset_t phys_addr,
	vm_size_t size,
	unsigned int flags);

/* Return the location and size of the bounce pool */
void ml_get_bouncepool_info(
	vm_offset_t *phys_addr,
	vm_size_t *size);
/* Indicates if spinlock, IPI and other timeouts should be suspended */
boolean_t machine_timeout_suspended(void);
void plctrace_disable(void);
#endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */
205
/* Warm up a CPU to receive an interrupt */
kern_return_t ml_interrupt_prewarm(uint64_t deadline);

/*
 * Machine layer routine for intercepting panics.
 * panic_format_str is a printf-style format (enforced by __printflike)
 * with its arguments supplied via panic_args.
 */
__printflike(1, 0)
void ml_panic_trap_to_debugger(const char *panic_format_str,
    va_list *panic_args,
    unsigned int reason,
    void *ctx,
    uint64_t panic_options_mask,
    unsigned long panic_caller,
    const char *panic_initiator);
218 #endif /* XNU_KERNEL_PRIVATE */
219
220 #ifdef KERNEL_PRIVATE
221
/* Type for the Time Base Enable function */
typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable);

/* Type for the IPI Handler */
typedef void (*ipi_handler_t)(void);

/*
 * Struct for ml_processor_register.
 * NOTE(review): the x86 ml_processor_register() below takes discrete
 * arguments rather than this struct -- confirm which consumers remain.
 */
struct ml_processor_info {
	cpu_id_t cpu_id;                     /* platform-assigned CPU identifier */
	boolean_t boot_cpu;                  /* TRUE for the boot processor */
	vm_offset_t start_paddr;             /* physical start address -- presumably the startup vector; confirm */
	boolean_t supports_nap;              /* processor supports nap/low-power idle */
	unsigned long l2cr_value;            /* L2 control register value -- appears to be a legacy field; confirm use on x86 */
	time_base_enable_t time_base_enable; /* callback to enable/disable the time base */
};

typedef struct ml_processor_info ml_processor_info_t;
239
240
/* Register a processor with the scheduler/topology layer */
kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor_out,
	boolean_t boot_cpu,
	boolean_t start );

/* PCI config cycle probing: read that tolerates non-responding addresses */
boolean_t ml_probe_read(
	vm_offset_t paddr,
	unsigned int *val);
boolean_t ml_probe_read_64(
	addr64_t paddr,
	unsigned int *val);

/* Read physical address byte */
unsigned int ml_phys_read_byte(
	vm_offset_t paddr);
unsigned int ml_phys_read_byte_64(
	addr64_t paddr);

/* Read physical address half word */
unsigned int ml_phys_read_half(
	vm_offset_t paddr);
unsigned int ml_phys_read_half_64(
	addr64_t paddr);

/* Read physical address word */
unsigned int ml_phys_read(
	vm_offset_t paddr);
unsigned int ml_phys_read_64(
	addr64_t paddr);
unsigned int ml_phys_read_word(
	vm_offset_t paddr);
unsigned int ml_phys_read_word_64(
	addr64_t paddr);

/* Read physical address double word */
unsigned long long ml_phys_read_double(
	vm_offset_t paddr);
unsigned long long ml_phys_read_double_64(
	addr64_t paddr);

/* x86 port IO accessors; size is in bytes for the generic forms */
extern uint32_t ml_port_io_read(uint16_t ioport, int size);
extern uint8_t ml_port_io_read8(uint16_t ioport);
extern uint16_t ml_port_io_read16(uint16_t ioport);
extern uint32_t ml_port_io_read32(uint16_t ioport);
extern void ml_port_io_write(uint16_t ioport, uint32_t val, int size);
extern void ml_port_io_write8(uint16_t ioport, uint8_t val);
extern void ml_port_io_write16(uint16_t ioport, uint16_t val);
extern void ml_port_io_write32(uint16_t ioport, uint32_t val);
294
/* Write physical address byte (low 8 bits of data, presumably -- confirm) */
void ml_phys_write_byte(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_byte_64(
	addr64_t paddr, unsigned int data);

/* Write physical address half word */
void ml_phys_write_half(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_half_64(
	addr64_t paddr, unsigned int data);

/* Write physical address word */
void ml_phys_write(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_64(
	addr64_t paddr, unsigned int data);
void ml_phys_write_word(
	vm_offset_t paddr, unsigned int data);
void ml_phys_write_word_64(
	addr64_t paddr, unsigned int data);

/* Write physical address double word */
void ml_phys_write_double(
	vm_offset_t paddr, unsigned long long data);
void ml_phys_write_double_64(
	addr64_t paddr, unsigned long long data);
322
/*
 * Struct for ml_cpu_get_info.
 * Cache sizes are presumably in bytes -- confirm against the implementation.
 */
struct ml_cpu_info {
	uint32_t vector_unit;     /* vector/SIMD capability indicator */
	uint32_t cache_line_size; /* cache line size */
	uint32_t l1_icache_size;  /* L1 instruction cache size */
	uint32_t l1_dcache_size;  /* L1 data cache size */
	uint32_t l2_settings;     /* L2 configuration bits */
	uint32_t l2_cache_size;   /* L2 cache size */
	uint32_t l3_settings;     /* L3 configuration bits */
	uint32_t l3_cache_size;   /* L3 cache size */
};

typedef struct ml_cpu_info ml_cpu_info_t;

/* Get processor info */
void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info);
339
/* Apply a machine-specific scheduling policy to a thread */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info);

/* policy_id / policy_info values for ml_thread_policy() */
#define MACHINE_GROUP 0x00000001
#define MACHINE_NETWORK_GROUP 0x10000000
#define MACHINE_NETWORK_WORKLOOP 0x00000001
#define MACHINE_NETWORK_NETISR 0x00000002

/* Return the maximum number of CPUs set by ml_set_max_cpus(), blocking if necessary */
unsigned int ml_wait_max_cpus(
	void);

/*
 * The following are in pmCPU.c not machine_routines.c.
 * Power-management latency knobs (snoop, bus, interrupt delays).
 */
extern void ml_set_maxsnoop(uint32_t maxdelay);
extern unsigned ml_get_maxsnoop(void);
extern void ml_set_maxbusdelay(uint32_t mdelay);
extern uint32_t ml_get_maxbusdelay(void);
extern void ml_set_maxintdelay(uint64_t mdelay);
extern uint64_t ml_get_maxintdelay(void);
extern boolean_t ml_get_interrupt_prewake_applicable(void);


/* Convert a time value using a fixed-point conversion factor */
extern uint64_t tmrCvt(uint64_t time, uint64_t conversion);

/* Timestamp of the most recent CPU interrupt event */
extern uint64_t ml_cpu_int_event_time(void);
369
370 #endif /* KERNEL_PRIVATE */
371
/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void);

/* Set Interrupts Enabled; returns the previous enable state */
boolean_t ml_set_interrupts_enabled(boolean_t enable);
/* x86 has no separate debug-interrupt state; the dbg argument is ignored */
#define ml_set_interrupts_enabled_with_debug(en, dbg) ml_set_interrupts_enabled(en);
boolean_t ml_early_set_interrupts_enabled(boolean_t enable);

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void);

#ifdef XNU_KERNEL_PRIVATE

/* TRUE when the current interrupt arrived while user code was running */
bool ml_did_interrupt_userspace(void);

/* System sleep/quiesce state */
extern boolean_t ml_is_quiescing(void);
extern void ml_set_is_quiescing(boolean_t);
extern uint64_t ml_get_booter_memory_size(void);
/* Topology queries parameterized by cluster type (x86: CLUSTER_TYPE_SMP only) */
unsigned int ml_cpu_cache_sharing(unsigned int level, cluster_type_t cluster_type, bool include_all_cpu_types);
void ml_cpu_get_info_type(ml_cpu_info_t * ml_cpu_info, cluster_type_t cluster_type);
unsigned int ml_get_cpu_number_type(cluster_type_t cluster_type, bool logical, bool available);
unsigned int ml_get_cluster_number_type(cluster_type_t cluster_type);
unsigned int ml_get_cpu_types(void);
#endif

/* Zero bytes starting at a physical address */
void bzero_phys(
	addr64_t phys_address,
	uint32_t length);

/* Bytes available on current stack */
vm_offset_t ml_stack_remaining(void);
404
#if defined(MACH_KERNEL_PRIVATE)
/* Size-parameterized physical read/write primitives (psz/size in bytes) */
__private_extern__ uint64_t ml_phys_read_data(uint64_t paddr, int psz);
__private_extern__ void ml_phys_write_data(uint64_t paddr,
    unsigned long long data, int size);
/* Verify that a virtual address is mapped non-cacheable */
__private_extern__ uintptr_t
pmap_verify_noncacheable(uintptr_t vaddr);
void machine_lockdown(void);
#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

/* FPU feature availability */
boolean_t ml_fpu_avx_enabled(void);
boolean_t ml_fpu_avx512_enabled(void);

/* Interrupt latency instrumentation */
void interrupt_latency_tracker_setup(void);
void interrupt_reset_latency_stats(void);
void interrupt_populate_latency_stats(char *, unsigned);
void ml_get_power_state(boolean_t *, boolean_t *);

/* Timer maintenance */
void timer_queue_expire_rescan(void*);
void ml_timer_evaluate(void);
boolean_t ml_timer_forced_evaluation(void);

/* GPU accounting and wake tracking */
void ml_gpu_stat_update(uint64_t);
uint64_t ml_gpu_stat(thread_t);
boolean_t ml_recent_wake(void);

#ifdef MACH_KERNEL_PRIVATE
struct i386_cpu_info;
struct machine_thread;
/* LBR (last branch record) support */
void i386_lbr_init(struct i386_cpu_info *info_p, bool is_master);
int i386_filtered_lbr_state_to_mach_thread_state(thread_t thr_act, last_branch_state_t *machlbrp, boolean_t from_userspace);
void i386_lbr_synch(thread_t thr);
void i386_lbr_enable(void);
void i386_lbr_disable(void);
extern lbr_modes_t last_branch_enabled_modes;
#endif

/* Physical access latency reporting/panic/trace thresholds */
extern uint64_t report_phy_read_delay;
extern uint64_t report_phy_write_delay;
extern uint32_t phy_read_panic;
extern uint32_t phy_write_panic;
extern uint64_t trace_phy_read_delay;
extern uint64_t trace_phy_write_delay;

/* Hibernation entry/exit hooks */
void ml_hibernate_active_pre(void);
void ml_hibernate_active_post(void);

int ml_page_protection_type(void);

#endif /* XNU_KERNEL_PRIVATE */
456
457 __END_DECLS
458
459 #endif /* _I386_MACHINE_ROUTINES_H_ */
460