/* xref: /xnu-12377.1.9/osfmk/machine/machine_routines.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea) */
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _MACHINE_MACHINE_ROUTINES_H
29 #define _MACHINE_MACHINE_ROUTINES_H
30 
31 #include <sys/cdefs.h>
32 #include <stdint.h>
33 
34 #if defined (__i386__) || defined(__x86_64__)
35 #include "i386/machine_routines.h"
36 #elif defined (__arm__) || defined (__arm64__)
37 #include "arm/machine_routines.h"
38 #else
39 #error architecture not supported
40 #endif
41 
42 __BEGIN_DECLS
43 
44 #ifdef XNU_KERNEL_PRIVATE
45 #pragma GCC visibility push(hidden)
46 
/*!
 * @function      ml_cpu_can_exit
 * @brief         Check whether the platform code allows |cpu_id| to be
 *                shut down at runtime outside of system sleep.
 * @param cpu_id  Logical CPU ID of the core to query
 * @return        true if allowed, false otherwise
 */
bool ml_cpu_can_exit(int cpu_id);
54 
/*!
 * @function      ml_cpu_begin_state_transition
 * @brief         Tell the platform code that processor_start() or
 *                processor_exit() is about to begin for |cpu_id|.  This
 *                can block.
 * @param cpu_id  CPU that is (potentially) going up or down
 */
void ml_cpu_begin_state_transition(int cpu_id);

/*!
 * @function      ml_cpu_end_state_transition
 * @brief         Tell the platform code that processor_start() or
 *                processor_exit() is finished for |cpu_id|.  This
 *                can block.  Can be called from a different thread from
 *                ml_cpu_begin_state_transition().
 * @param cpu_id  CPU that is (potentially) going up or down
 */
void ml_cpu_end_state_transition(int cpu_id);
73 
/*!
 * @function      ml_cpu_begin_loop
 * @brief         Acquire a global lock that prevents processor_start() or
 *                processor_exit() from changing any CPU states for the
 *                duration of a loop.  This can block.
 */
void ml_cpu_begin_loop(void);

/*!
 * @function      ml_cpu_end_loop
 * @brief         Release the global lock acquired by ml_cpu_begin_loop().
 *                Must be called from the same thread as ml_cpu_begin_loop().
 */
void ml_cpu_end_loop(void);
88 
/*!
 * @function      ml_early_cpu_max_number
 * @brief         Returns an early maximum cpu number the kernel will ever use.
 *
 * @return        the maximum cpu number the kernel will ever use.
 *
 * @discussion
 * The value returned by this function might be an over-estimate,
 * but is more precise than @c MAX_CPUS.
 *
 * Unlike @c real_ncpus which is only initialized late in boot,
 * this can be called during startup after the @c STARTUP_SUB_TUNABLES
 * subsystem has been initialized.
 */
int ml_early_cpu_max_number(void);
104 
/*!
 * @function        ml_cpu_power_enable
 * @abstract        Enable voltage rails to a CPU prior to bringing it up
 * @discussion      Called from the scheduler to enable any voltage rails
 *                  needed by a CPU.  This should happen before the
 *                  CPU_BOOT_REQUESTED broadcast.  This does not boot the
 *                  CPU and it may be a no-op on some platforms.  This must be
 *                  called from a schedulable context.
 * @param cpu_id    The logical CPU ID (from the topology) of the CPU to be booted
 */
void ml_cpu_power_enable(int cpu_id);

/*!
 * @function        ml_cpu_power_disable
 * @abstract        Disable voltage rails to a CPU after bringing it down
 * @discussion      Called from the scheduler to disable any voltage rails
 *                  that are no longer needed by an offlined CPU or cluster.
 *                  This should happen after the CPU_EXITED broadcast.
 *                  This does not halt the CPU and it may be a no-op on some
 *                  platforms.  This must be called from a schedulable context.
 * @param cpu_id    The logical CPU ID (from the topology) of the halted CPU
 */
void ml_cpu_power_disable(int cpu_id);
128 
129 #pragma GCC visibility pop
130 #endif /* defined(XNU_KERNEL_PRIVATE) */
131 
/*!
 * @enum     cpu_event
 * @abstract Broadcast events allowing clients to hook CPU state transitions.
 * @constant CPU_BOOT_REQUESTED      Called from processor_start(); may block.
 * @constant CPU_BOOTED              Called from platform code on the newly-booted CPU; may not block.
 * @constant CPU_ACTIVE              Called from scheduler code; may block.
 * @constant CLUSTER_ACTIVE          Called from platform code; may block.
 * @constant CPU_EXIT_REQUESTED      Called from processor_exit(); may block.
 * @constant CPU_DOWN                Called from platform code on the disabled CPU; may not block.
 * @constant CLUSTER_EXIT_REQUESTED  Called from platform code; may block.
 * @constant CPU_EXITED              Called after CPU is stopped; may block.
 * @discussion
 * NOTE(review): the PLATFORM_* constants below are not documented here.  From
 * their names they appear to track system-wide power/panic transitions
 * (quiesce/resume, halt-restart, panic, sleep/wake), but their exact calling
 * contexts and blocking rules are not visible in this header — confirm against
 * the code that calls ml_broadcast_cpu_event() before relying on them.
 */
enum cpu_event {
	CPU_BOOT_REQUESTED = 0,
	CPU_BOOTED,
	CPU_ACTIVE,
	CLUSTER_ACTIVE,
	CPU_EXIT_REQUESTED,
	CPU_DOWN,
	CLUSTER_EXIT_REQUESTED,
	CPU_EXITED,
	PLATFORM_QUIESCE,
	PLATFORM_ACTIVE,
	PLATFORM_HALT_RESTART,
	PLATFORM_PANIC,
	PLATFORM_PANIC_SYNC,
	PLATFORM_PRE_SLEEP,
	PLATFORM_POST_RESUME,
};
161 
/*!
 * @typedef  cpu_callback_t
 * @abstract Callback invoked on each cpu_event broadcast.
 * @discussion
 * |param| is the pointer registered with cpu_event_register_callback();
 * |cpu_or_cluster| is the logical CPU (or cluster) ID the event applies to.
 * NOTE(review): the meaning of the bool return value is not visible in this
 * header — confirm with the ml_broadcast_cpu_event() implementation.
 */
typedef bool (*cpu_callback_t)(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
163 
/*!
 * @function              cpu_event_register_callback
 * @abstract              Register a function to be called on CPU state changes.
 * @param fn              Function to call on state change events.
 * @param param           Optional argument to be passed to the callback (e.g. object pointer).
 */
void cpu_event_register_callback(cpu_callback_t fn, void *param);

/*!
 * @function              cpu_event_unregister_callback
 * @abstract              Unregister a previously-registered callback function.
 * @param fn              Function pointer previously passed to cpu_event_register_callback().
 */
void cpu_event_unregister_callback(cpu_callback_t fn);
178 
#if XNU_KERNEL_PRIVATE
/*!
 * @function              ml_broadcast_cpu_event
 * @abstract              Internal XNU function used to broadcast CPU state changes to callers.
 * @param event           CPU event that is occurring.
 * @param cpu_or_cluster  Logical CPU ID of the core (or cluster) affected by the event.
 */
void ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster);
#endif /* XNU_KERNEL_PRIVATE */
188 
/*
 * Record a cpu_event broadcast for |cpu_or_cluster| in a debug log.
 * NOTE(review): log storage and retention policy are not visible in this
 * header — see the implementation for details.
 */
void cpu_event_debug_log(enum cpu_event event, unsigned int cpu_or_cluster);

/*
 * Emit the accumulated CPU event debug log through the caller-supplied
 * printf-style |printf_func| (e.g. a panic/console printer).
 */
void dump_cpu_event_log(int (*printf_func)(const char * fmt, ...));
192 
/*!
 * @function      ml_io_read()
 * @brief         Perform an MMIO read access
 *
 * @param iovaddr Virtual address of the device register to read
 * @param iovsz   Access size in bytes; presumably 1, 2, 4, or 8 to match the
 *                sized variants below — TODO confirm against the implementation
 * @return        The value resulting from the read.
 *
 */
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
/* Fixed-width variants; the access width is encoded in the name. */
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

/*
 * Read an IO register on behalf of |logical_cpu|.
 * NOTE(review): how the access is routed to the target CPU is not visible
 * here — confirm with the platform implementation.
 */
uint64_t ml_io_read_cpu_reg(uintptr_t io_vaddr, int io_sz, int logical_cpu);
207 
208 
/*!
 * @function      ml_io_write()
 * @brief         Perform an MMIO write access
 *
 * @param vaddr   Virtual address of the device register to write
 * @param val     Value to write
 * @param size    Access size in bytes; presumably 1, 2, 4, or 8 to match the
 *                sized variants below — TODO confirm against the implementation
 */
void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
/* Fixed-width variants; the access width is encoded in the name. */
void ml_io_write8(uintptr_t vaddr, uint8_t val);
void ml_io_write16(uintptr_t vaddr, uint16_t val);
void ml_io_write32(uintptr_t vaddr, uint32_t val);
void ml_io_write64(uintptr_t vaddr, uint64_t val);
219 
#if XNU_KERNEL_PRIVATE
/*
 * ml_io access timeouts and tracing.
 *
 * We are specific in what to compile in, in order to not burden
 * heavily used code with paths that will never be used on common
 * configurations.
 */

/* ml_io_read/write timeouts are generally enabled on macOS, because
 * they may help developers. */
#if  (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG)

/* Master switch for the ml_io_* timeout machinery below. */
#define ML_IO_TIMEOUTS_ENABLED 1

/* Simulating stretched IO is only for DEVELOPMENT || DEBUG. */
#if DEVELOPMENT || DEBUG
#define ML_IO_SIMULATE_STRETCHED_ENABLED 1
#endif

/* We also check that the memory is mapped non-cacheable on x86 internally. */
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
#define ML_IO_VERIFY_UNCACHEABLE 1
#endif

#endif /* (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG) */
#endif /* XNU_KERNEL_PRIVATE */
247 
248 #if KERNEL_PRIVATE
249 
/*!
 * @function                    ml_io_increase_timeouts
 * @brief                       Increase the ml_io_read* and ml_io_write*
 *                              timeouts for a region of VA space
 *                              [`iovaddr_base', `iovaddr_base' + `size').
 * @discussion                  This function is intended for building an
 *                              allowlist of known-misbehaving register spaces
 *                              on specific peripherals.  `size' must be between
 *                              1 and 4096 inclusive, and the VA range must not
 *                              overlap with any ranges previously passed to
 *                              ml_io_increase_timeouts().
 * @note                        This function has no effect when the new timeouts are
 *                              shorter than the global timeouts.
 * @param iovaddr_base          Base VA of the target region
 * @param size                  Size of the target region, in bytes
 * @param read_timeout_us       New read timeout, in microseconds
 * @param write_timeout_us      New write timeout, in microseconds
 * @return                      0 if successful, or KERN_INVALID_ARGUMENT if either
 *                              the VA range or timeout is invalid.
 */
OS_WARN_RESULT
int ml_io_increase_timeouts(uintptr_t iovaddr_base, unsigned int size, uint32_t read_timeout_us, uint32_t write_timeout_us);
272 
/*!
 * @function            ml_io_reset_timeouts
 * @brief               Unregister custom timeouts previously registered by
 *                      ml_io_increase_timeouts().
 * @discussion          The caller must use the exact `iovaddr_base' and `size'
 *                      range passed to a previous ml_io_increase_timeouts()
 *                      call.  Unregistering a smaller subrange is unsupported
 *                      and will return an error.
 * @param iovaddr_base  Base VA previously passed to ml_io_increase_timeouts()
 * @param size          Size previously passed to ml_io_increase_timeouts()
 * @return              0 if successful, or KERN_NOT_FOUND if the specified range
 *                      does not match a previously-registered timeout.
 */
OS_WARN_RESULT
int ml_io_reset_timeouts(uintptr_t iovaddr_base, unsigned int size);
288 
/*!
 * @function                    ml_io_increase_timeouts_phys
 * @brief                       Increase the ml_io_read* and ml_io_write*
 *                              timeouts for a region of PA space
 *                              [`iopaddr_base', `iopaddr_base' + `size').
 * @discussion                  This function is intended for building an
 *                              allowlist of known-misbehaving register spaces
 *                              on specific peripherals.  `size' must be between
 *                              1 and 4096 inclusive, and the PA range must not
 *                              overlap with any ranges previously passed to
 *                              ml_io_increase_timeouts().
 * @note                        This function has no effect when the new timeouts are
 *                              shorter than the global timeouts. In addition to
 *                              global timeouts a larger timeout may be applied
 *                              to regions of memory which may be susceptible to
 *                              PCIe CTOs.
 *                              For IOs performed through virtual addresses, the
 *                              larger of the VA timeout (if one is set) and
 *                              this timeout is used.
 * @param iopaddr_base          Base PA of the target region
 * @param size                  Size of the target region, in bytes
 * @param read_timeout_us       New read timeout, in microseconds
 * @param write_timeout_us      New write timeout, in microseconds
 * @return                      0 if successful, or KERN_INVALID_ARGUMENT if either
 *                              the PA range or timeout is invalid.
 */
OS_WARN_RESULT
int ml_io_increase_timeouts_phys(vm_offset_t iopaddr_base, unsigned int size,
    uint32_t read_timeout_us, uint32_t write_timeout_us);
318 
/*!
 * @function            ml_io_reset_timeouts_phys
 * @brief               Unregister custom timeouts previously registered by
 *                      ml_io_increase_timeouts_phys().
 * @discussion          The caller must use the exact `iopaddr_base' and `size'
 *                      range passed to a previous ml_io_increase_timeouts_phys()
 *                      call.  Unregistering a smaller subrange is unsupported
 *                      and will return an error.
 * @param iopaddr_base  Base PA previously passed to ml_io_increase_timeouts_phys()
 * @param size          Size previously passed to ml_io_increase_timeouts_phys()
 * @return              0 if successful, or KERN_NOT_FOUND if the specified range
 *                      does not match a previously-registered timeout.
 */
OS_WARN_RESULT
int ml_io_reset_timeouts_phys(vm_offset_t iopaddr_base, unsigned int size);
334 
335 #endif /* KERNEL_PRIVATE */
336 
337 #if XNU_KERNEL_PRIVATE
338 
#if ML_IO_TIMEOUTS_ENABLED

#if !defined(__x86_64__)
/* x86 does not have the MACHINE_TIMEOUTs types, and the variables are
 * declared elsewhere.
 * (A duplicate declaration of report_phy_read_delay_to was removed here;
 * it was harmless but redundant.) */
extern machine_timeout_t report_phy_read_delay_to;
extern machine_timeout_t report_phy_write_delay_to;
extern machine_timeout_t trace_phy_read_delay_to;
extern machine_timeout_t trace_phy_write_delay_to;
#endif /* !defined(__x86_64__) */
/*
 * Apply any per-region timeout overrides for the IO mapped at
 * |vaddr|/|paddr|, writing the effective values through
 * |read_timeout|/|write_timeout|.
 */
extern void override_io_timeouts(uintptr_t vaddr, uint64_t paddr,
    uint64_t *read_timeout, uint64_t *write_timeout);

/* Per-CPU record of the MMIO access currently in flight. */
typedef struct {
	uint64_t mmio_start_mt;   /* access start timestamp ("mt" — presumably mach time; confirm) */
	uint64_t mmio_paddr;      /* physical address being accessed */
	uintptr_t mmio_vaddr;     /* virtual address being accessed */
} mmio_track_t;
PERCPU_DECL(mmio_track_t, mmio_tracker);

extern boolean_t ml_io_check_for_mmio_overrides(uint64_t mt);

#endif /* ML_IO_TIMEOUTS_ENABLED */
363 
/*
 * Copy a human-readable name for |cluster_type| into the caller-supplied
 * buffer |name| of |name_size| bytes.
 * NOTE(review): truncation/termination behavior for small buffers is not
 * visible in this header — confirm with the implementation.
 */
void ml_get_cluster_type_name(cluster_type_t cluster_type, char *name,
    size_t name_size);

/* Return the number of CPU clusters in the system topology. */
unsigned int ml_get_cluster_count(void);
368 
/**
 * Depending on the system, it's possible that a kernel backtrace could contain
 * stack frames from both XNU and non-XNU-owned stacks. This function can be
 * used to determine whether an address is pointing to one of these non-XNU
 * stacks.
 *
 * @param addr The virtual address to check.
 *
 * @return True if the address is within the bounds of a non-XNU stack. False
 *         otherwise.
 */
bool ml_addr_in_non_xnu_stack(uintptr_t addr);
381 
382 #endif /* XNU_KERNEL_PRIVATE */
383 
#if MACH_KERNEL_PRIVATE

/*!
 * @func          ml_map_cpus_to_clusters
 * @brief         Populate the logical CPU -> logical cluster ID table at |table|.
 *
 * @param table   array to write to; one uint8_t entry per logical CPU —
 *                NOTE(review): required length is not visible in this header,
 *                confirm against callers (likely ml_early_cpu_max_number()).
 */
void ml_map_cpus_to_clusters(uint8_t *table);

/*!
 * @func          ml_task_post_signature_processing_hook
 * @brief         Platform-specific hook called on the main thread of a new task
 *                after process_signature() is completed by the parent and before
 *                the main thread returns to EL0.
 *
 * @param task    The new task whose signature has been processed
 */
void ml_task_post_signature_processing_hook(task_t task);

#endif /* MACH_KERNEL_PRIVATE */
407 
#if XNU_KERNEL_PRIVATE
/**
 * Returns whether kernel text should be writable.
 *
 * @note This is always true on x86_64.
 *
 * @note On ARM, this can be set through LocalPolicy, or internally through the
 *       -unsafe_kernel_text boot arg.
 *
 * @return true if kernel text should be left writable, false otherwise.
 */
bool ml_unsafe_kernel_text(void);
#endif /* XNU_KERNEL_PRIVATE */
419 
420 __END_DECLS
421 
422 #endif /* _MACHINE_MACHINE_ROUTINES_H */
423