xref: /xnu-8020.121.3/osfmk/machine/machine_routines.h (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _MACHINE_MACHINE_ROUTINES_H
29 #define _MACHINE_MACHINE_ROUTINES_H
30 
31 #include <sys/cdefs.h>
32 
33 #if defined (__i386__) || defined(__x86_64__)
34 #include "i386/machine_routines.h"
35 #elif defined (__arm__) || defined (__arm64__)
36 #include "arm/machine_routines.h"
37 #else
38 #error architecture not supported
39 #endif
40 
41 __BEGIN_DECLS
42 
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*!
 * @function      ml_cpu_can_exit
 * @brief         Check whether the platform code allows |cpu_id| to be
 *                shut down at runtime.
 * @param cpu_id  CPU whose shutdown eligibility is being queried
 * @return        true if allowed, false otherwise
 */
bool ml_cpu_can_exit(int cpu_id);

/*!
 * @function      ml_cpu_begin_state_transition
 * @brief         Tell the platform code that processor_start() or
 *                processor_exit() is about to begin for |cpu_id|.  This
 *                can block.
 * @param cpu_id  CPU that is (potentially) going up or down
 */
void ml_cpu_begin_state_transition(int cpu_id);

/*!
 * @function      ml_cpu_end_state_transition
 * @brief         Tell the platform code that processor_start() or
 *                processor_exit() is finished for |cpu_id|.  This
 *                can block.  Can be called from a different thread from
 *                ml_cpu_begin_state_transition().
 * @param cpu_id  CPU that is (potentially) going up or down
 */
void ml_cpu_end_state_transition(int cpu_id);

/*!
 * @function      ml_cpu_begin_loop
 * @brief         Acquire a global lock that prevents processor_start() or
 *                processor_exit() from changing any CPU states for the
 *                duration of a loop.  This can block.
 */
void ml_cpu_begin_loop(void);

/*!
 * @function      ml_cpu_end_loop
 * @brief         Release the global lock acquired by ml_cpu_begin_loop().
 *                Must be called from the same thread as ml_cpu_begin_loop().
 */
void ml_cpu_end_loop(void);

/*!
 * @function      ml_early_cpu_max_number
 * @brief         Returns an early maximum cpu number the kernel will ever use.
 *
 * @return        the maximum cpu number the kernel will ever use.
 *
 * @discussion
 * The value returned by this function might be an over-estimate,
 * but is more precise than @c MAX_CPUS.
 *
 * Unlike @c real_ncpus which is only initialized late in boot,
 * this can be called during startup after the @c STARTUP_SUB_TUNABLES
 * subsystem has been initialized.
 */
int ml_early_cpu_max_number(void);

#pragma GCC visibility pop
#endif /* defined(XNU_KERNEL_PRIVATE) */
106 
/*!
 * @enum     cpu_event
 * @abstract Broadcast events allowing clients to hook CPU state transitions.
 * @constant CPU_BOOT_REQUESTED      Called from processor_start(); may block.
 * @constant CPU_BOOTED              Called from platform code on the newly-booted CPU; may not block.
 * @constant CPU_ACTIVE              Called from scheduler code; may block.
 * @constant CLUSTER_ACTIVE          Called from platform code; may not block.
 * @constant CPU_EXIT_REQUESTED      Called from processor_exit(); may block.
 * @constant CPU_DOWN                Called from platform code on the disabled CPU; may not block.
 * @constant CLUSTER_EXIT_REQUESTED  Called from platform code; may not block.
 * @constant CPU_EXITED              Called after CPU is stopped; may block.
 */
enum cpu_event {
	CPU_BOOT_REQUESTED = 0,
	CPU_BOOTED,
	CPU_ACTIVE,
	CLUSTER_ACTIVE,
	CPU_EXIT_REQUESTED,
	CPU_DOWN,
	CLUSTER_EXIT_REQUESTED,
	CPU_EXITED,
};

/*!
 * @typedef cpu_callback_t
 * @abstract Signature of a CPU-event callback registered with
 *           cpu_event_register_callback().
 * @param param           Opaque pointer supplied at registration time,
 *                        passed back unchanged on every invocation.
 * @param event           The cpu_event being broadcast.
 * @param cpu_or_cluster  Logical CPU (or cluster) ID affected by the event.
 * @discussion
 * NOTE(review): the meaning of the bool return value is not established
 * anywhere in this header — confirm against the implementation before
 * relying on it.
 */
typedef bool (*cpu_callback_t)(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
131 
/*!
 * @function              cpu_event_register_callback
 * @abstract              Register a function to be called on CPU state changes.
 * @param fn              Function to call on state change events.
 * @param param           Optional argument to be passed to the callback on
 *                        each invocation (e.g. object pointer).
 */
void cpu_event_register_callback(cpu_callback_t fn, void *param);

/*!
 * @function              cpu_event_unregister_callback
 * @abstract              Unregister a previously-registered callback function.
 * @param fn              Function pointer previously passed to cpu_event_register_callback().
 */
void cpu_event_unregister_callback(cpu_callback_t fn);
146 
#if XNU_KERNEL_PRIVATE
/*!
 * @function              ml_broadcast_cpu_event
 * @abstract              Internal XNU function used to broadcast CPU state changes to callers.
 * @param event           CPU event that is occurring.
 * @param cpu_or_cluster  Logical CPU ID of the core (or cluster) affected by the event.
 */
void ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster);
#endif /* XNU_KERNEL_PRIVATE */
156 
/*!
 * @function      ml_io_read
 * @brief         Perform an MMIO read access
 *
 * @param iovaddr Virtual address of the MMIO register to read.
 * @param iovsz   Access width in bytes — presumably 1, 2, 4, or 8 to match
 *                the sized variants below; TODO confirm against the
 *                implementation.
 *
 * @return        The value resulting from the read.
 *
 */
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
/* Fixed-width variants of ml_io_read(). */
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

/*!
 * @function      ml_io_write
 * @brief         Perform an MMIO write access
 *
 * @param vaddr   Virtual address of the MMIO register to write.
 * @param val     Value to write.
 * @param size    Access width in bytes — presumably 1, 2, 4, or 8 to match
 *                the sized variants below; TODO confirm against the
 *                implementation.
 */
void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
/* Fixed-width variants of ml_io_write(). */
void ml_io_write8(uintptr_t vaddr, uint8_t val);
void ml_io_write16(uintptr_t vaddr, uint16_t val);
void ml_io_write32(uintptr_t vaddr, uint32_t val);
void ml_io_write64(uintptr_t vaddr, uint64_t val);
180 
#if XNU_KERNEL_PRIVATE
/*
 * ml_io access timeouts and tracing.
 *
 * We are specific in what to compile in, in order to not burden
 * heavily used code with paths that will never be used on common
 * configurations.
 */

/* ml_io_read/write timeouts are generally enabled on macOS, because
 * they may help developers. */
#if  (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG)

#define ML_IO_TIMEOUTS_ENABLED 1
/* ... but tracing is only present internally (and not ported to arm yet). */
#if  (ML_IO_TIMEOUTS_ENABLED && defined(__x86_64__) && (DEVELOPMENT || DEBUG))
#define ML_IO_IOTRACE_ENABLED 1
#endif

/* Simulating stretched IO is only for DEVELOPMENT || DEBUG. */
#if DEVELOPMENT || DEBUG
#define ML_IO_SIMULATE_STRETCHED_ENABLED 1
#endif

/* We also check that the memory is mapped non-cacheable on x86 internally. */
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
#define ML_IO_VERIFY_UNCACHEABLE 1
#endif

#endif /* (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG) */

#if ML_IO_TIMEOUTS_ENABLED && !defined(__x86_64__)
/* x86 does not have the MACHINE_TIMEOUTs types, and the variables are
 * declared elsewhere. */
/* Thresholds after which a slow MMIO read/write is reported.
 * (The duplicate declaration of report_phy_read_delay_to that used to
 * follow report_phy_write_delay_to has been removed.) */
extern machine_timeout32_t report_phy_read_delay_to;
extern machine_timeout32_t report_phy_write_delay_to;
/* Thresholds after which a slow MMIO read/write is traced. */
extern machine_timeout32_t trace_phy_read_delay_to;
extern machine_timeout32_t trace_phy_write_delay_to;
/* NOTE(review): "osbt" presumably requests an OS backtrace when the
 * corresponding report fires — confirm against the implementation. */
extern unsigned int report_phy_read_osbt;
extern unsigned int report_phy_write_osbt;
#endif /* ML_IO_TIMEOUTS_ENABLED && !defined(__x86_64__) */

#endif /* XNU_KERNEL_PRIVATE */
225 
226 __END_DECLS
227 
228 #endif /* _MACHINE_MACHINE_ROUTINES_H */
229