/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _MACHINE_MACHINE_ROUTINES_H
#define _MACHINE_MACHINE_ROUTINES_H

#include <sys/cdefs.h>

/* Select the per-architecture machine_routines definitions. */
#if defined (__i386__) || defined(__x86_64__)
#include "i386/machine_routines.h"
#elif defined (__arm__) || defined (__arm64__)
#include "arm/machine_routines.h"
#else
#error architecture not supported
#endif

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
/* XNU-internal interfaces: hidden from external linkage. */
#pragma GCC visibility push(hidden)

/*!
 * @function ml_cpu_can_exit
 * @brief Check whether the platform code allows |cpu_id| to be
 *        shut down at runtime.
 * @param cpu_id CPU being considered for shutdown
 * @param reason Reason the shutdown is being requested
 * @return true if allowed, false otherwise
 */
bool ml_cpu_can_exit(int cpu_id, processor_reason_t reason);

/*!
 * @function ml_cpu_begin_state_transition
 * @brief Tell the platform code that processor_start() or
 *        processor_exit() is about to begin for |cpu_id|. This
 *        can block.
 * @param cpu_id CPU that is (potentially) going up or down
 */
void ml_cpu_begin_state_transition(int cpu_id);

/*!
 * @function ml_cpu_end_state_transition
 * @brief Tell the platform code that processor_start() or
 *        processor_exit() is finished for |cpu_id|. This
 *        can block. Can be called from a different thread from
 *        ml_cpu_begin_state_transition().
 * @param cpu_id CPU that is (potentially) going up or down
 */
void ml_cpu_end_state_transition(int cpu_id);

/*!
 * @function ml_cpu_begin_loop
 * @brief Acquire a global lock that prevents processor_start() or
 *        processor_exit() from changing any CPU states for the
 *        duration of a loop. This can block.
 */
void ml_cpu_begin_loop(void);

/*!
 * @function ml_cpu_end_loop
 * @brief Release the global lock acquired by ml_cpu_begin_loop().
 *        Must be called from the same thread as ml_cpu_begin_loop().
 */
void ml_cpu_end_loop(void);

/*!
 * @function ml_early_cpu_max_number()
 * @brief Returns an early maximum cpu number the kernel will ever use.
 *
 * @return the maximum cpu number the kernel will ever use.
 *
 * @discussion
 * The value returned by this function might be an over-estimate,
 * but is more precise than @c MAX_CPUS.
 *
 * Unlike @c real_ncpus which is only initialized late in boot,
 * this can be called during startup after the @c STARTUP_SUB_TUNABLES
 * subsystem has been initialized.
 */
int ml_early_cpu_max_number(void);

/*!
 * @function ml_cpu_power_enable
 * @abstract Enable voltage rails to a CPU prior to bringing it up
 * @discussion Called from the scheduler to enable any voltage rails
 *             needed by a CPU. This should happen before the
 *             CPU_BOOT_REQUESTED broadcast. This does not boot the
 *             CPU and it may be a no-op on some platforms. This must be
 *             called from a schedulable context.
 * @param cpu_id The logical CPU ID (from the topology) of the CPU to be booted
 */
void ml_cpu_power_enable(int cpu_id);

/*!
 * @function ml_cpu_power_disable
 * @abstract Disable voltage rails to a CPU after bringing it down
 * @discussion Called from the scheduler to disable any voltage rails
 *             that are no longer needed by an offlined CPU or cluster.
 *             This should happen after the CPU_EXITED broadcast.
 *             This does not halt the CPU and it may be a no-op on some
 *             platforms. This must be called from a schedulable context.
 * @param cpu_id The logical CPU ID (from the topology) of the halted CPU
 */
void ml_cpu_power_disable(int cpu_id);

#pragma GCC visibility pop
#endif /* defined(XNU_KERNEL_PRIVATE) */

/*!
 * @enum cpu_event
 * @abstract Broadcast events allowing clients to hook CPU state transitions.
 * @constant CPU_BOOT_REQUESTED Called from processor_start(); may block.
 * @constant CPU_BOOTED Called from platform code on the newly-booted CPU; may not block.
 * @constant CPU_ACTIVE Called from scheduler code; may block.
 * @constant CLUSTER_ACTIVE Called from platform code; may block.
 * @constant CPU_EXIT_REQUESTED Called from processor_exit(); may block.
 * @constant CPU_DOWN Called from platform code on the disabled CPU; may not block.
 * @constant CLUSTER_EXIT_REQUESTED Called from platform code; may block.
 * @constant CPU_EXITED Called after CPU is stopped; may block.
 */
enum cpu_event {
	CPU_BOOT_REQUESTED = 0,
	CPU_BOOTED,
	CPU_ACTIVE,
	CLUSTER_ACTIVE,
	CPU_EXIT_REQUESTED,
	CPU_DOWN,
	CLUSTER_EXIT_REQUESTED,
	CPU_EXITED,
};

/* Callback invoked for each broadcast cpu_event; |param| is the pointer
 * supplied at registration time.
 * NOTE(review): the meaning of the bool return value is not documented
 * here — confirm against the broadcast implementation. */
typedef bool (*cpu_callback_t)(void *param, enum cpu_event event, unsigned int cpu_or_cluster);

/*!
 * @function cpu_event_register_callback
 * @abstract Register a function to be called on CPU state changes.
 * @param fn Function to call on state change events.
 * @param param Optional argument to be passed to the callback (e.g. object pointer).
 */
void cpu_event_register_callback(cpu_callback_t fn, void *param);

/*!
 * @function cpu_event_unregister_callback
 * @abstract Unregister a previously-registered callback function.
 * @param fn Function pointer previously passed to cpu_event_register_callback().
 */
void cpu_event_unregister_callback(cpu_callback_t fn);

#if XNU_KERNEL_PRIVATE
/*!
 * @function ml_broadcast_cpu_event
 * @abstract Internal XNU function used to broadcast CPU state changes to callers.
 * @param event CPU event that is occurring.
 * @param cpu_or_cluster Logical CPU ID of the core (or cluster) affected by the event.
 */
void ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster);
#endif

/*!
 * @function ml_io_read()
 * @brief Perform an MMIO read access
 *
 * @return The value resulting from the read.
 *
 */
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

/*!
 * @function ml_io_write()
 * @brief Perform an MMIO write access
 *
 */
void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
void ml_io_write8(uintptr_t vaddr, uint8_t val);
void ml_io_write16(uintptr_t vaddr, uint16_t val);
void ml_io_write32(uintptr_t vaddr, uint32_t val);
void ml_io_write64(uintptr_t vaddr, uint64_t val);

#if XNU_KERNEL_PRIVATE
/*
 * ml_io access timeouts and tracing.
 *
 * We are specific in what to compile in, in order to not burden
 * heavily used code with paths that will never be used on common
 * configurations.
 */

/* ml_io_read/write timeouts are generally enabled on macOS, because
 * they may help developers. */
#if (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG)

#define ML_IO_TIMEOUTS_ENABLED 1

/* Simulating stretched IO is only for DEVELOPMENT || DEBUG. */
#if DEVELOPMENT || DEBUG
#define ML_IO_SIMULATE_STRETCHED_ENABLED 1
#endif

/* We also check that the memory is mapped non-cacheable on x86 internally. */
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
#define ML_IO_VERIFY_UNCACHEABLE 1
#endif

#endif /* (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG) */
#endif /* XNU_KERNEL_PRIVATE */

#if KERNEL_PRIVATE

/*!
 * @function ml_io_increase_timeouts
 * @brief Increase the ml_io_read* and ml_io_write*
 *        timeouts for a region of VA space
 *        [`iovaddr_base', `iovaddr_base' + `size').
 * @discussion This function is intended for building an
 *             allowlist of known-misbehaving register spaces
 *             on specific peripherals. `size' must be between
 *             1 and 4096 inclusive, and the VA range must not
 *             overlap with any ranges previously passed to
 *             ml_io_increase_timeouts().
 * @note This function has no effect when the new timeouts are
 *       shorter than the global timeouts.
 * @param iovaddr_base Base VA of the target region
 * @param size Size of the target region, in bytes
 * @param read_timeout_us New read timeout, in microseconds
 * @param write_timeout_us New write timeout, in microseconds
 * @return 0 if successful, or KERN_INVALID_ARGUMENT if either
 *         the VA range or timeout is invalid.
 */
OS_WARN_RESULT
int ml_io_increase_timeouts(uintptr_t iovaddr_base, unsigned int size, uint32_t read_timeout_us, uint32_t write_timeout_us);

/*!
 * @function ml_io_reset_timeouts
 * @brief Unregister custom timeouts previously registered by
 *        ml_io_increase_timeouts().
 * @discussion The caller must use the exact `iovaddr_base' and `size'
 *             range passed to a previous ml_io_increase_timeouts()
 *             call. Unregistering a smaller subrange is unsupported
 *             and will return an error.
 * @param iovaddr_base Base VA previously passed to ml_io_increase_timeouts()
 * @param size Size previously passed to ml_io_increase_timeouts()
 * @return 0 if successful, or KERN_NOT_FOUND if the specified range
 *         does not match a previously-registered timeout.
 */
OS_WARN_RESULT
int ml_io_reset_timeouts(uintptr_t iovaddr_base, unsigned int size);

#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE

#if ML_IO_TIMEOUTS_ENABLED && !defined(__x86_64__)
/* x86 does not have the MACHINE_TIMEOUTs types, and the variables are
 * declared elsewhere.
*/ 281 extern machine_timeout_t report_phy_read_delay_to; 282 extern machine_timeout_t report_phy_write_delay_to; 283 extern machine_timeout_t report_phy_read_delay_to; 284 extern machine_timeout_t trace_phy_read_delay_to; 285 extern machine_timeout_t trace_phy_write_delay_to; 286 extern unsigned int report_phy_read_osbt; 287 extern unsigned int report_phy_write_osbt; 288 #endif /* ML_IO_TIMEOUTS_ENABLED */ 289 290 void ml_get_cluster_type_name(cluster_type_t cluster_type, char *name, 291 size_t name_size); 292 293 #endif /* XNU_KERNEL_PRIVATE */ 294 295 __END_DECLS 296 297 #endif /* _MACHINE_MACHINE_ROUTINES_H */ 298