1 /* 2 * Copyright (c) 2007 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

/*
 * Internal (kernel-private) per-CPU data definitions for the ARM/ARM64
 * platform layer: the per-CPU data block (cpu_data_t), the table that maps
 * CPUs to their data blocks (CpuDataEntries), and the reset-handler
 * bootstrap structures.
 *
 * NOTE(review): several of these layouts appear to be consumed by low-level
 * reset/wake code via physical addresses (see reset_handler_data_t and
 * cpu_data_entry_t below), so field order and padding are ABI-sensitive —
 * do not reorder members without auditing the assembly consumers.
 */

#ifndef ARM_CPU_DATA_INTERNAL
#define ARM_CPU_DATA_INTERNAL

#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/percpu.h>
#include <kern/processor.h>
#include <pexpert/pexpert.h>
#include <arm/dbgwrap.h>
#include <arm/machine_routines.h>
#include <arm/proc_reg.h>
#include <arm/thread.h>
#include <arm/pmap.h>

#if MONOTONIC
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <san/kcov_data.h>

/* Nanoseconds per tick at a fixed 100 Hz clock rate (10 ms). */
#define NSEC_PER_HZ     (NSEC_PER_SEC / 100)

/*
 * Bootstrap data handed to the low-level reset/wake handler.  All members
 * are physical addresses, as the handler runs before the MMU is (re)enabled.
 */
typedef struct reset_handler_data {
	vm_offset_t     assist_reset_handler;   /* Assist handler phys address */
	vm_offset_t     cpu_data_entries;       /* CpuDataEntries phys address */
#if !__arm64__
	vm_offset_t     boot_args;              /* BootArgs phys address */
#endif
} reset_handler_data_t;

extern  reset_handler_data_t    ResetHandlerData;

/* Put the static check for cpumap_t here as it's defined in <kern/processor.h> */
static_assert(sizeof(cpumap_t) * CHAR_BIT >= MAX_CPUS, "cpumap_t bitvector is too small for current MAX_CPUS value");

/*
 * Per-CPU copy-window region: a block of kernel virtual address space at the
 * top of the kernel map, carved into CPUWINDOWS_MAX page-sized windows per CPU.
 */
#ifdef  __arm__
#define CPUWINDOWS_BASE_MASK    0xFFF00000UL
#else
#define CPUWINDOWS_BASE_MASK    0xFFFFFFFFFFE00000UL
#endif
#define CPUWINDOWS_BASE         (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK)
#define CPUWINDOWS_TOP          (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * ARM_PGBYTES))

static_assert((CPUWINDOWS_BASE >= VM_MIN_KERNEL_ADDRESS) && ((CPUWINDOWS_TOP - 1) <= VM_MAX_KERNEL_ADDRESS),
    "CPU copy windows too large for CPUWINDOWS_BASE_MASK value");

/*
 * One entry per CPU in the CpuDataEntries table, recording both the physical
 * and virtual address of that CPU's cpu_data.  On 32-bit ARM the explicit
 * padding words keep the entry size/offsets stable; other architectures must
 * re-check the padding (see #error below).
 */
typedef struct cpu_data_entry {
	void                           *cpu_data_paddr;          /* Cpu data physical address */
	struct  cpu_data               *cpu_data_vaddr;          /* Cpu data virtual address */
#if __arm__
	uint32_t                        cpu_data_offset_8;
	uint32_t                        cpu_data_offset_12;
#elif __arm64__
#else
#error Check cpu_data_entry padding for this architecture
#endif
} cpu_data_entry_t;


/*
 * Per-CPU real-time clock timer: a queue of pending timers plus the nearest
 * deadline.  The trailing zero-width bit-field terminates the flag group.
 */
typedef struct rtclock_timer {
	mpqueue_head_t                  queue;          /* pending timer entries */
	uint64_t                        deadline;       /* soonest expiry */
	uint32_t                        is_set:1,       /* a deadline is armed */
	    has_expired:1,                              /* deadline has fired */
	:0;
} rtclock_timer_t;

/* Per-CPU exception/interrupt event counters. */
typedef struct {
	/*
	 * The wake variants of these counters are reset to 0 when the CPU wakes.
	 */
	uint64_t irq_ex_cnt;            /* IRQ exceptions */
	uint64_t irq_ex_cnt_wake;
	uint64_t ipi_cnt;               /* inter-processor interrupts */
	uint64_t ipi_cnt_wake;
	uint64_t timer_cnt;             /* timer interrupts */
#if MONOTONIC
	uint64_t pmi_cnt_wake;          /* performance-monitor interrupts since wake */
#endif /* MONOTONIC */
	uint64_t undef_ex_cnt;          /* undefined-instruction exceptions */
	uint64_t unaligned_cnt;         /* unaligned-access faults */
	uint64_t vfp_cnt;               /* VFP/SIMD exceptions */
	uint64_t data_ex_cnt;           /* data aborts */
	uint64_t instr_ex_cnt;          /* instruction/prefetch aborts */
} cpu_stat_t;

/*
 * The per-CPU data block.  One instance exists per CPU (declared via
 * PERCPU_DECL below); low-level code reaches it through CpuDataEntries.
 */
typedef struct cpu_data {
	short                           cpu_number;             /* logical CPU number */
	unsigned short                  cpu_flags;              /* SleepState/StartedState (see below) */
	int                             cpu_type;
	int                             cpu_subtype;
	int                             cpu_threadtype;

	/* interrupt stack */
	vm_offset_t                     istackptr;
	vm_offset_t                     intstack_top;
#if __arm64__
	/* exception stack (arm64) */
	vm_offset_t                     excepstackptr;
	vm_offset_t                     excepstack_top;
#else
	/* FIQ stack (arm32) */
	vm_offset_t                     fiqstackptr;
	vm_offset_t                     fiqstack_top;
#endif
	thread_t                        cpu_active_thread;      /* thread currently running here */
	vm_offset_t                     cpu_active_stack;
	cpu_id_t                        cpu_id;
	unsigned volatile int           cpu_signal;             /* pending cross-CPU signal bits */
	ast_t                           cpu_pending_ast;
	cache_dispatch_t                cpu_cache_dispatch;

#if __arm64__
	uint64_t                        cpu_base_timebase;
	uint64_t                        cpu_timebase;
#else
	/*
	 * 32-bit ARM splits each 64-bit timebase into low/high halves; the
	 * unions let callers access either view.  The cpu_*_low/high macros
	 * below preserve the field-style access used elsewhere.
	 */
	union {
		struct {
			uint32_t low;
			uint32_t high;
		} split;
		struct {
			uint64_t val;
		} raw;
	}                               cbtb;
#define cpu_base_timebase_low   cbtb.split.low
#define cpu_base_timebase_high  cbtb.split.high

	union {
		struct {
			uint32_t low;
			uint32_t high;
		} split;
		struct {
			uint64_t val;
		} raw;
	}                               ctb;
#define cpu_timebase_low        ctb.split.low
#define cpu_timebase_high       ctb.split.high
#endif

	bool                            cpu_hibernate;  /* This cpu is currently hibernating the system */
	bool                            cpu_running;
	bool                            cluster_master;
#if __ARM_ARCH_8_5__
	bool                            sync_on_cswitch;
#endif /* __ARM_ARCH_8_5__ */
	/* true if processor_start() or processor_exit() is operating on this CPU */
	bool                            in_state_transition;

	/* decrementer (timer) hardware accessors */
	uint32_t                        cpu_decrementer;
	get_decrementer_t               cpu_get_decrementer_func;
	set_decrementer_t               cpu_set_decrementer_func;
	fiq_handler_t                   cpu_get_fiq_handler;
	void                           *cpu_tbd_hardware_addr;
	void                           *cpu_tbd_hardware_val;

	/* idle notification and latency bookkeeping */
	processor_idle_t                cpu_idle_notify;
	uint64_t                        cpu_idle_latency;
	uint64_t                        cpu_idle_pop;

#if __arm__ || __ARM_KERNEL_PROTECT__
	vm_offset_t                     cpu_exc_vectors;        /* exception vector base */
#endif /* __ARM_KERNEL_PROTECT__ */
	vm_offset_t                     cpu_reset_handler;
	uintptr_t                       cpu_reset_assist;
	uint32_t                        cpu_reset_type;

	/* platform interrupt dispatch state */
	unsigned int                    interrupt_source;
	void                           *cpu_int_state;
	IOInterruptHandler              interrupt_handler;
	void                           *interrupt_nub;
	void                           *interrupt_target;
	void                           *interrupt_refCon;

	idle_timer_t                    idle_timer_notify;
	void                           *idle_timer_refcon;
	uint64_t                        idle_timer_deadline;

	uint64_t                        rtcPop;                 /* next rtclock deadline */
	rtclock_timer_t                 rtclock_timer;
	struct _rtclock_data_          *rtclock_datap;

	arm_debug_state_t              *cpu_user_debug; /* Current debug state */
	vm_offset_t                     cpu_debug_interface_map;

	volatile int                    debugger_active;
	volatile int                    PAB_active; /* Tells the console if we are dumping backtraces */

	/* cross-call (xcall) argument slots; imm_* are the immediate variants */
	void                           *cpu_xcall_p0;
	void                           *cpu_xcall_p1;
	void                           *cpu_imm_xcall_p0;
	void                           *cpu_imm_xcall_p1;

#if defined(ARMA7)
	/* Cortex-A7 cache clean/flush coordination state */
	volatile uint32_t               cpu_CLW_active;
	volatile uint64_t               cpu_CLWFlush_req;
	volatile uint64_t               cpu_CLWFlush_last;
	volatile uint64_t               cpu_CLWClean_req;
	volatile uint64_t               cpu_CLWClean_last;
#endif

#if __arm64__
	vm_offset_t                     coresight_base[CORESIGHT_REGIONS];     /* CoreSight debug region mappings */
#endif

	/* CCC ARMv8 registers */
	uint64_t                        cpu_regmap_paddr;

	uint32_t                        cpu_phys_id;            /* physical (MPIDR-style) CPU id */
	uint32_t                        cpu_l2_access_penalty;
	platform_error_handler_t        platform_error_handler;

	int                             cpu_mcount_off;

	/* token value observed in cpu_sleep_token when this CPU is on the sleep path */
#define ARM_CPU_ON_SLEEP_PATH   0x50535553UL
	volatile unsigned int           cpu_sleep_token;
	unsigned int                    cpu_sleep_token_last;

	/* cluster/cache topology */
	cluster_type_t                  cpu_cluster_type;
	uint32_t                        cpu_cluster_id;
	uint32_t                        cpu_l2_id;
	uint32_t                        cpu_l2_size;
	uint32_t                        cpu_l3_id;
	uint32_t                        cpu_l3_size;

	/* debugger halt state; CPU_HALTED_WITH_STATE means halt_state below is valid */
	enum {
		CPU_NOT_HALTED = 0,
		CPU_HALTED,
		CPU_HALTED_WITH_STATE
	} halt_status;
#if defined(HAS_APPLE_PAC)
	/* per-CPU pointer-authentication keys */
	uint64_t                        rop_key;
	uint64_t                        jop_key;
#endif /* defined(HAS_APPLE_PAC) */

	/* large structs with large alignment requirements */
#if KPC
	/* double-buffered performance counter data */
	uint64_t                       *cpu_kpc_buf[2];
	/* PMC shadow and reload value buffers */
	uint64_t                       *cpu_kpc_shadow;
	uint64_t                       *cpu_kpc_reload;
#endif
#if MONOTONIC
	struct mt_cpu                   cpu_monotonic;
#endif /* MONOTONIC */
	cpu_stat_t                      cpu_stat;               /* exception/interrupt counters */
#if !XNU_MONITOR
	struct pmap_cpu_data            cpu_pmap_cpu_data;
#endif
	dbgwrap_thread_state_t          halt_state;             /* saved state when halt_status == CPU_HALTED_WITH_STATE */
#if DEVELOPMENT || DEBUG
	/* WFE (wait-for-event) instrumentation counters */
	uint64_t                        wfe_count;
	uint64_t                        wfe_deadline_checks;
	uint64_t                        wfe_terminations;
#endif
#if CONFIG_KCOV
	kcov_cpu_data_t                 cpu_kcov_data;          /* kernel coverage sanitizer state */
#endif
#if __arm64__
	/**
	 * Stash the state of the system when an IPI is received. This will be
	 * dumped in the case a panic is getting triggered.
	 */
	uint64_t                        ipi_pc;
	uint64_t                        ipi_lr;
	uint64_t                        ipi_fp;
#endif
} cpu_data_t;

/*
 * cpu_flags
 */
#define SleepState      0x0800
#define StartedState    0x1000

/* Global per-CPU tables; CpuDataEntries is also reachable by physical address
 * from the reset handler (see ResetHandlerData above). */
extern  cpu_data_entry_t        CpuDataEntries[MAX_CPUS];
PERCPU_DECL(cpu_data_t, cpu_data);
/* The boot CPU's data block is the per-CPU master copy. */
#define BootCpuData     __PERCPU_NAME(cpu_data)
extern  boot_args              *BootArgs;

/* Low-level exception/reset vector symbols, per architecture. */
#if __arm__
extern  unsigned int           *ExceptionLowVectorsBase;
extern  unsigned int           *ExceptionVectorsTable;
#elif __arm64__
extern  unsigned int            LowResetVectorBase;
extern  unsigned int            LowResetVectorEnd;
#if WITH_CLASSIC_S2R
extern  uint8_t                 SleepToken[8];
#endif
extern  unsigned int            LowExceptionVectorBase;
#else
#error Unknown arch
#endif

/* Accessors and lifecycle operations for per-CPU data blocks. */
extern cpu_data_t      *cpu_datap(int cpu);
extern cpu_data_t      *cpu_data_alloc(boolean_t is_boot);
extern void             cpu_stack_alloc(cpu_data_t*);
extern void             cpu_data_init(cpu_data_t *cpu_data_ptr);
extern void             cpu_data_free(cpu_data_t *cpu_data_ptr);
extern kern_return_t    cpu_data_register(cpu_data_t *cpu_data_ptr);
extern cpu_data_t      *processor_to_cpu_datap( processor_t processor);

#if __arm64__
/* System registers saved/restored across sleep (currently just TCR_EL1). */
typedef struct sysreg_restore {
	uint64_t                tcr_el1;
} sysreg_restore_t;

extern sysreg_restore_t sysreg_restore;
#endif /* __arm64__ */

#endif /* ARM_CPU_DATA_INTERNAL */