/*
 * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

/*
 * Internal ARM per-CPU data definitions: the cpu_data_t block kept for each
 * CPU, the low-level reset-handler handoff structure, per-CPU statistics,
 * IPI signal bits, and the accessors that map between processors and their
 * cpu_data.
 */

#ifndef ARM_CPU_DATA_INTERNAL
#define ARM_CPU_DATA_INTERNAL

#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/percpu.h>
#include <kern/processor.h>
#include <os/base.h>
#include <pexpert/pexpert.h>
#include <arm/dbgwrap.h>
#include <arm/machine_routines.h>
#include <arm64/proc_reg.h>
#include <arm/thread.h>
#include <arm/pmap.h>
#include <machine/monotonic.h>
#include <san/kcov_data.h>

/* Nanoseconds per tick at a 100 Hz tick rate. */
#define NSEC_PER_HZ     (NSEC_PER_SEC / 100)

/*
 * Physical addresses handed to the low-level reset path so a resetting CPU
 * can locate its handler and per-CPU data before virtual mappings are live.
 */
typedef struct reset_handler_data {
	vm_offset_t     assist_reset_handler;   /* Assist handler phys address */
	vm_offset_t     cpu_data_entries;       /* CpuDataEntries phys address */
} reset_handler_data_t;

#if !CONFIG_SPTM
extern reset_handler_data_t     ResetHandlerData;
#endif

/* Put the static check for cpumap_t here as it's defined in <kern/processor.h> */
static_assert(sizeof(cpumap_t) * CHAR_BIT >= MAX_CPUS, "cpumap_t bitvector is too small for current MAX_CPUS value");

/*
 * Per-CPU copy-window VA range carved from the top of the kernel map:
 * CPUWINDOWS_MAX windows of one page (ARM_PGBYTES) each, per CPU.
 */
#define CPUWINDOWS_BASE_MASK    0xFFFFFFFFFFD00000UL
#define CPUWINDOWS_BASE         (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK)
#define CPUWINDOWS_TOP          (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * ARM_PGBYTES))

#ifndef __BUILDING_XNU_LIBRARY__ /* in user-mode kernel addresses are low */
static_assert((CPUWINDOWS_BASE >= VM_MIN_KERNEL_ADDRESS) && ((CPUWINDOWS_TOP - 1) <= VM_MAX_KERNEL_ADDRESS),
    "CPU copy windows too large for CPUWINDOWS_BASE_MASK value");
#endif

/*
 * One entry per CPU in CpuDataEntries: both the physical and virtual
 * address of that CPU's cpu_data, so it is reachable with the MMU off or on.
 */
typedef struct cpu_data_entry {
	void                   *cpu_data_paddr; /* Cpu data physical address */
	struct cpu_data        *cpu_data_vaddr; /* Cpu data virtual address */
#if !defined(__arm64__)
#error Check cpu_data_entry padding for this architecture
#endif
} cpu_data_entry_t;


/* Per-CPU rtclock timer queue and the deadline currently programmed for it. */
typedef struct rtclock_timer {
	mpqueue_head_t  queue;
	uint64_t        deadline;
	uint32_t
	    is_set:1,
	    has_expired:1,
	:0;             /* zero-width bit-field: terminates the bit-field run */
} rtclock_timer_t;

/* Per-CPU interrupt/exception event counters. */
typedef struct {
	/*
	 * The wake variants of these counters are reset to 0 when the CPU wakes.
	 */
	uint64_t        irq_ex_cnt;
	uint64_t        irq_ex_cnt_wake;
	uint64_t        ipi_cnt;
	uint64_t        ipi_cnt_wake;
	uint64_t        timer_cnt;
#if CONFIG_CPU_COUNTERS
	uint64_t        pmi_cnt_wake;
#endif /* CONFIG_CPU_COUNTERS */
	uint64_t        undef_ex_cnt;
	uint64_t        unaligned_cnt;
	uint64_t        vfp_cnt;
	uint64_t        data_ex_cnt;
	uint64_t        instr_ex_cnt;
} cpu_stat_t;

/* CPU lifecycle state bits, held in cpu_data.cpu_flags. */
__options_closed_decl(cpu_flags_t, uint16_t, {
	SleepState = 0x0800,
	/* For the boot processor, StartedState means 'interrupts initialized' - it is already running */
	StartedState = 0x1000,
	/* For the boot processor, InitState means 'cpu_data fully initialized' - it is already running */
	InitState = 0x2000,
});

/* Cross-CPU signal (IPI) request bits, held in cpu_data.cpu_signal. */
__options_closed_decl(cpu_signal_t, unsigned int, {
	SIGPnop = 0x00000000U,                  /* Send IPI with no service */
	/* 0x1U unused */
	/* 0x2U unused */
	SIGPxcall = 0x00000004U,                /* Call a function on a processor */
	SIGPast = 0x00000008U,                  /* Request AST check */
	SIGPdebug = 0x00000010U,                /* Request Debug call */
	SIGPLWFlush = 0x00000020U,              /* Request LWFlush call */
	SIGPLWClean = 0x00000040U,              /* Request LWClean call */
	/* 0x80U unused */
	SIGPkppet = 0x00000100U,                /* Request kperf PET handler */
	SIGPxcallImm = 0x00000200U,             /* Send a cross-call, fail if already pending */
	SIGPTimerLocal = 0x00000400U,           /* Update the decrementer via timer_queue_expire_local */
	SIGPdeferred = 0x00000800U,             /* Scheduler deferred IPI to wake core */

	SIGPdisabled = 0x80000000U,             /* Signal disabled */
});

/*
 * The per-CPU data block. One instance exists per CPU (see
 * PERCPU_DECL(cpu_data_t, cpu_data) below); each is also reachable by
 * physical or virtual address through CpuDataEntries.
 */
typedef struct cpu_data {
	unsigned short                  cpu_number;
	_Atomic cpu_flags_t             cpu_flags;
	int                             cpu_type;
	int                             cpu_subtype;
	int                             cpu_threadtype;

	/* Interrupt stack (pointer signed when pointer authentication is enabled). */
	void * XNU_PTRAUTH_SIGNED_PTR("cpu_data.istackptr") istackptr;
	vm_offset_t                     intstack_top;
#if __arm64__
	/* Dedicated exception stack (arm64 only). */
	void * XNU_PTRAUTH_SIGNED_PTR("cpu_data.excepstackptr") excepstackptr;
	vm_offset_t                     excepstack_top;
#endif
	thread_t                        cpu_active_thread;
	vm_offset_t                     cpu_active_stack;
	cpu_id_t                        cpu_id;
	volatile cpu_signal_t           cpu_signal;     /* pending cross-CPU signal bits */
	ast_t                           cpu_pending_ast;
	cache_dispatch_t                cpu_cache_dispatch;

#if __arm64__
	uint64_t                        cpu_base_timebase;
	uint64_t                        cpu_timebase;
#endif
	bool                            cpu_hibernate;  /* This cpu is currently hibernating the system */
	bool                            cpu_running;
	bool                            cluster_master;
	bool                            sync_on_cswitch;
	/* true if processor_start() or processor_exit() is operating on this CPU */
	bool                            in_state_transition;

	uint32_t                        cpu_decrementer;
	get_decrementer_t               cpu_get_decrementer_func;
	set_decrementer_t               cpu_set_decrementer_func;
	fiq_handler_t                   cpu_get_fiq_handler;

	void                           *cpu_tbd_hardware_addr;
	void                           *cpu_tbd_hardware_val;

	/* Idle notification callback and its latency/pop parameters. */
	processor_idle_t                cpu_idle_notify;
	uint64_t                        cpu_idle_latency;
	uint64_t                        cpu_idle_pop;

#if __ARM_KERNEL_PROTECT__
	vm_offset_t                     cpu_exc_vectors;
#endif /* __ARM_KERNEL_PROTECT__ */
	vm_offset_t                     cpu_reset_handler;
	uintptr_t                       cpu_reset_assist;
	uint32_t                        cpu_reset_type;

	/* Platform interrupt dispatch state (handler plus its registration arguments). */
	unsigned int                    interrupt_source;
	void                           *cpu_int_state;
	IOInterruptHandler              interrupt_handler;
	void                           *interrupt_nub;
	void                           *interrupt_target;
	void                           *interrupt_refCon;

	idle_timer_t                    idle_timer_notify;
	void                           *idle_timer_refcon;
	uint64_t                        idle_timer_deadline;

	uint64_t                        rtcPop;
	rtclock_timer_t                 rtclock_timer;
	struct _rtclock_data_          *rtclock_datap;

	arm_debug_state_t              *cpu_user_debug; /* Current debug state */
	vm_offset_t                     cpu_debug_interface_map;

	volatile int                    debugger_active;
	volatile int                    PAB_active;     /* Tells the console if we are dumping backtraces */

	/* Argument slots for SIGPxcall / SIGPxcallImm cross-calls. */
	void                           *cpu_xcall_p0;
	void                           *cpu_xcall_p1;
	void                           *cpu_imm_xcall_p0;
	void                           *cpu_imm_xcall_p1;


#if __arm64__
	vm_offset_t                     coresight_base[CORESIGHT_REGIONS];
#endif

#if NEEDS_MTE_IRG_RESEED
	uint64_t                        cpu_irg_reseed_counter;
#endif

	/* CCC ARMv8 registers */
	uint64_t                        cpu_regmap_paddr;

	uint32_t                        cpu_phys_id;
	platform_error_handler_t        platform_error_handler;

	int                             cpu_mcount_off;

/* Sentinel ('SUSP' in little-endian ASCII) held in cpu_sleep_token while on the sleep path. */
#define ARM_CPU_ON_SLEEP_PATH 0x50535553UL
	volatile unsigned int           cpu_sleep_token;
	unsigned int                    cpu_sleep_token_last;

	cluster_type_t                  cpu_cluster_type;

	/* Cache/cluster topology identifiers. */
	uint32_t                        cpu_cluster_id;
	uint32_t                        cpu_l2_id;
	uint32_t                        cpu_l2_size;
	uint32_t                        cpu_l3_id;
	uint32_t                        cpu_l3_size;

	enum {
		CPU_NOT_HALTED = 0,
		CPU_HALTED,
		CPU_HALTED_WITH_STATE
	} halt_status;
#if defined(HAS_APPLE_PAC)
	uint64_t                        rop_key;
	uint64_t                        jop_key;
#endif /* defined(HAS_APPLE_PAC) */

	/* large structs with large alignment requirements */

	/* double-buffered performance counter data */
	uint64_t                       *cpu_kpc_buf[2];
	/* PMC shadow and reload value buffers */
	uint64_t                       *cpu_kpc_shadow;
	uint64_t                       *cpu_kpc_reload;

#if CONFIG_CPU_COUNTERS
	struct mt_cpu                   cpu_monotonic;
#endif /* CONFIG_CPU_COUNTERS */

	cpu_stat_t                      cpu_stat;
#if !XNU_MONITOR
	struct pmap_cpu_data            cpu_pmap_cpu_data;
#endif
	dbgwrap_thread_state_t          halt_state;
#if DEVELOPMENT || DEBUG
	/* WFE instrumentation counters (debug builds only). */
	uint64_t                        wfe_count;
	uint64_t                        wfe_deadline_checks;
	uint64_t                        wfe_terminations;
#endif
#if CONFIG_KCOV
	kcov_cpu_data_t                 cpu_kcov_data;
#endif
#if __arm64__
	/**
	 * Stash the state of the system when an IPI is received. This will be
	 * dumped in the case a panic is getting triggered.
	 */
	uint64_t                        ipi_pc;
	uint64_t                        ipi_lr;
	uint64_t                        ipi_fp;

	/* Encoded data to store in TPIDR_EL0 on context switch */
	uint64_t                        cpu_tpidr_el0;
#endif

#ifdef APPLEEVEREST
	/* PAs used to apply pio locks in early boot. */
	uint64_t                        cpu_reg_paddr;
	uint64_t                        acc_reg_paddr;
	uint64_t                        cpm_reg_paddr;
#endif

#if HAS_MTE
	uint64_t                        mte_rgsr_el1_seed;
#endif
} cpu_data_t;

extern cpu_data_entry_t         CpuDataEntries[MAX_CPUS];
PERCPU_DECL(cpu_data_t, cpu_data);
/* The boot CPU's cpu_data — presumably the base percpu slot; see <kern/percpu.h>. */
#define BootCpuData     __PERCPU_NAME(cpu_data)
extern boot_args               *BootArgs;

#if __arm64__
/* NOTE(review): presumably assembly/linker-defined vector markers — confirm where defined. */
extern unsigned int             LowResetVectorBase;
extern unsigned int             LowResetVectorEnd;
#if WITH_CLASSIC_S2R
extern uint8_t                  SleepToken[8];
#endif
extern unsigned int             LowExceptionVectorBase;
#else
#error Unknown arch
#endif

/* Lookup/allocation/registration entry points for per-CPU data. */
extern cpu_data_t      *cpu_datap(int cpu);
extern cpu_data_t      *cpu_data_alloc(boolean_t is_boot);
extern void             cpu_stack_alloc(cpu_data_t*);
extern void             cpu_data_init(cpu_data_t *cpu_data_ptr);
extern void             cpu_data_register(cpu_data_t *cpu_data_ptr);
extern cpu_data_t      *processor_to_cpu_datap( processor_t processor);

#endif /* ARM_CPU_DATA_INTERNAL */