1 /*
2 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm64/machine_machdep.h>
30 #include <arm64/proc_reg.h>
31 #include <arm/machine_cpu.h>
32 #include <arm/cpu_internal.h>
33 #include <arm/cpuid.h>
34 #include <arm/cpu_data.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/caches_internal.h>
37 #include <arm/misc_protos.h>
38 #include <arm/machdep_call.h>
39 #include <arm/machine_routines.h>
40 #include <arm/rtclock.h>
41 #include <arm/cpuid_internal.h>
42 #include <arm/cpu_capabilities.h>
43 #include <console/serial_protos.h>
44 #include <kern/machine.h>
45 #include <kern/misc_protos.h>
46 #include <prng/random.h>
47 #include <kern/startup.h>
48 #include <kern/thread.h>
49 #include <kern/timer_queue.h>
50 #include <mach/machine.h>
51 #include <machine/atomic.h>
52 #include <machine/config.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_shared_region.h>
56 #include <vm/vm_map.h>
57 #include <sys/codesign.h>
58 #include <sys/kdebug.h>
59 #include <kern/coalition.h>
60 #include <pexpert/device_tree.h>
61
62 #include <IOKit/IOPlatformExpert.h>
63 #if HIBERNATION
64 #include <IOKit/IOHibernatePrivate.h>
65 #endif /* HIBERNATION */
66
67 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
68 #include <arm64/amcc_rorgn.h>
69 #endif
70
71
72
73 #include <libkern/section_keywords.h>
74
75 /**
76 * On supported hardware, debuggable builds make the HID bits read-only
77 * without locking them. This lets people manually modify HID bits while
78 * debugging, since they can use a debugging tool to first reset the HID
79  * bits back to read/write. However, it will still catch xnu changes that
80 * accidentally write to HID bits after they've been made read-only.
81 */
82
83 #if KPC
84 #include <kern/kpc.h>
85 #endif
86
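/*
 * Extract the CPU (affinity level 0) and cluster (affinity level 1)
 * identifiers from an MPIDR_EL1 value.
 */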
87 #define MPIDR_CPU_ID(mpidr_el1_val) (((mpidr_el1_val) & MPIDR_AFF0_MASK) >> MPIDR_AFF0_SHIFT)
88 #define MPIDR_CLUSTER_ID(mpidr_el1_val) (((mpidr_el1_val) & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT)
89
90 #if HAS_CLUSTER
91 static uint8_t cluster_initialized = 0;
92 #endif
93
94 MACHINE_TIMEOUT_DEV_WRITEABLE(LockTimeOut, "lock", 6e6 /* 0.25s */, MACHINE_TIMEOUT_UNIT_TIMEBASE, NULL);
95 machine_timeout_t LockTimeOutUsec; // computed in ml_init_lock_timeout
96
97 MACHINE_TIMEOUT_DEV_WRITEABLE(TLockTimeOut, "ticket-lock", 3e6 /* 0.125s */, MACHINE_TIMEOUT_UNIT_TIMEBASE, NULL);
98
99 MACHINE_TIMEOUT_DEV_WRITEABLE(MutexSpin, "mutex-spin", 240 /* 10us */, MACHINE_TIMEOUT_UNIT_TIMEBASE, NULL);
100
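/* Mutex spin bounds, derived from MutexSpin in ml_init_lock_timeout(). */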
101 uint64_t low_MutexSpin;
102 int64_t high_MutexSpin;
103
104
105
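/*
 * Maximum WFE hint interval, in absolute time units; derived in
 * ml_init_lock_timeout() from MAX_WFE_HINT_INTERVAL_US or the
 * "max_wfe_us" boot-arg.
 */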
106 static uint64_t ml_wfe_hint_max_interval;
107 #define MAX_WFE_HINT_INTERVAL_US (500ULL)
108
109 /* Must be less than cpu_idle_latency to ensure ml_delay_should_spin is true */
110 TUNABLE(uint32_t, yield_delay_us, "yield_delay_us", 0);
111
112 extern vm_offset_t segLOWEST;
113 extern vm_offset_t segLOWESTTEXT;
114 extern vm_offset_t segLASTB;
115 extern unsigned long segSizeLAST;
116
117 /* ARM64 specific bounds; used to test for presence in the kernelcache. */
118 extern vm_offset_t vm_kernelcache_base;
119 extern vm_offset_t vm_kernelcache_top;
120
121 /* Location of the physmap / physical aperture */
122 extern uint64_t physmap_base;
123
124 extern vm_offset_t arm_vm_kernelcache_phys_start;
125 extern vm_offset_t arm_vm_kernelcache_phys_end;
126
127 #if defined(HAS_IPI)
128 unsigned int gFastIPI = 1;
129 #define kDeferredIPITimerDefault (64 * NSEC_PER_USEC) /* in nanoseconds */
130 static TUNABLE_WRITEABLE(uint64_t, deferred_ipi_timer_ns, "fastipitimeout",
131 kDeferredIPITimerDefault);
132 #endif /* defined(HAS_IPI) */
133
134 thread_t Idle_context(void);
135
136 SECURITY_READ_ONLY_LATE(bool) cpu_config_correct = true;
137
138 SECURITY_READ_ONLY_LATE(static ml_topology_cpu_t) topology_cpu_array[MAX_CPUS];
139 SECURITY_READ_ONLY_LATE(static ml_topology_cluster_t) topology_cluster_array[MAX_CPU_CLUSTERS];
140 SECURITY_READ_ONLY_LATE(static ml_topology_info_t) topology_info = {
141 .version = CPU_TOPOLOGY_VERSION,
142 .cpus = topology_cpu_array,
143 .clusters = topology_cluster_array,
144 };
145
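/*
 * Count of currently-active CPUs per cluster type; maintained by
 * ml_cpu_up_update_counts() / ml_cpu_down_update_counts().
 */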
146 _Atomic unsigned int cluster_type_num_active_cpus[MAX_CPU_TYPES];
147
148 /**
149 * Represents the offset of each cluster within a hypothetical array of MAX_CPUS
150 * entries of an arbitrary data type. This is intended for use by specialized consumers
151 * that must quickly access per-CPU data using only the physical CPU ID (MPIDR_EL1),
152 * as follows:
153 * hypothetical_array[cluster_offsets[AFF1] + AFF0]
154 * Most consumers should instead use general-purpose facilities such as PERCPU or
155 * ml_get_cpu_number().
156 */
157 SECURITY_READ_ONLY_LATE(int64_t) cluster_offsets[MAX_CPU_CLUSTER_PHY_ID + 1];
158
159 SECURITY_READ_ONLY_LATE(static uint32_t) arm64_eventi = UINT32_MAX;
160
161 extern uint32_t lockdown_done;
162
163 /**
164 * Represents regions of virtual address space that should be reserved
165 * (pre-mapped) in each user address space.
166 */
167 static const struct vm_reserved_region vm_reserved_regions[] = {
168 {
169 .vmrr_name = "GPU Carveout",
170 .vmrr_addr = MACH_VM_MIN_GPU_CARVEOUT_ADDRESS,
171 .vmrr_size = (vm_map_size_t)(MACH_VM_MAX_GPU_CARVEOUT_ADDRESS - MACH_VM_MIN_GPU_CARVEOUT_ADDRESS)
172 },
173 /*
174 * Reserve the virtual memory space representing the commpage nesting region
175 * to prevent user processes from allocating memory within it. The actual
176 * page table entries for the commpage are inserted by vm_commpage_enter().
177 * This vm_map_enter() just prevents userspace from allocating/deallocating
178 * anything within the entire commpage nested region.
179 */
180 {
181 .vmrr_name = "commpage nesting",
182 .vmrr_addr = _COMM_PAGE64_NESTING_START,
183 .vmrr_size = _COMM_PAGE64_NESTING_SIZE
184 }
185 };
186
187 uint32_t get_arm_cpu_version(void);
188
189 #if defined(HAS_IPI)
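/*
 * Issue an ACC fast IPI of the given type to the CPU identified by cpu_mpidr,
 * using the local-cluster IPI request register when possible and the
 * cross-cluster register otherwise.
 */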
190 static inline void
191 ml_cpu_signal_type(unsigned int cpu_mpidr, uint32_t type)
192 {
193 #if HAS_CLUSTER
194 uint64_t local_mpidr;
195 /* NOTE: this logic expects that we are called in a non-preemptible
196 * context, or at least one in which the calling thread is bound
197 * to a single CPU. Otherwise we may migrate between choosing which
198 * IPI mechanism to use and issuing the IPI. */
199 MRS(local_mpidr, "MPIDR_EL1");
200 if (MPIDR_CLUSTER_ID(local_mpidr) == MPIDR_CLUSTER_ID(cpu_mpidr)) {
201 uint64_t x = type | MPIDR_CPU_ID(cpu_mpidr);
202 MSR("S3_5_C15_C0_0", x);
203 } else {
204 #define IPI_RR_TARGET_CLUSTER_SHIFT 16
205 uint64_t x = type | (MPIDR_CLUSTER_ID(cpu_mpidr) << IPI_RR_TARGET_CLUSTER_SHIFT) | MPIDR_CPU_ID(cpu_mpidr);
206 MSR("S3_5_C15_C0_1", x);
207 }
208 #else
209 uint64_t x = type | MPIDR_CPU_ID(cpu_mpidr);
210 MSR("S3_5_C15_C0_1", x);
211 #endif
212 /* The recommended local/global IPI sequence is:
213 * DSB <sys> (This ensures visibility of e.g. older stores to the
214 * pending CPU signals bit vector in DRAM prior to IPI reception,
215 * and is present in cpu_signal_internal())
216 * MSR S3_5_C15_C0_1, Xt
217 * ISB
218 */
219 __builtin_arm_isb(ISB_SY);
220 }
221 #endif
222
223 #if !defined(HAS_IPI)
224 __dead2
225 #endif
226 void
227 ml_cpu_signal(unsigned int cpu_mpidr __unused)
228 {
229 #if defined(HAS_IPI)
230 ml_cpu_signal_type(cpu_mpidr, ARM64_REG_IPI_RR_TYPE_IMMEDIATE);
231 #else
232 panic("Platform does not support ACC Fast IPI");
233 #endif
234 }
235
236 #if !defined(HAS_IPI)
237 __dead2
238 #endif
239 void
240 ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs)
241 {
242 #if defined(HAS_IPI)
243 /* adjust IPI_CR timer countdown value for deferred IPI
244 * accepts input in nanosecs, convert to absolutetime (REFCLK ticks),
245 * clamp maximum REFCLK ticks to 0xFFFF (16 bit field)
246 *
247 * global register, should only require a single write to update all
248 * CPU cores: from Skye ACC user spec section 5.7.3.3
249 *
250 * IPICR is a global register but there are two copies in ACC: one at pBLK and one at eBLK.
251 * IPICR write SPR token also traverses both pCPM and eCPM rings and updates both copies.
252 */
253 uint64_t abstime;
254
255 nanoseconds_to_absolutetime(nanosecs, &abstime);
256
257 abstime = MIN(abstime, 0xFFFF);
258
259 /* update deferred_ipi_timer_ns with the new clamped value */
260 absolutetime_to_nanoseconds(abstime, &deferred_ipi_timer_ns);
261
262 MSR("S3_5_C15_C3_1", abstime);
263 #else
264 (void)nanosecs;
265 panic("Platform does not support ACC Fast IPI");
266 #endif
267 }
268
269 uint64_t
270 ml_cpu_signal_deferred_get_timer()
271 {
272 #if defined(HAS_IPI)
273 return deferred_ipi_timer_ns;
274 #else
275 return 0;
276 #endif
277 }
278
279 #if !defined(HAS_IPI)
280 __dead2
281 #endif
282 void
283 ml_cpu_signal_deferred(unsigned int cpu_mpidr __unused)
284 {
285 #if defined(HAS_IPI)
286 ml_cpu_signal_type(cpu_mpidr, ARM64_REG_IPI_RR_TYPE_DEFERRED);
287 #else
288 panic("Platform does not support ACC Fast IPI deferral");
289 #endif
290 }
291
292 #if !defined(HAS_IPI)
293 __dead2
294 #endif
295 void
296 ml_cpu_signal_retract(unsigned int cpu_mpidr __unused)
297 {
298 #if defined(HAS_IPI)
299 ml_cpu_signal_type(cpu_mpidr, ARM64_REG_IPI_RR_TYPE_RETRACT);
300 #else
301 panic("Platform does not support ACC Fast IPI retraction");
302 #endif
303 }
304
305 extern uint32_t idle_proximate_io_wfe_unmasked;
306
307 #define CPUPM_IDLE_WFE 0x5310300
308 static bool
309 wfe_process_recommendation(void)
310 {
311 bool ipending = false;
312 if (__probable(idle_proximate_io_wfe_unmasked == 1)) {
313 /* Check for an active perf. controller generated
314 * WFE recommendation for this cluster.
315 */
316 cpu_data_t *cdp = getCpuDatap();
317 uint32_t cid = cdp->cpu_cluster_id;
318 uint64_t wfe_ttd = 0;
319 uint64_t wfe_deadline = 0;
320
321 if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) {
322 wfe_deadline = mach_absolute_time() + wfe_ttd;
323 }
324
325 if (wfe_deadline != 0) {
326 /* Poll issuing event-bounded WFEs until an interrupt
327 * arrives or the WFE recommendation expires
328 */
329 #if DEVELOPMENT || DEBUG
330 uint64_t wc = cdp->wfe_count;
331 KDBG(CPUPM_IDLE_WFE | DBG_FUNC_START, ipending, wc, wfe_ttd, cdp->cpu_stat.irq_ex_cnt_wake);
332 #endif
333 /* Issue WFE until the recommendation expires,
334 * with IRQs unmasked.
335 */
336 ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cdp, true, true);
337 #if DEVELOPMENT || DEBUG
338 KDBG(CPUPM_IDLE_WFE | DBG_FUNC_END, ipending, cdp->wfe_count - wc, wfe_deadline, cdp->cpu_stat.irq_ex_cnt_wake);
339 #endif
340 }
341 }
342 return ipending;
343 }
344
345 void
346 machine_idle(void)
347 {
348 /* Interrupts are expected to be masked on entry or re-entry via
349 * Idle_load_context()
350 */
351 assert((__builtin_arm_rsr("DAIF") & (DAIF_IRQF | DAIF_FIQF)) == (DAIF_IRQF | DAIF_FIQF));
352 /* Check for, and act on, a WFE recommendation.
353 * Bypasses context spill/fill for a minor perf. increment.
354 * May unmask and restore IRQ+FIQ mask.
355 */
356 if (wfe_process_recommendation() == false) {
357 /* If WFE recommendation absent, or WFE deadline
358 * arrived with no interrupt pending/processed,
359 * fall back to WFI.
360 */
361 Idle_context();
362 }
363 __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
364 }
365
366 void
367 OSSynchronizeIO(void)
368 {
369 __builtin_arm_dsb(DSB_SY);
370 }
371
372 uint64_t
373 get_aux_control(void)
374 {
375 uint64_t value;
376
377 MRS(value, "ACTLR_EL1");
378 return value;
379 }
380
381 uint64_t
382 get_mmu_control(void)
383 {
384 uint64_t value;
385
386 MRS(value, "SCTLR_EL1");
387 return value;
388 }
389
390 uint64_t
391 get_tcr(void)
392 {
393 uint64_t value;
394
395 MRS(value, "TCR_EL1");
396 return value;
397 }
398
399 boolean_t
400 ml_get_interrupts_enabled(void)
401 {
402 uint64_t value;
403
404 MRS(value, "DAIF");
405 if (value & DAIF_IRQF) {
406 return FALSE;
407 }
408 return TRUE;
409 }
410
411 pmap_paddr_t
412 get_mmu_ttb(void)
413 {
414 pmap_paddr_t value;
415
416 MRS(value, "TTBR0_EL1");
417 return value;
418 }
419
420 uint32_t
421 get_arm_cpu_version(void)
422 {
423 uint32_t value = machine_read_midr();
424
425 /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
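	/* For example, variant 2, revision 1 yields 0x21. */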
426 return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4));
427 }
428
429 bool
430 ml_feature_supported(uint32_t feature_bit)
431 {
432 uint64_t aidr_el1_value = 0;
433
434 MRS(aidr_el1_value, "AIDR_EL1");
435
436
437 return aidr_el1_value & feature_bit;
438 }
439
440 /*
441 * user_cont_hwclock_allowed()
442 *
443 * Indicates whether we allow EL0 to read the virtual timebase (CNTVCT_EL0)
444 * as a continuous time source (e.g. from mach_continuous_time)
445 */
446 boolean_t
447 user_cont_hwclock_allowed(void)
448 {
449 #if HAS_CONTINUOUS_HWCLOCK
450 return TRUE;
451 #else
452 return FALSE;
453 #endif
454 }
455
456 /*
457 * user_timebase_type()
458 *
459 * Indicates type of EL0 virtual timebase read (CNTVCT_EL0).
460 *
461 * USER_TIMEBASE_NONE: EL0 has no access to timebase register
462 * USER_TIMEBASE_SPEC: EL0 has access to speculative timebase reads (CNTVCT_EL0)
463 * USER_TIMEBASE_NOSPEC: EL0 has access to non speculative timebase reads (CNTVCTSS_EL0)
464 *
465 */
466
467 uint8_t
468 user_timebase_type(void)
469 {
470 #if HAS_ACNTVCT
471 return USER_TIMEBASE_NOSPEC_APPLE;
472 #elif __ARM_ARCH_8_6__
473 return USER_TIMEBASE_NOSPEC;
474 #else
475 return USER_TIMEBASE_SPEC;
476 #endif
477 }
478
479 void
480 machine_startup(__unused boot_args * args)
481 {
482 #if defined(HAS_IPI) && (DEVELOPMENT || DEBUG)
483 if (!PE_parse_boot_argn("fastipi", &gFastIPI, sizeof(gFastIPI))) {
484 gFastIPI = 1;
485 }
486 #endif /* defined(HAS_IPI) && (DEVELOPMENT || DEBUG)*/
487
488
489 machine_conf();
490
491
492 /*
493 * Kick off the kernel bootstrap.
494 */
495 kernel_bootstrap();
496 /* NOTREACHED */
497 }
498
499 typedef void (*invalidate_fn_t)(void);
500
501 static SECURITY_READ_ONLY_LATE(invalidate_fn_t) invalidate_hmac_function = NULL;
502
503 void set_invalidate_hmac_function(invalidate_fn_t fn);
504
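/*
 * Register a callback that machine_lockdown() will invoke before marking
 * lockdown complete. Only one callback may be registered; registering a
 * second one panics.
 */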
505 void
506 set_invalidate_hmac_function(invalidate_fn_t fn)
507 {
508 if (NULL != invalidate_hmac_function) {
509 panic("Invalidate HMAC function already set");
510 }
511
512 invalidate_hmac_function = fn;
513 }
514
515 void
516 machine_lockdown(void)
517 {
518 arm_vm_prot_finalize(PE_state.bootArgs);
519
520 #if CONFIG_KERNEL_INTEGRITY
521 #if KERNEL_INTEGRITY_WT
522 /* Watchtower
523 *
524 * Notify the monitor about the completion of early kernel bootstrap.
525 * From this point forward it will enforce the integrity of kernel text,
526 * rodata and page tables.
527 */
528
529 #ifdef MONITOR
530 monitor_call(MONITOR_LOCKDOWN, 0, 0, 0);
531 #endif
532 #endif /* KERNEL_INTEGRITY_WT */
533
534 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
535 /* KTRR
536 *
537 * Lock physical KTRR region. KTRR region is read-only. Memory outside
538 * the region is not executable at EL1.
539 */
540
541 rorgn_lockdown();
542 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
543
544 #if XNU_MONITOR
545 pmap_lockdown_ppl();
546 #endif
547
548 #endif /* CONFIG_KERNEL_INTEGRITY */
549
550
551 if (NULL != invalidate_hmac_function) {
552 invalidate_hmac_function();
553 }
554
555 lockdown_done = 1;
556 }
557
558
559 char *
560 machine_boot_info(
561 __unused char *buf,
562 __unused vm_size_t size)
563 {
564 return PE_boot_args();
565 }
566
567 void
568 slave_machine_init(__unused void *param)
569 {
570 cpu_machine_init(); /* Initialize the processor */
571 clock_init(); /* Init the clock */
572 }
573
574 /*
575 * Routine: machine_processor_shutdown
576 * Function:
577 */
578 thread_t
579 machine_processor_shutdown(
580 __unused thread_t thread,
581 void (*doshutdown)(processor_t),
582 processor_t processor)
583 {
584 return Shutdown_context(doshutdown, processor);
585 }
586
587 /*
588 * Routine: ml_init_lock_timeout
589 * Function:
590 */
591 static void __startup_func
592 ml_init_lock_timeout(void)
593 {
594 /*
595 * This function is called after STARTUP_SUB_TIMEOUTS
596 * initialization, so using the "legacy" boot-args here overrides
597 * the ml-timeout-... configuration. (Given that these boot-args
598  * are usually explicitly specified, it makes sense for them to override
599  * ml-timeout-..., which may come from the device tree.)
600 */
601
602 uint64_t lto_timeout_ns;
603 uint64_t lto_abstime;
604 uint32_t slto;
605
606 if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
607 lto_timeout_ns = slto * NSEC_PER_USEC;
608 nanoseconds_to_absolutetime(lto_timeout_ns, <o_abstime);
609 os_atomic_store(&LockTimeOut, lto_abstime, relaxed);
610 } else {
611 lto_abstime = os_atomic_load(&LockTimeOut, relaxed);
612 absolutetime_to_nanoseconds(lto_abstime, <o_timeout_ns);
613 }
614
615 os_atomic_store(&LockTimeOutUsec, lto_timeout_ns / NSEC_PER_USEC, relaxed);
616
617 if (PE_parse_boot_argn("tlto_us", &slto, sizeof(slto))) {
618 nanoseconds_to_absolutetime(slto * NSEC_PER_USEC, <o_abstime);
619 os_atomic_store(&TLockTimeOut, lto_abstime, relaxed);
620 } else if (lto_abstime != 0) {
621 os_atomic_store(&TLockTimeOut, lto_abstime >> 1, relaxed);
622 } // else take default from MACHINE_TIMEOUT.
623
624 uint64_t mtxspin;
625 uint64_t mtx_abstime;
626 if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
627 if (mtxspin > USEC_PER_SEC >> 4) {
628 mtxspin = USEC_PER_SEC >> 4;
629 }
630 nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &mtx_abstime);
631 os_atomic_store(&MutexSpin, mtx_abstime, relaxed);
632 } else {
633 mtx_abstime = os_atomic_load(&MutexSpin, relaxed);
634 }
635
636 low_MutexSpin = os_atomic_load(&MutexSpin, relaxed);
637 /*
638 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
639  * real_ncpus is not set at this time.
640  *
641  * NOTE: active spinning is disabled on ARM. It can be enabled
642 * by setting high_MutexSpin through the sysctl.
643 */
644 high_MutexSpin = low_MutexSpin;
645
646 uint64_t maxwfeus = MAX_WFE_HINT_INTERVAL_US;
647 PE_parse_boot_argn("max_wfe_us", &maxwfeus, sizeof(maxwfeus));
648 nanoseconds_to_absolutetime(maxwfeus * NSEC_PER_USEC, &ml_wfe_hint_max_interval);
649 }
650 STARTUP(TIMEOUTS, STARTUP_RANK_MIDDLE, ml_init_lock_timeout);
651
652
653 /*
654 * This is called when all of the ml_processor_info_t structures have been
655 * initialized and all the processors have been started through processor_start().
656 *
657 * Required by the scheduler subsystem.
658 */
659 void
660 ml_cpu_init_completed(void)
661 {
662 if (SCHED(cpu_init_completed) != NULL) {
663 SCHED(cpu_init_completed)();
664 }
665 }
666
667 /*
668 * These are called from the machine-independent routine cpu_up()
669 * to perform machine-dependent info updates.
670 *
671 * The update to CPU counts needs to be separate from other actions
672 * because we don't update the counts when CLPC causes temporary
673 * cluster powerdown events, as these must be transparent to the user.
674 */
675 void
676 ml_cpu_up(void)
677 {
678 }
679
680 void
681 ml_cpu_up_update_counts(int cpu_id)
682 {
683 ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
684
685 os_atomic_inc(&cluster_type_num_active_cpus[cpu->cluster_type], relaxed);
686
687 os_atomic_inc(&machine_info.physical_cpu, relaxed);
688 os_atomic_inc(&machine_info.logical_cpu, relaxed);
689 }
690
691 /*
692 * These are called from the machine-independent routine cpu_down()
693 * to perform machine-dependent info updates.
694 *
695 * The update to CPU counts needs to be separate from other actions
696 * because we don't update the counts when CLPC causes temporary
697 * cluster powerdown events, as these must be transparent to the user.
698 */
699 void
700 ml_cpu_down(void)
701 {
702 /*
703  * If we want to deal with outstanding IPIs, we need to do
704  * so relatively early in the processor_doshutdown path,
705 * as we pend decrementer interrupts using the IPI
706 * mechanism if we cannot immediately service them (if
707 * IRQ is masked). Do so now.
708 *
709 * We aren't on the interrupt stack here; would it make
710 * more sense to disable signaling and then enable
711 * interrupts? It might be a bit cleaner.
712 */
713 cpu_data_t *cpu_data_ptr = getCpuDatap();
714 cpu_data_ptr->cpu_running = FALSE;
715
716 if (cpu_data_ptr != &BootCpuData) {
717 /*
718 * Move all of this cpu's timers to the master/boot cpu,
719 * and poke it in case there's a sooner deadline for it to schedule.
720 */
721 timer_queue_shutdown(&cpu_data_ptr->rtclock_timer.queue);
722 kern_return_t rv = cpu_xcall(BootCpuData.cpu_number, &timer_queue_expire_local, &ml_cpu_down);
723 if (rv != KERN_SUCCESS) {
724 panic("ml_cpu_down: IPI failure %d", rv);
725 }
726 }
727
728 cpu_signal_handler_internal(TRUE);
729 }
730 void
731 ml_cpu_down_update_counts(int cpu_id)
732 {
733 ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
734
735 os_atomic_dec(&cluster_type_num_active_cpus[cpu->cluster_type], relaxed);
736
737 os_atomic_dec(&machine_info.physical_cpu, relaxed);
738 os_atomic_dec(&machine_info.logical_cpu, relaxed);
739 }
740
741
742 unsigned int
743 ml_get_machine_mem(void)
744 {
745 return machine_info.memory_size;
746 }
747
748 __attribute__((noreturn))
749 void
750 halt_all_cpus(boolean_t reboot)
751 {
752 if (reboot) {
753 printf("MACH Reboot\n");
754 PEHaltRestart(kPERestartCPU);
755 } else {
756 printf("CPU halted\n");
757 PEHaltRestart(kPEHaltCPU);
758 }
759 while (1) {
760 ;
761 }
762 }
763
764 __attribute__((noreturn))
765 void
766 halt_cpu(void)
767 {
768 halt_all_cpus(FALSE);
769 }
770
771 /*
772 * Routine: machine_signal_idle
773 * Function:
774 */
775 void
776 machine_signal_idle(
777 processor_t processor)
778 {
779 cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
780 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
781 }
782
783 void
784 machine_signal_idle_deferred(
785 processor_t processor)
786 {
787 cpu_signal_deferred(processor_to_cpu_datap(processor));
788 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
789 }
790
791 void
792 machine_signal_idle_cancel(
793 processor_t processor)
794 {
795 cpu_signal_cancel(processor_to_cpu_datap(processor));
796 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
797 }
798
799 /*
800 * Routine: ml_install_interrupt_handler
801 * Function: Initialize Interrupt Handler
802 */
803 void
804 ml_install_interrupt_handler(
805 void *nub,
806 int source,
807 void *target,
808 IOInterruptHandler handler,
809 void *refCon)
810 {
811 cpu_data_t *cpu_data_ptr;
812 boolean_t current_state;
813
814 current_state = ml_set_interrupts_enabled(FALSE);
815 cpu_data_ptr = getCpuDatap();
816
817 cpu_data_ptr->interrupt_nub = nub;
818 cpu_data_ptr->interrupt_source = source;
819 cpu_data_ptr->interrupt_target = target;
820 cpu_data_ptr->interrupt_handler = handler;
821 cpu_data_ptr->interrupt_refCon = refCon;
822
823 (void) ml_set_interrupts_enabled(current_state);
824 }
825
826 /*
827 * Routine: ml_init_interrupt
828 * Function: Initialize Interrupts
829 */
830 void
831 ml_init_interrupt(void)
832 {
833 #if defined(HAS_IPI)
834 /*
835 * ml_init_interrupt will get called once for each CPU, but this is redundant
836  * because there is only one global copy of the register for Skye. Do it only
837  * on the bootstrap CPU.
838 */
839 if (getCpuDatap()->cluster_master) {
840 ml_cpu_signal_deferred_adjust_timer(deferred_ipi_timer_ns);
841 }
842 #endif
843 }
844
845 /*
846 * Routine: ml_init_timebase
847  * Function: register and set up Timebase and Decrementer services
848 */
849 void
850 ml_init_timebase(
851 void *args,
852 tbd_ops_t tbd_funcs,
853 vm_offset_t int_address,
854 vm_offset_t int_value __unused)
855 {
856 cpu_data_t *cpu_data_ptr;
857
858 cpu_data_ptr = (cpu_data_t *)args;
859
860 if ((cpu_data_ptr == &BootCpuData)
861 && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
862 rtclock_timebase_func = *tbd_funcs;
863 rtclock_timebase_addr = int_address;
864 }
865 }
866
867 #define ML_READPROP_MANDATORY UINT64_MAX
868
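/*
 * Read a device tree property of 1, 2, 4, or 8 bytes and return it
 * zero-extended to 64 bits. Returns default_value if the property is
 * missing, or panics if default_value is ML_READPROP_MANDATORY.
 */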
869 static uint64_t
870 ml_readprop(const DTEntry entry, const char *propertyName, uint64_t default_value)
871 {
872 void const *prop;
873 unsigned int propSize;
874
875 if (SecureDTGetProperty(entry, propertyName, &prop, &propSize) == kSuccess) {
876 if (propSize == sizeof(uint8_t)) {
877 return *((uint8_t const *)prop);
878 } else if (propSize == sizeof(uint16_t)) {
879 return *((uint16_t const *)prop);
880 } else if (propSize == sizeof(uint32_t)) {
881 return *((uint32_t const *)prop);
882 } else if (propSize == sizeof(uint64_t)) {
883 return *((uint64_t const *)prop);
884 } else {
885 panic("CPU property '%s' has bad size %u", propertyName, propSize);
886 }
887 } else {
888 if (default_value == ML_READPROP_MANDATORY) {
889 panic("Missing mandatory property '%s'", propertyName);
890 }
891 return default_value;
892 }
893 }
894
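/*
 * Read a (physical address, length) pair from a two-element 64-bit device
 * tree property. Returns FALSE if the property is not present.
 */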
895 static boolean_t
896 ml_read_reg_range(const DTEntry entry, const char *propertyName, uint64_t *pa_ptr, uint64_t *len_ptr)
897 {
898 uint64_t const *prop;
899 unsigned int propSize;
900
901 if (SecureDTGetProperty(entry, propertyName, (void const **)&prop, &propSize) != kSuccess) {
902 return FALSE;
903 }
904
905 if (propSize != sizeof(uint64_t) * 2) {
906 panic("Wrong property size for %s", propertyName);
907 }
908
909 *pa_ptr = prop[0];
910 *len_ptr = prop[1];
911 return TRUE;
912 }
913
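/*
 * A CPU node whose device tree "state" property is "running" at this point
 * in boot is treated as the boot CPU.
 */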
914 static boolean_t
915 ml_is_boot_cpu(const DTEntry entry)
916 {
917 void const *prop;
918 unsigned int propSize;
919
920 if (SecureDTGetProperty(entry, "state", &prop, &propSize) != kSuccess) {
921 panic("unable to retrieve state for cpu");
922 }
923
924 if (strncmp((char const *)prop, "running", propSize) == 0) {
925 return TRUE;
926 } else {
927 return FALSE;
928 }
929 }
930
931 static void
932 ml_read_chip_revision(unsigned int *rev __unused)
933 {
934 // The CPU_VERSION_* macros are only defined on APPLE_ARM64_ARCH_FAMILY builds
935 #ifdef APPLE_ARM64_ARCH_FAMILY
936 DTEntry entryP;
937
938 if ((SecureDTFindEntry("name", "arm-io", &entryP) == kSuccess)) {
939 *rev = (unsigned int)ml_readprop(entryP, "chip-revision", CPU_VERSION_UNKNOWN);
940 } else {
941 *rev = CPU_VERSION_UNKNOWN;
942 }
943 #endif
944 }
945
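/*
 * Walk the /cpus device tree nodes to populate topology_info: logical CPU
 * and cluster IDs, cache and implementation-defined register ranges, and
 * the boot CPU/cluster. The mutually exclusive "cpus=" and "cpumask="
 * boot-args can restrict which CPUs are included.
 */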
946 void
947 ml_parse_cpu_topology(void)
948 {
949 DTEntry entry, child __unused;
950 OpaqueDTEntryIterator iter;
951 uint32_t cpu_boot_arg = MAX_CPUS;
952 uint64_t cpumask_boot_arg = ULLONG_MAX;
953 int err;
954
955 int64_t cluster_phys_to_logical[MAX_CPU_CLUSTER_PHY_ID + 1];
956 int64_t cluster_max_cpu_phys_id[MAX_CPU_CLUSTER_PHY_ID + 1];
957 const boolean_t cpus_boot_arg_present = PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));
958 const boolean_t cpumask_boot_arg_present = PE_parse_boot_argn("cpumask", &cpumask_boot_arg, sizeof(cpumask_boot_arg));
959
960 // The cpus=N and cpumask=N boot args cannot be used simultaneously. Flag this
961 // so that we trigger a panic later in the boot process, once serial is enabled.
962 if (cpus_boot_arg_present && cpumask_boot_arg_present) {
963 cpu_config_correct = false;
964 }
965
966 err = SecureDTLookupEntry(NULL, "/cpus", &entry);
967 assert(err == kSuccess);
968
969 err = SecureDTInitEntryIterator(entry, &iter);
970 assert(err == kSuccess);
971
972 for (int i = 0; i <= MAX_CPU_CLUSTER_PHY_ID; i++) {
973 cluster_offsets[i] = -1;
974 cluster_phys_to_logical[i] = -1;
975 cluster_max_cpu_phys_id[i] = 0;
976 }
977
978 while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
979 boolean_t is_boot_cpu = ml_is_boot_cpu(child);
980 boolean_t cpu_enabled = cpumask_boot_arg & 1;
981 cpumask_boot_arg >>= 1;
982
983 // Boot CPU disabled in cpumask. Flag this so that we trigger a panic
984 // later in the boot process, once serial is enabled.
985 if (is_boot_cpu && !cpu_enabled) {
986 cpu_config_correct = false;
987 }
988
989 // Ignore this CPU if it has been disabled by the cpumask= boot-arg.
990 if (!is_boot_cpu && !cpu_enabled) {
991 continue;
992 }
993
994 // If the number of CPUs is constrained by the cpus= boot-arg, and the boot CPU hasn't
995 // been added to the topology struct yet, and we only have one slot left, then skip
996 // every other non-boot CPU in order to leave room for the boot CPU.
997 //
998 // e.g. if the boot-args say "cpus=3" and CPU4 is the boot CPU, then the cpus[]
999 // array will list CPU0, CPU1, and CPU4. CPU2-CPU3 and CPU5-CPUn will be omitted.
1000 if (topology_info.num_cpus >= (cpu_boot_arg - 1) && topology_info.boot_cpu == NULL && !is_boot_cpu) {
1001 continue;
1002 }
1003 if (topology_info.num_cpus >= cpu_boot_arg) {
1004 break;
1005 }
1006
1007 ml_topology_cpu_t *cpu = &topology_info.cpus[topology_info.num_cpus];
1008
1009 cpu->cpu_id = topology_info.num_cpus++;
1010 assert(cpu->cpu_id < MAX_CPUS);
1011 topology_info.max_cpu_id = MAX(topology_info.max_cpu_id, cpu->cpu_id);
1012
1013 cpu->die_id = 0;
1014 topology_info.max_die_id = 0;
1015
1016 cpu->phys_id = (uint32_t)ml_readprop(child, "reg", ML_READPROP_MANDATORY);
1017
1018 cpu->l2_access_penalty = (uint32_t)ml_readprop(child, "l2-access-penalty", 0);
1019 cpu->l2_cache_size = (uint32_t)ml_readprop(child, "l2-cache-size", 0);
1020 cpu->l2_cache_id = (uint32_t)ml_readprop(child, "l2-cache-id", 0);
1021 cpu->l3_cache_size = (uint32_t)ml_readprop(child, "l3-cache-size", 0);
1022 cpu->l3_cache_id = (uint32_t)ml_readprop(child, "l3-cache-id", 0);
1023
1024 ml_read_reg_range(child, "cpu-uttdbg-reg", &cpu->cpu_UTTDBG_pa, &cpu->cpu_UTTDBG_len);
1025 ml_read_reg_range(child, "cpu-impl-reg", &cpu->cpu_IMPL_pa, &cpu->cpu_IMPL_len);
1026 ml_read_reg_range(child, "coresight-reg", &cpu->coresight_pa, &cpu->coresight_len);
1027 cpu->cluster_type = CLUSTER_TYPE_SMP;
1028
1029 int cluster_type = (int)ml_readprop(child, "cluster-type", 0);
1030 if (cluster_type == 'E') {
1031 cpu->cluster_type = CLUSTER_TYPE_E;
1032 } else if (cluster_type == 'P') {
1033 cpu->cluster_type = CLUSTER_TYPE_P;
1034 }
1035
1036 topology_info.cluster_type_num_cpus[cpu->cluster_type]++;
1037
1038 /*
1039 * Since we want to keep a linear cluster ID space, we cannot just rely
1040 * on the value provided by EDT. Instead, use the MPIDR value to see if we have
1041 * seen this exact cluster before. If so, then reuse that cluster ID for this CPU.
1042 */
1043 #if HAS_CLUSTER
1044 uint32_t phys_cluster_id = MPIDR_CLUSTER_ID(cpu->phys_id);
1045 #else
1046 uint32_t phys_cluster_id = (cpu->cluster_type == CLUSTER_TYPE_P);
1047 #endif
1048 assert(phys_cluster_id <= MAX_CPU_CLUSTER_PHY_ID);
1049 cpu->cluster_id = ((cluster_phys_to_logical[phys_cluster_id] == -1) ?
1050 topology_info.num_clusters : cluster_phys_to_logical[phys_cluster_id]);
1051
1052 assert(cpu->cluster_id < MAX_CPU_CLUSTERS);
1053
1054 ml_topology_cluster_t *cluster = &topology_info.clusters[cpu->cluster_id];
1055 if (cluster->num_cpus == 0) {
1056 assert(topology_info.num_clusters < MAX_CPU_CLUSTERS);
1057
1058 topology_info.num_clusters++;
1059 topology_info.max_cluster_id = MAX(topology_info.max_cluster_id, cpu->cluster_id);
1060 topology_info.cluster_types |= (1 << cpu->cluster_type);
1061
1062 cluster->cluster_id = cpu->cluster_id;
1063 cluster->cluster_type = cpu->cluster_type;
1064 cluster->first_cpu_id = cpu->cpu_id;
1065 assert(cluster_phys_to_logical[phys_cluster_id] == -1);
1066 cluster_phys_to_logical[phys_cluster_id] = cpu->cluster_id;
1067
1068 topology_info.cluster_type_num_clusters[cluster->cluster_type]++;
1069
1070 // Since we don't have a per-cluster EDT node, this is repeated in each CPU node.
1071 // If we wind up with a bunch of these, we might want to create separate per-cluster
1072 // EDT nodes and have the CPU nodes reference them through a phandle.
1073 ml_read_reg_range(child, "acc-impl-reg", &cluster->acc_IMPL_pa, &cluster->acc_IMPL_len);
1074 ml_read_reg_range(child, "cpm-impl-reg", &cluster->cpm_IMPL_pa, &cluster->cpm_IMPL_len);
1075 }
1076
1077 #if HAS_CLUSTER
1078 if (MPIDR_CPU_ID(cpu->phys_id) > cluster_max_cpu_phys_id[phys_cluster_id]) {
1079 cluster_max_cpu_phys_id[phys_cluster_id] = MPIDR_CPU_ID(cpu->phys_id);
1080 }
1081 #endif
1082
1083 cpu->die_cluster_id = (int)ml_readprop(child, "die-cluster-id", MPIDR_CLUSTER_ID(cpu->phys_id));
1084 cpu->cluster_core_id = (int)ml_readprop(child, "cluster-core-id", MPIDR_CPU_ID(cpu->phys_id));
1085
1086 cluster->num_cpus++;
1087 cluster->cpu_mask |= 1ULL << cpu->cpu_id;
1088
1089 if (is_boot_cpu) {
1090 assert(topology_info.boot_cpu == NULL);
1091 topology_info.boot_cpu = cpu;
1092 topology_info.boot_cluster = cluster;
1093 }
1094
1095 }
1096
1097 #if HAS_CLUSTER
1098 /*
1099 * Build the cluster offset array, ensuring that the region reserved
1100 * for each physical cluster contains enough entries to be indexed
1101 * by the maximum physical CPU ID (AFF0) within the cluster.
1102 */
1103 unsigned int cur_cluster_offset = 0;
1104 for (int i = 0; i <= MAX_CPU_CLUSTER_PHY_ID; i++) {
1105 if (cluster_phys_to_logical[i] != -1) {
1106 cluster_offsets[i] = cur_cluster_offset;
1107 cur_cluster_offset += (cluster_max_cpu_phys_id[i] + 1);
1108 }
1109 }
1110 assert(cur_cluster_offset <= MAX_CPUS);
1111 #else
1112 /*
1113 * For H10, there are really 2 physical clusters, but they are not separated
1114 * into distinct ACCs. AFF1 therefore always reports 0, and AFF0 numbering
1115 * is linear across both clusters. For the purpose of MPIDR_EL1-based indexing,
1116 * treat H10 and earlier devices as though they contain a single cluster.
1117 */
1118 cluster_offsets[0] = 0;
1119 #endif
1120 assert(topology_info.boot_cpu != NULL);
1121 ml_read_chip_revision(&topology_info.chip_revision);
1122
1123
1124 /*
1125 * Set TPIDR_EL0 to indicate the correct cpu number & cluster id,
1126 * as we may not be booting from cpu 0. Userspace will consume
1127 * the current CPU number through this register. For non-boot
1128 * cores, this is done in start.s (start_cpu) using the per-cpu
1129 * data object.
1130 */
1131 ml_topology_cpu_t *boot_cpu = topology_info.boot_cpu;
1132 uint64_t tpidr_el0 = ((boot_cpu->cpu_id << MACHDEP_TPIDR_CPUNUM_SHIFT) & MACHDEP_TPIDR_CPUNUM_MASK) | \
1133 ((boot_cpu->cluster_id << MACHDEP_TPIDR_CLUSTERID_SHIFT) & MACHDEP_TPIDR_CLUSTERID_MASK);
1134 assert(((tpidr_el0 & MACHDEP_TPIDR_CPUNUM_MASK) >> MACHDEP_TPIDR_CPUNUM_SHIFT) == boot_cpu->cpu_id);
1135 assert(((tpidr_el0 & MACHDEP_TPIDR_CLUSTERID_MASK) >> MACHDEP_TPIDR_CLUSTERID_SHIFT) == boot_cpu->cluster_id);
1136 __builtin_arm_wsr64("TPIDR_EL0", tpidr_el0);
1137
1138 __builtin_arm_wsr64("TPIDRRO_EL0", 0);
1139 }
1140
1141 const ml_topology_info_t *
1142 ml_get_topology_info(void)
1143 {
1144 return &topology_info;
1145 }
1146
1147 void
1148 ml_map_cpu_pio(void)
1149 {
1150 unsigned int i;
1151
1152 for (i = 0; i < topology_info.num_cpus; i++) {
1153 ml_topology_cpu_t *cpu = &topology_info.cpus[i];
1154 if (cpu->cpu_IMPL_pa) {
1155 cpu->cpu_IMPL_regs = (vm_offset_t)ml_io_map(cpu->cpu_IMPL_pa, cpu->cpu_IMPL_len);
1156 cpu->coresight_regs = (vm_offset_t)ml_io_map(cpu->coresight_pa, cpu->coresight_len);
1157 }
1158 if (cpu->cpu_UTTDBG_pa) {
1159 cpu->cpu_UTTDBG_regs = (vm_offset_t)ml_io_map(cpu->cpu_UTTDBG_pa, cpu->cpu_UTTDBG_len);
1160 }
1161 }
1162
1163 for (i = 0; i < topology_info.num_clusters; i++) {
1164 ml_topology_cluster_t *cluster = &topology_info.clusters[i];
1165 if (cluster->acc_IMPL_pa) {
1166 cluster->acc_IMPL_regs = (vm_offset_t)ml_io_map(cluster->acc_IMPL_pa, cluster->acc_IMPL_len);
1167 }
1168 if (cluster->cpm_IMPL_pa) {
1169 cluster->cpm_IMPL_regs = (vm_offset_t)ml_io_map(cluster->cpm_IMPL_pa, cluster->cpm_IMPL_len);
1170 }
1171 }
1172 }
1173
1174 unsigned int
1175 ml_get_cpu_count(void)
1176 {
1177 return topology_info.num_cpus;
1178 }
1179
1180 unsigned int
1181 ml_get_cluster_count(void)
1182 {
1183 return topology_info.num_clusters;
1184 }
1185
1186 int
1187 ml_get_boot_cpu_number(void)
1188 {
1189 return topology_info.boot_cpu->cpu_id;
1190 }
1191
1192 cluster_type_t
1193 ml_get_boot_cluster_type(void)
1194 {
1195 return topology_info.boot_cluster->cluster_type;
1196 }
1197
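/*
 * Translate a physical CPU ID (MPIDR_EL1 affinity fields) into a logical
 * CPU number by a linear search of the topology table; returns -1 if the
 * physical ID is unknown.
 */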
1198 int
1199 ml_get_cpu_number(uint32_t phys_id)
1200 {
1201 phys_id &= MPIDR_AFF1_MASK | MPIDR_AFF0_MASK;
1202
1203 for (unsigned i = 0; i < topology_info.num_cpus; i++) {
1204 if (topology_info.cpus[i].phys_id == phys_id) {
1205 return i;
1206 }
1207 }
1208
1209 return -1;
1210 }
1211
1212 int
1213 ml_get_cluster_number(uint32_t phys_id)
1214 {
1215 int cpu_id = ml_get_cpu_number(phys_id);
1216 if (cpu_id < 0) {
1217 return -1;
1218 }
1219
1220 ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];
1221
1222 return cpu->cluster_id;
1223 }
1224
1225 unsigned int
1226 ml_get_cpu_number_local(void)
1227 {
1228 uint64_t mpidr_el1_value = 0;
1229 unsigned cpu_id;
1230
1231 /* We identify the CPU based on the constant bits of MPIDR_EL1. */
1232 MRS(mpidr_el1_value, "MPIDR_EL1");
1233 cpu_id = ml_get_cpu_number((uint32_t)mpidr_el1_value);
1234
1235 assert(cpu_id <= (unsigned int)ml_get_max_cpu_number());
1236
1237 return cpu_id;
1238 }
1239
1240 int
1241 ml_get_cluster_number_local()
1242 {
1243 uint64_t mpidr_el1_value = 0;
1244 unsigned cluster_id;
1245
1246 /* We identify the cluster based on the constant bits of MPIDR_EL1. */
1247 MRS(mpidr_el1_value, "MPIDR_EL1");
1248 cluster_id = ml_get_cluster_number((uint32_t)mpidr_el1_value);
1249
1250 assert(cluster_id <= (unsigned int)ml_get_max_cluster_number());
1251
1252 return cluster_id;
1253 }
1254
1255 int
1256 ml_get_max_cpu_number(void)
1257 {
1258 return topology_info.max_cpu_id;
1259 }
1260
1261 int
1262 ml_get_max_cluster_number(void)
1263 {
1264 return topology_info.max_cluster_id;
1265 }
1266
1267 unsigned int
1268 ml_get_first_cpu_id(unsigned int cluster_id)
1269 {
1270 return topology_info.clusters[cluster_id].first_cpu_id;
1271 }
1272
1273 static_assert(MAX_CPUS <= 256, "MAX_CPUS must fit in _COMM_PAGE_CPU_TO_CLUSTER; Increase table size if needed");
1274
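/*
 * Fill in the commpage CPU-to-cluster table: table[cpu_id] is the logical
 * cluster ID of that CPU.
 */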
1275 void
1276 ml_map_cpus_to_clusters(uint8_t *table)
1277 {
1278 for (uint16_t cpu_id = 0; cpu_id < topology_info.num_cpus; cpu_id++) {
1279 *(table + cpu_id) = (uint8_t)(topology_info.cpus[cpu_id].cluster_id);
1280 }
1281 }
1282
1283 /*
1284 * Return the die id of a cluster.
1285 */
1286 unsigned int
1287 ml_get_die_id(unsigned int cluster_id)
1288 {
1289 /*
1290 * The current implementation gets the die_id from the
1291 * first CPU of the cluster.
1292 * rdar://80917654 (Add the die_id field to the cluster topology info)
1293 */
1294 unsigned int first_cpu = ml_get_first_cpu_id(cluster_id);
1295 return topology_info.cpus[first_cpu].die_id;
1296 }
1297
1298 /*
1299 * Return the index of a cluster in its die.
1300 */
1301 unsigned int
1302 ml_get_die_cluster_id(unsigned int cluster_id)
1303 {
1304 /*
1305  * The current implementation gets the die_cluster_id from the
1306 * first CPU of the cluster.
1307 * rdar://80917654 (Add the die_id field to the cluster topology info)
1308 */
1309 unsigned int first_cpu = ml_get_first_cpu_id(cluster_id);
1310 return topology_info.cpus[first_cpu].die_cluster_id;
1311 }
1312
1313 /*
1314 * Return the highest die id of the system.
1315 */
1316 unsigned int
1317 ml_get_max_die_id(void)
1318 {
1319 return topology_info.max_die_id;
1320 }
1321
1322 void
1323 ml_lockdown_init()
1324 {
1325 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
1326 rorgn_stash_range();
1327 #endif
1328 }
1329
1330 kern_return_t
1331 ml_lockdown_handler_register(lockdown_handler_t f, void *this)
1332 {
1333 if (!f) {
1334 return KERN_FAILURE;
1335 }
1336
1337 assert(lockdown_done);
1338 f(this); // XXX: f this whole function
1339
1340 return KERN_SUCCESS;
1341 }
1342
1343 static mcache_flush_function mcache_flush_func;
1344 static void* mcache_flush_service;
1345 kern_return_t
1346 ml_mcache_flush_callback_register(mcache_flush_function func, void *service)
1347 {
1348 mcache_flush_service = service;
1349 mcache_flush_func = func;
1350
1351 return KERN_SUCCESS;
1352 }
1353
1354 kern_return_t
1355 ml_mcache_flush(void)
1356 {
1357 if (!mcache_flush_func) {
1358 panic("Cannot flush M$ with no flush callback registered");
1359
1360 return KERN_FAILURE;
1361 } else {
1362 return mcache_flush_func(mcache_flush_service);
1363 }
1364 }
1365
1366
1367 extern lck_mtx_t pset_create_lock;
1368
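/*
 * Register one CPU with the machine layer: set up its cpu_data, attach it
 * to (or create) the processor set for its cluster, and return the
 * processor, IPI handler, and PMI handler to the caller.
 */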
1369 kern_return_t
1370 ml_processor_register(ml_processor_info_t *in_processor_info,
1371 processor_t *processor_out, ipi_handler_t *ipi_handler_out,
1372 perfmon_interrupt_handler_func *pmi_handler_out)
1373 {
1374 cpu_data_t *this_cpu_datap;
1375 processor_set_t pset;
1376 boolean_t is_boot_cpu;
1377 static unsigned int reg_cpu_count = 0;
1378
1379 if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) {
1380 return KERN_FAILURE;
1381 }
1382
1383 if ((unsigned)OSIncrementAtomic((SInt32*)®_cpu_count) >= topology_info.num_cpus) {
1384 return KERN_FAILURE;
1385 }
1386
1387 if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
1388 is_boot_cpu = FALSE;
1389 this_cpu_datap = cpu_data_alloc(FALSE);
1390 cpu_data_init(this_cpu_datap);
1391 } else {
1392 this_cpu_datap = &BootCpuData;
1393 is_boot_cpu = TRUE;
1394 }
1395
1396 assert(in_processor_info->log_id <= (uint32_t)ml_get_max_cpu_number());
1397
1398 this_cpu_datap->cpu_id = in_processor_info->cpu_id;
1399
1400 if (!is_boot_cpu) {
1401 this_cpu_datap->cpu_number = (unsigned short)(in_processor_info->log_id);
1402
1403 if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
1404 goto processor_register_error;
1405 }
1406 assert((this_cpu_datap->cpu_number & MACHDEP_TPIDR_CPUNUM_MASK) == this_cpu_datap->cpu_number);
1407 }
1408
1409 this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
1410 this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t)in_processor_info->platform_cache_dispatch;
1411 nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
1412 this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);
1413
1414 this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
1415 this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;
1416
1417 this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
1418 this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
1419 this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
1420 this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;
1421
1422 this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type;
1423 this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id;
1424 this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id;
1425 this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size;
1426 this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id;
1427 this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size;
1428
1429 /*
1430 * Encode cpu_id, cluster_id to be stored in TPIDR_EL0 (see
1431 * cswitch.s:set_thread_registers, start.s:start_cpu) for consumption
1432 * by userspace.
1433 */
1434 this_cpu_datap->cpu_tpidr_el0 = ((this_cpu_datap->cpu_number << MACHDEP_TPIDR_CPUNUM_SHIFT) & MACHDEP_TPIDR_CPUNUM_MASK) | \
1435 ((this_cpu_datap->cpu_cluster_id << MACHDEP_TPIDR_CLUSTERID_SHIFT) & MACHDEP_TPIDR_CLUSTERID_MASK);
1436 assert(((this_cpu_datap->cpu_tpidr_el0 & MACHDEP_TPIDR_CPUNUM_MASK) >> MACHDEP_TPIDR_CPUNUM_SHIFT) == this_cpu_datap->cpu_number);
1437 assert(((this_cpu_datap->cpu_tpidr_el0 & MACHDEP_TPIDR_CLUSTERID_MASK) >> MACHDEP_TPIDR_CLUSTERID_SHIFT) == this_cpu_datap->cpu_cluster_id);
1438
1439 #if HAS_CLUSTER
1440 this_cpu_datap->cluster_master = !OSTestAndSet(this_cpu_datap->cpu_cluster_id, &cluster_initialized);
1441 #else /* HAS_CLUSTER */
1442 this_cpu_datap->cluster_master = is_boot_cpu;
1443 #endif /* HAS_CLUSTER */
1444 lck_mtx_lock(&pset_create_lock);
1445 pset = pset_find(in_processor_info->cluster_id, NULL);
1446 kprintf("[%d]%s>pset_find(cluster_id=%d) returned pset %d\n", current_processor()->cpu_id, __FUNCTION__, in_processor_info->cluster_id, pset ? pset->pset_id : -1);
1447 if (pset == NULL) {
1448 #if __AMP__
1449 pset_cluster_type_t pset_cluster_type = this_cpu_datap->cpu_cluster_type == CLUSTER_TYPE_E ? PSET_AMP_E : PSET_AMP_P;
1450 pset = pset_create(ml_get_boot_cluster_type() == this_cpu_datap->cpu_cluster_type ? &pset_node0 : &pset_node1, pset_cluster_type, this_cpu_datap->cpu_cluster_id, this_cpu_datap->cpu_cluster_id);
1451 assert(pset != PROCESSOR_SET_NULL);
1452 kprintf("[%d]%s>pset_create(cluster_id=%d) returned pset %d\n", current_processor()->cpu_id, __FUNCTION__, this_cpu_datap->cpu_cluster_id, pset->pset_id);
1453 #else /* __AMP__ */
1454 pset_cluster_type_t pset_cluster_type = PSET_SMP;
1455 pset = pset_create(&pset_node0, pset_cluster_type, this_cpu_datap->cpu_cluster_id, this_cpu_datap->cpu_cluster_id);
1456 assert(pset != PROCESSOR_SET_NULL);
1457 #endif /* __AMP__ */
1458 }
1459 kprintf("[%d]%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", current_processor()->cpu_id, __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type);
1460 lck_mtx_unlock(&pset_create_lock);
1461
1462 processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
1463 if (!is_boot_cpu) {
1464 processor_init(processor, this_cpu_datap->cpu_number, pset);
1465
1466 if (this_cpu_datap->cpu_l2_access_penalty) {
1467 /*
1468 * Cores that have a non-zero L2 access penalty compared
1469 * to the boot processor should be de-prioritized by the
1470 * scheduler, so that threads use the cores with better L2
1471 * preferentially.
1472 */
1473 processor_set_primary(processor, master_processor);
1474 }
1475 }
1476
1477 *processor_out = processor;
1478 *ipi_handler_out = cpu_signal_handler;
1479 #if CPMU_AIC_PMI && MONOTONIC
1480 *pmi_handler_out = mt_cpmu_aic_pmi;
1481 #else
1482 *pmi_handler_out = NULL;
1483 #endif /* CPMU_AIC_PMI && MONOTONIC */
1484 if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
1485 *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
1486 }
1487
1488 #if KPC
1489 if (kpc_register_cpu(this_cpu_datap) != TRUE) {
1490 goto processor_register_error;
1491 }
1492 #endif /* KPC */
1493
1494 if (!is_boot_cpu) {
1495 random_cpu_init(this_cpu_datap->cpu_number);
1496 // now let next CPU register itself
1497 OSIncrementAtomic((SInt32*)&real_ncpus);
1498 }
1499
1500 return KERN_SUCCESS;
1501
1502 processor_register_error:
1503 #if KPC
1504 kpc_unregister_cpu(this_cpu_datap);
1505 #endif /* KPC */
1506 if (!is_boot_cpu) {
1507 cpu_data_free(this_cpu_datap);
1508 }
1509
1510 return KERN_FAILURE;
1511 }
1512
1513 void
1514 ml_init_arm_debug_interface(
1515 void * in_cpu_datap,
1516 vm_offset_t virt_address)
1517 {
1518 ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
1519 do_debugid();
1520 }
1521
1522 /*
1523 * Routine: init_ast_check
1524 * Function:
1525 */
1526 void
1527 init_ast_check(
1528 __unused processor_t processor)
1529 {
1530 }
1531
1532 /*
1533 * Routine: cause_ast_check
1534 * Function:
1535 */
1536 void
1537 cause_ast_check(
1538 processor_t processor)
1539 {
1540 if (current_processor() != processor) {
1541 cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
1542 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
1543 }
1544 }
1545
1546 extern uint32_t cpu_idle_count;
1547
1548 void
1549 ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
1550 {
1551 *icp = ml_at_interrupt_context();
1552 *pidlep = (cpu_idle_count == real_ncpus);
1553 }
1554
1555 /*
1556 * Routine: ml_cause_interrupt
1557 * Function: Generate a fake interrupt
1558 */
1559 void
1560 ml_cause_interrupt(void)
1561 {
1562 return; /* BS_XXX */
1563 }
1564
1565 /* Map memory-mapped IO space */
1566 vm_offset_t
1567 ml_io_map(
1568 vm_offset_t phys_addr,
1569 vm_size_t size)
1570 {
1571 return io_map(phys_addr, size, VM_WIMG_IO, VM_PROT_DEFAULT, false);
1572 }
1573
1574 /* Map memory-mapped IO space (with protections specified) */
1575 vm_offset_t
1576 ml_io_map_with_prot(
1577 vm_offset_t phys_addr,
1578 vm_size_t size,
1579 vm_prot_t prot)
1580 {
1581 return io_map(phys_addr, size, VM_WIMG_IO, prot, false);
1582 }
1583
1584 vm_offset_t
1585 ml_io_map_unmappable(
1586 vm_offset_t phys_addr,
1587 vm_size_t size,
1588 unsigned int flags)
1589 {
1590 return io_map(phys_addr, size, flags, VM_PROT_DEFAULT, true);
1591 }
1592
1593 vm_offset_t
1594 ml_io_map_wcomb(
1595 vm_offset_t phys_addr,
1596 vm_size_t size)
1597 {
1598 return io_map(phys_addr, size, VM_WIMG_WCOMB, VM_PROT_DEFAULT, false);
1599 }
1600
1601 void
1602 ml_io_unmap(vm_offset_t addr, vm_size_t sz)
1603 {
1604 pmap_remove(kernel_pmap, addr, addr + sz);
1605 kmem_free(kernel_map, addr, sz);
1606 }
1607
1608 vm_map_address_t
1609 ml_map_high_window(
1610 vm_offset_t phys_addr,
1611 vm_size_t len)
1612 {
1613 return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
1614 }
1615
1616 vm_offset_t
1617 ml_static_ptovirt(
1618 vm_offset_t paddr)
1619 {
1620 return phystokv(paddr);
1621 }
1622
1623 vm_offset_t
1624 ml_static_slide(
1625 vm_offset_t vaddr)
1626 {
1627 vm_offset_t slid_vaddr = 0;
1628
1629 {
1630 slid_vaddr = vaddr + vm_kernel_slide;
1631 }
1632
1633 if (!VM_KERNEL_IS_SLID(slid_vaddr)) {
1634 /* This is only intended for use on static kernel addresses. */
1635 return 0;
1636 }
1637
1638 return slid_vaddr;
1639 }
1640
1641 vm_offset_t
1642 ml_static_unslide(
1643 vm_offset_t vaddr)
1644 {
1645 if (!VM_KERNEL_IS_SLID(vaddr)) {
1646 /* This is only intended for use on static kernel addresses. */
1647 return 0;
1648 }
1649
1650
1651 return vaddr - vm_kernel_slide;
1652 }
1653
1654 extern tt_entry_t *arm_kva_to_tte(vm_offset_t va);
1655
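/*
 * Change the protections on a page-aligned range of static kernel mappings.
 * W+X requests, and attempts to add execute permission after lockdown,
 * panic. Block and contiguous-hint mappings are only accepted if their
 * existing protections already match the request.
 */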
1656 kern_return_t
1657 ml_static_protect(
1658 vm_offset_t vaddr, /* kernel virtual address */
1659 vm_size_t size,
1660 vm_prot_t new_prot __unused)
1661 {
1662 pt_entry_t arm_prot = 0;
1663 pt_entry_t arm_block_prot = 0;
1664 vm_offset_t vaddr_cur;
1665 ppnum_t ppn;
1666 kern_return_t result = KERN_SUCCESS;
1667
1668 if (vaddr < physmap_base) {
1669 panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) physmap_base);
1670 return KERN_FAILURE;
1671 }
1672
1673 assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
1674
1675 if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
1676 panic("ml_static_protect(): WX request on %p", (void *) vaddr);
1677 }
1678 if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
1679 panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
1680 }
1681
1682 /* Set up the protection bits, and block bits so we can validate block mappings. */
1683 if (new_prot & VM_PROT_WRITE) {
1684 arm_prot |= ARM_PTE_AP(AP_RWNA);
1685 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
1686 } else {
1687 arm_prot |= ARM_PTE_AP(AP_RONA);
1688 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
1689 }
1690
1691 arm_prot |= ARM_PTE_NX;
1692 arm_block_prot |= ARM_TTE_BLOCK_NX;
1693
1694 if (!(new_prot & VM_PROT_EXECUTE)) {
1695 arm_prot |= ARM_PTE_PNX;
1696 arm_block_prot |= ARM_TTE_BLOCK_PNX;
1697 }
1698
1699 for (vaddr_cur = vaddr;
1700 vaddr_cur < trunc_page_64(vaddr + size);
1701 vaddr_cur += PAGE_SIZE) {
1702 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
1703 if (ppn != (vm_offset_t) NULL) {
1704 tt_entry_t *tte2;
1705 pt_entry_t *pte_p;
1706 pt_entry_t ptmp;
1707
1708 #if XNU_MONITOR
1709 assert(!pmap_is_monitor(ppn));
1710 assert(!TEST_PAGE_RATIO_4);
1711 #endif
1712
1713 tte2 = arm_kva_to_tte(vaddr_cur);
1714
1715 if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
1716 if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
1717 ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) {
1718 /*
1719 * We can support ml_static_protect on a block mapping if the mapping already has
1720 * the desired protections. We still want to run checks on a per-page basis.
1721 */
1722 continue;
1723 }
1724
1725 result = KERN_FAILURE;
1726 break;
1727 }
1728
1729 pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)];
1730 ptmp = *pte_p;
1731
1732 if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) {
1733 /*
1734 * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing
1735 * protections do not match the desired protections, then we will fail (as we cannot update
1736 * this mapping without updating other mappings as well).
1737 */
1738 result = KERN_FAILURE;
1739 break;
1740 }
1741
1742 __unreachable_ok_push
1743 if (TEST_PAGE_RATIO_4) {
1744 {
1745 unsigned int i;
1746 pt_entry_t *ptep_iter;
1747
1748 ptep_iter = pte_p;
1749 for (i = 0; i < 4; i++, ptep_iter++) {
1750 /* Note that there is a hole in the HINT sanity checking here. */
1751 ptmp = *ptep_iter;
1752
1753 /* We only need to update the page tables if the protections do not match. */
1754 if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
1755 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
1756 *ptep_iter = ptmp;
1757 }
1758 }
1759 }
1760 } else {
1761 ptmp = *pte_p;
1762 /* We only need to update the page tables if the protections do not match. */
1763 if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
1764 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
1765 *pte_p = ptmp;
1766 }
1767 }
1768 __unreachable_ok_pop
1769 }
1770 }
1771
1772 if (vaddr_cur > vaddr) {
1773 assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0);
1774 flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr));
1775 }
1776
1777
1778 return result;
1779 }
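
/*
 * Illustrative sketch (hypothetical helper, not called anywhere in xnu): once
 * a page of static kernel data has been fully initialized, a caller could use
 * ml_static_protect() to revoke write access to it. The address must be page
 * aligned and must lie within the physmap, per the checks above.
 */
__unused static inline kern_return_t
ml_static_protect_ro_example(vm_offset_t page_addr)
{
	return ml_static_protect(page_addr, PAGE_SIZE, VM_PROT_READ);
}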
1780
1781
1782 /*
1783 * Routine: ml_static_mfree
1784 * Function: Release wired, static kernel memory pages back to the VM.
1785 */
1786 void
1787 ml_static_mfree(
1788 vm_offset_t vaddr,
1789 vm_size_t size)
1790 {
1791 vm_offset_t vaddr_cur;
1792 vm_offset_t paddr_cur;
1793 ppnum_t ppn;
1794 uint32_t freed_pages = 0;
1795 uint32_t freed_kernelcache_pages = 0;
1796
1797
1798 /* It is acceptable (if bad) to fail to free. */
1799 if (vaddr < physmap_base) {
1800 return;
1801 }
1802
1803 assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
1804
1805 for (vaddr_cur = vaddr;
1806 vaddr_cur < trunc_page_64(vaddr + size);
1807 vaddr_cur += PAGE_SIZE) {
1808 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
1809 if (ppn != (vm_offset_t) NULL) {
1810 /*
1811 * It is not acceptable to fail to update the protections on a page
1812 * we will release to the VM. We need to either panic or continue.
1813 * For now, we'll panic (to help flag if there is memory we can
1814 * reclaim).
1815 */
1816 if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
1817 panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
1818 }
1819
1820 paddr_cur = ptoa(ppn);
1821
1822
1823 vm_page_create(ppn, (ppn + 1));
1824 freed_pages++;
1825 if (paddr_cur >= arm_vm_kernelcache_phys_start && paddr_cur < arm_vm_kernelcache_phys_end) {
1826 freed_kernelcache_pages++;
1827 }
1828 }
1829 }
1830 vm_page_lockspin_queues();
1831 vm_page_wire_count -= freed_pages;
1832 vm_page_wire_count_initial -= freed_pages;
1833 vm_page_kernelcache_count -= freed_kernelcache_pages;
1834 vm_page_unlock_queues();
1835 #if DEBUG
1836 kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x, +%d bad\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn, bad_page_cnt);
1837 #endif
1838 }
1839
1840 /*
1841 * Routine: ml_page_protection_type
1842 * Function: Returns the type of page protection that the system supports.
1843 */
1844 ml_page_protection_t
1845 ml_page_protection_type(void)
1846 {
1847 #if XNU_MONITOR
1848 return 1;
1849 #else
1850 return 0;
1851 #endif
1852 }
1853
1854 /* virtual to physical on wired pages */
1855 vm_offset_t
1856 ml_vtophys(vm_offset_t vaddr)
1857 {
1858 return kvtophys(vaddr);
1859 }
1860
1861 /*
1862 * Routine: ml_nofault_copy
1863 * Function: Perform a physical mode copy if the source and destination have
1864 * valid translations in the kernel pmap. If translations are present, they are
1865 * assumed to be wired; i.e., no attempt is made to guarantee that the
1866 * translations obtained remain valid for the duration of the copy process.
1867 */
1868 vm_size_t
1869 ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
1870 {
1871 addr64_t cur_phys_dst, cur_phys_src;
1872 vm_size_t count, nbytes = 0;
1873
1874 while (size > 0) {
1875 if (!(cur_phys_src = kvtophys(virtsrc))) {
1876 break;
1877 }
1878 if (!(cur_phys_dst = kvtophys(virtdst))) {
1879 break;
1880 }
1881 if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
1882 !pmap_valid_address(trunc_page_64(cur_phys_src))) {
1883 break;
1884 }
1885 count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
1886 if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
1887 count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
1888 }
1889 if (count > size) {
1890 count = size;
1891 }
1892
1893 bcopy_phys(cur_phys_src, cur_phys_dst, count);
1894
1895 nbytes += count;
1896 virtsrc += count;
1897 virtdst += count;
1898 size -= count;
1899 }
1900
1901 return nbytes;
1902 }
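
/*
 * Worked example (illustrative, assuming 16K kernel pages): if virtsrc sits at
 * offset 0x3F80 within its page and virtdst at offset 0x0040, the first
 * iteration copies min(0x4000 - 0x3F80, 0x4000 - 0x0040, size) = 0x80 bytes;
 * the next iteration is then limited by the destination's remaining 0x3F40
 * bytes, and so on, so no single bcopy_phys() ever crosses a page boundary in
 * either mapping.
 */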
1903
1904 /*
1905 * Routine: ml_validate_nofault
1906 * Function: Validate that this address range has valid translations
1907 * in the kernel pmap. If translations are present, they are
1908 * assumed to be wired; i.e. no attempt is made to guarantee
1909 * that the translations persist after the check.
1910 * Returns: TRUE if the range is mapped and will not cause a fault,
1911 * FALSE otherwise.
1912 */
1913
1914 boolean_t
1915 ml_validate_nofault(
1916 vm_offset_t virtsrc, vm_size_t size)
1917 {
1918 addr64_t cur_phys_src;
1919 uint32_t count;
1920
1921 while (size > 0) {
1922 if (!(cur_phys_src = kvtophys(virtsrc))) {
1923 return FALSE;
1924 }
1925 if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
1926 return FALSE;
1927 }
1928 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
1929 if (count > size) {
1930 count = (uint32_t)size;
1931 }
1932
1933 virtsrc += count;
1934 size -= count;
1935 }
1936
1937 return TRUE;
1938 }
1939
1940 void
1941 ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
1942 {
1943 *phys_addr = 0;
1944 *size = 0;
1945 }
1946
1947 void
1948 active_rt_threads(__unused boolean_t active)
1949 {
1950 }
1951
1952 static void
1953 cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2)
1954 {
1955 return;
1956 }
1957
1958 cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default;
1959
1960 void
1961 cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb)
1962 {
1963 if (cpu_qos_cb != NULL) {
1964 cpu_qos_update = cpu_qos_cb;
1965 } else {
1966 cpu_qos_update = cpu_qos_cb_default;
1967 }
1968 }
1969
1970 void
1971 thread_tell_urgency(thread_urgency_t urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread)
1972 {
1973 SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0);
1974
1975 cpu_qos_update((int)urgency, rt_period, rt_deadline);
1976
1977 SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
1978 }
1979
1980 void
1981 machine_run_count(__unused uint32_t count)
1982 {
1983 }
1984
1985 processor_t
1986 machine_choose_processor(__unused processor_set_t pset, processor_t processor)
1987 {
1988 return processor;
1989 }
1990
1991 #if KASAN
1992 vm_offset_t ml_stack_base(void);
1993 vm_size_t ml_stack_size(void);
1994
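/*
 * Descriptive note (added commentary): both helpers below take the address of
 * a local variable, which necessarily lives on whichever stack is currently in
 * use. If that address falls within [intstack_top - INTSTACK_SIZE,
 * intstack_top), execution is on this CPU's interrupt stack; otherwise it is
 * on the current thread's kernel stack.
 */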
1995 vm_offset_t
1996 ml_stack_base(void)
1997 {
1998 uintptr_t local = (uintptr_t) &local;
1999 vm_offset_t intstack_top_ptr;
2000
2001 intstack_top_ptr = getCpuDatap()->intstack_top;
2002 if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
2003 return intstack_top_ptr - INTSTACK_SIZE;
2004 } else {
2005 return current_thread()->kernel_stack;
2006 }
2007 }
2008 vm_size_t
2009 ml_stack_size(void)
2010 {
2011 uintptr_t local = (uintptr_t) &local;
2012 vm_offset_t intstack_top_ptr;
2013
2014 intstack_top_ptr = getCpuDatap()->intstack_top;
2015 if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
2016 return INTSTACK_SIZE;
2017 } else {
2018 return kernel_stack_size;
2019 }
2020 }
2021 #endif
2022
2023 #ifdef CONFIG_KCOV
2024
2025 kcov_cpu_data_t *
2026 current_kcov_data(void)
2027 {
2028 return &current_cpu_datap()->cpu_kcov_data;
2029 }
2030
2031 kcov_cpu_data_t *
2032 cpu_kcov_data(int cpuid)
2033 {
2034 return &cpu_datap(cpuid)->cpu_kcov_data;
2035 }
2036
2037 #endif /* CONFIG_KCOV */
2038
2039 boolean_t
2040 machine_timeout_suspended(void)
2041 {
2042 return FALSE;
2043 }
2044
2045 kern_return_t
2046 ml_interrupt_prewarm(__unused uint64_t deadline)
2047 {
2048 return KERN_FAILURE;
2049 }
2050
2051 /*
2052 * Assumes fiq, irq disabled.
2053 */
2054 void
2055 ml_set_decrementer(uint32_t dec_value)
2056 {
2057 cpu_data_t *cdp = getCpuDatap();
2058
2059 assert(ml_get_interrupts_enabled() == FALSE);
2060 cdp->cpu_decrementer = dec_value;
2061
2062 if (cdp->cpu_set_decrementer_func) {
2063 cdp->cpu_set_decrementer_func(dec_value);
2064 } else {
2065 __builtin_arm_wsr64("CNTV_TVAL_EL0", (uint64_t)dec_value);
2066 }
2067 }
2068
2069 /**
2070 * Perform a read of the timebase which is permitted to be executed
2071 * speculatively and/or out of program order.
2072 */
2073 static inline uint64_t
2074 speculative_timebase(void)
2075 {
2076 return __builtin_arm_rsr64("CNTVCT_EL0");
2077 }
2078
2079 /**
2080 * Read a non-speculative view of the timebase if one is available,
2081 * otherwise fall back on an ISB to prevent speculation and
2082 * enforce ordering.
2083 */
2084 static inline uint64_t
2085 nonspeculative_timebase(void)
2086 {
2087 #if defined(HAS_ACNTVCT)
2088 return __builtin_arm_rsr64("ACNTVCT_EL0");
2089 #elif __ARM_ARCH_8_6__
2090 return __builtin_arm_rsr64("CNTVCTSS_EL0");
2091 #else
2092 // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2
2093 // "Reads of CNT[PV]CT[_EL0] can occur speculatively and out of order relative
2094 // to other instructions executed on the same processor."
2095 __builtin_arm_isb(ISB_SY);
2096 return speculative_timebase();
2097 #endif
2098 }
2099
2100
2101 uint64_t
2102 ml_get_hwclock()
2103 {
2104 uint64_t timebase = nonspeculative_timebase();
2105 return timebase;
2106 }
2107
2108 uint64_t
2109 ml_get_timebase()
2110 {
2111 uint64_t clock, timebase;
2112
2113 //the retry is for the case where S2R catches us in the middle of this. see rdar://77019633
2114 do {
2115 timebase = getCpuDatap()->cpu_base_timebase;
2116 os_compiler_barrier();
2117 clock = ml_get_hwclock();
2118 os_compiler_barrier();
2119 } while (getCpuDatap()->cpu_base_timebase != timebase);
2120
2121 return clock + timebase;
2122 }
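
/*
 * Usage sketch (illustrative): callers that want a duration in nanoseconds
 * typically take two timebase snapshots and convert the delta, e.g. with
 * absolutetime_to_nanoseconds() (declared in kern/clock.h; shown here only as
 * an assumed example):
 *
 *	uint64_t start = ml_get_timebase();
 *	...
 *	uint64_t elapsed_ns;
 *	absolutetime_to_nanoseconds(ml_get_timebase() - start, &elapsed_ns);
 */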
2123
2124 /**
2125 * Issue a barrier that guarantees all prior memory accesses will complete
2126 * before any subsequent timebase reads.
2127 */
2128 void
2129 ml_memory_to_timebase_fence(void)
2130 {
2131 __builtin_arm_dmb(DMB_SY);
2132 const uint64_t take_backwards_branch = 0;
2133 asm volatile (
2134 "1:"
2135 "ldr x0, [%[take_backwards_branch]]" "\n"
2136 "cbnz x0, 1b" "\n"
2137 :
2138 : [take_backwards_branch] "r"(&take_backwards_branch)
2139 : "x0"
2140 );
2141
2142 /* throwaway read to prevent ml_get_speculative_timebase() reordering */
2143 (void)ml_get_hwclock();
2144 }
2145
2146 /**
2147 * Issue a barrier that guarantees all prior timebase reads will
2148 * be ordered before any subsequent memory accesses.
2149 */
2150 void
2151 ml_timebase_to_memory_fence(void)
2152 {
2153 __builtin_arm_isb(ISB_SY);
2154 }
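
/*
 * Usage sketch for the two fences above (hypothetical record layout, not used
 * anywhere in xnu): order a payload store before the timestamp that describes
 * it, and order the timestamp read before the store that publishes the record.
 */
struct ml_timestamped_record_example {
	uint64_t payload;
	uint64_t timestamp;
	uint64_t published;
};

__unused static inline void
ml_timestamp_record_example(volatile struct ml_timestamped_record_example *rec, uint64_t payload)
{
	rec->payload = payload;
	ml_memory_to_timebase_fence();   /* payload store completes before the timebase read */
	rec->timestamp = ml_get_hwclock();
	ml_timebase_to_memory_fence();   /* timebase read ordered before the publishing store */
	rec->published = 1;
}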
2155
2156 /*
2157 * Get the speculative timebase without an ISB.
2158 */
2159 uint64_t
2160 ml_get_speculative_timebase(void)
2161 {
2162 uint64_t clock, timebase;
2163
2164 //the retry is for the case where S2R catches us in the middle of this. see rdar://77019633&77697482
2165 do {
2166 timebase = getCpuDatap()->cpu_base_timebase;
2167 os_compiler_barrier();
2168 clock = speculative_timebase();
2169
2170 os_compiler_barrier();
2171 } while (getCpuDatap()->cpu_base_timebase != timebase);
2172
2173 return clock + timebase;
2174 }
2175
2176 uint64_t
2177 ml_get_timebase_entropy(void)
2178 {
2179 return ml_get_speculative_timebase();
2180 }
2181
2182 uint32_t
2183 ml_get_decrementer(void)
2184 {
2185 cpu_data_t *cdp = getCpuDatap();
2186 uint32_t dec;
2187
2188 assert(ml_get_interrupts_enabled() == FALSE);
2189
2190 if (cdp->cpu_get_decrementer_func) {
2191 dec = cdp->cpu_get_decrementer_func();
2192 } else {
2193 uint64_t wide_val;
2194
2195 wide_val = __builtin_arm_rsr64("CNTV_TVAL_EL0");
2196 dec = (uint32_t)wide_val;
2197 assert(wide_val == (uint64_t)dec);
2198 }
2199
2200 return dec;
2201 }
2202
2203 boolean_t
2204 ml_get_timer_pending(void)
2205 {
2206 uint64_t cntv_ctl = __builtin_arm_rsr64("CNTV_CTL_EL0");
2207 return ((cntv_ctl & CNTV_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE;
2208 }
2209
2210 __attribute__((noreturn))
2211 void
2212 platform_syscall(arm_saved_state_t *state)
2213 {
2214 uint32_t code;
2215
2216 #define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */
2217
2218 code = (uint32_t)get_saved_state_reg(state, 3);
2219
2220 KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_ARM, code) | DBG_FUNC_START,
2221 get_saved_state_reg(state, 0),
2222 get_saved_state_reg(state, 1),
2223 get_saved_state_reg(state, 2));
2224
2225 switch (code) {
2226 case 2:
2227 /* set cthread */
2228 platform_syscall_kprintf("set cthread self.\n");
2229 thread_set_cthread_self(get_saved_state_reg(state, 0));
2230 break;
2231 case 3:
2232 /* get cthread */
2233 platform_syscall_kprintf("get cthread self.\n");
2234 set_saved_state_reg(state, 0, thread_get_cthread_self());
2235 break;
2236 case 0: /* I-Cache flush (removed) */
2237 case 1: /* D-Cache flush (removed) */
2238 default:
2239 platform_syscall_kprintf("unknown: %d\n", code);
2240 break;
2241 }
2242
2243 KDBG(MACHDBG_CODE(DBG_MACH_MACHDEP_EXCP_SC_ARM, code) | DBG_FUNC_END,
2244 get_saved_state_reg(state, 0));
2245
2246 thread_exception_return();
2247 }
2248
2249 static void
2250 _enable_timebase_event_stream(uint32_t bit_index)
2251 {
2252 uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */
2253
2254 if (bit_index >= 64) {
2255 panic("%s: invalid bit index (%u)", __FUNCTION__, bit_index);
2256 }
2257
2258 __asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r"(cntkctl));
2259
2260 cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT);
2261 cntkctl |= CNTKCTL_EL1_EVNTEN;
2262 cntkctl |= CNTKCTL_EL1_EVENTDIR; /* 1->0; why not? */
2263
2264 /*
2265 * If the SOC supports it (and it isn't broken), enable
2266 * EL0 access to the timebase registers.
2267 */
2268 if (user_timebase_type() != USER_TIMEBASE_NONE) {
2269 cntkctl |= (CNTKCTL_EL1_PL0PCTEN | CNTKCTL_EL1_PL0VCTEN);
2270 }
2271
2272 __builtin_arm_wsr64("CNTKCTL_EL1", cntkctl);
2273 }
2274
2275 /*
2276 * Turn timer on, unmask that interrupt.
2277 */
2278 static void
2279 _enable_virtual_timer(void)
2280 {
2281 uint64_t cntvctl = CNTV_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */
2282
2283 __builtin_arm_wsr64("CNTV_CTL_EL0", cntvctl);
2284 /* disable the physical timer as a precaution, as its registers reset to architecturally unknown values */
2285 __builtin_arm_wsr64("CNTP_CTL_EL0", CNTP_CTL_EL0_IMASKED);
2286 }
2287
2288 void
2289 fiq_context_init(boolean_t enable_fiq __unused)
2290 {
2291 /* Interrupts still disabled. */
2292 assert(ml_get_interrupts_enabled() == FALSE);
2293 _enable_virtual_timer();
2294 }
2295
2296 void
2297 wfe_timeout_init(void)
2298 {
2299 _enable_timebase_event_stream(arm64_eventi);
2300 }
2301
2302 /**
2303 * Configures, but does not enable, the WFE event stream. The event stream
2304 * generates an event at a set interval to act as a timeout for WFEs.
2305 *
2306 * This function sets the static global variable arm64_eventi to be the proper
2307 * bit index for the CNTKCTL_EL1.EVENTI field to generate events at the correct
2308 * period (1us unless specified by the "wfe_events_sec" boot-arg). arm64_eventi
2309 * is used by wfe_timeout_init to actually poke the registers and enable the
2310 * event stream.
2311 *
2312 * The CNTKCTL_EL1.EVENTI field contains the index of the bit of CNTVCT_EL0 that
2313 * is the trigger for the system to generate an event. The trigger can occur on
2314 * either the rising or falling edge of the bit depending on the value of
2315 * CNTKCTL_EL1.EVNTDIR. This is arbitrary for our purposes, so we use the
2316 * falling edge (1->0) transition to generate events.
2317 */
2318 void
2319 wfe_timeout_configure(void)
2320 {
2321 /* Could fill in our own ops here, if we needed them */
2322 uint64_t ticks_per_sec, ticks_per_event, events_per_sec = 0;
2323 uint32_t bit_index;
2324
2325 if (PE_parse_boot_argn("wfe_events_sec", &events_per_sec, sizeof(events_per_sec))) {
2326 if (events_per_sec <= 0) {
2327 events_per_sec = 1;
2328 } else if (events_per_sec > USEC_PER_SEC) {
2329 events_per_sec = USEC_PER_SEC;
2330 }
2331 } else {
2332 events_per_sec = USEC_PER_SEC;
2333 }
2334 ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz;
2335 ticks_per_event = ticks_per_sec / events_per_sec;
2336
2337 /* Bit index of next power of two greater than ticks_per_event */
2338 bit_index = flsll(ticks_per_event) - 1;
2339 /* Round up to next power of two if ticks_per_event is initially power of two */
2340 if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) {
2341 bit_index++;
2342 }
2343
2344 /*
2345 * The timer can only trigger on rising or falling edge, not both; we don't
2346 * care which we trigger on, but we do need to adjust which bit we are
2347 * interested in to account for this.
2348 *
2349 * In particular, we set CNTKCTL_EL1.EVENTDIR to trigger events on the
2350 * falling edge of the given bit. Therefore, we must decrement the bit index
2351 * by one as when the bit before the one we care about makes a 1 -> 0
2352 * transition, the bit we care about makes a 0 -> 1 transition.
2353 *
2354 * For example if we want an event generated every 8 ticks (if we calculated
2355 * a bit_index of 3), we would want the event to be generated whenever the
2356 * lower four bits of the counter transition from 0b0111 -> 0b1000. We can
2357 * see that the bit at index 2 makes a falling transition in this scenario,
2358 * so we would want EVENTI to be 2 instead of 3.
2359 */
2360 if (bit_index != 0) {
2361 bit_index--;
2362 }
2363
2364 arm64_eventi = bit_index;
2365 }
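
/*
 * Worked example (illustrative numbers): with a hypothetical 24 MHz timebase
 * and the default 1,000,000 events per second:
 *
 *	ticks_per_event = 24000000 / 1000000 = 24 (0b11000)
 *	bit_index = flsll(24) - 1 = 4
 *	24 & ((1 << 4) - 1) = 8, non-zero, so round up: bit_index = 5
 *	falling-edge adjustment: bit_index = 4
 *
 * i.e. CNTKCTL_EL1.EVENTI = 4, which generates an event every 2^5 = 32 ticks
 * (~1.33us), the smallest power-of-two period that is at least 1us.
 */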
2366
2367 boolean_t
2368 ml_delay_should_spin(uint64_t interval)
2369 {
2370 cpu_data_t *cdp = getCpuDatap();
2371
2372 if (cdp->cpu_idle_latency) {
2373 return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
2374 } else {
2375 /*
2376 * Early boot, latency is unknown. Err on the side of blocking,
2377 * which should always be safe, even if slow
2378 */
2379 return FALSE;
2380 }
2381 }
2382
2383 boolean_t
2384 ml_thread_is64bit(thread_t thread)
2385 {
2386 return thread_is_64bit_addr(thread);
2387 }
2388
2389 void
2390 ml_delay_on_yield(void)
2391 {
2392 #if DEVELOPMENT || DEBUG
2393 if (yield_delay_us) {
2394 delay(yield_delay_us);
2395 }
2396 #endif
2397 }
2398
2399 void
2400 ml_timer_evaluate(void)
2401 {
2402 }
2403
2404 boolean_t
2405 ml_timer_forced_evaluation(void)
2406 {
2407 return FALSE;
2408 }
2409
2410 void
2411 ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
2412 {
2413 /*
2414 * For now: update the resource coalition stats of the
2415 * current thread's coalition
2416 */
2417 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
2418 }
2419
2420 uint64_t
2421 ml_gpu_stat(__unused thread_t t)
2422 {
2423 return 0;
2424 }
2425
2426 thread_t
2427 current_thread(void)
2428 {
2429 return current_thread_fast();
2430 }
2431
2432 typedef struct{
2433 ex_cb_t cb;
2434 void *refcon;
2435 }
2436 ex_cb_info_t;
2437
2438 ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX];
2439
2440 /*
2441 * Callback registration
2442 * Currently we support only one registered callback per class but
2443 * it should be possible to support more callbacks
2444 */
2445 kern_return_t
2446 ex_cb_register(
2447 ex_cb_class_t cb_class,
2448 ex_cb_t cb,
2449 void *refcon)
2450 {
2451 ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
2452
2453 if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) {
2454 return KERN_INVALID_VALUE;
2455 }
2456
2457 if (NULL == pInfo->cb) {
2458 pInfo->cb = cb;
2459 pInfo->refcon = refcon;
2460 return KERN_SUCCESS;
2461 }
2462 return KERN_FAILURE;
2463 }
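
/*
 * Usage sketch (hypothetical handler, not registered by xnu). A handler has
 * the shape invoked by ex_cb_invoke() below; the concrete ex_cb_class_t value
 * passed to ex_cb_register() is the caller's choice (anything below
 * EXCB_CLASS_MAX).
 */
__unused static ex_cb_action_t
ex_cb_example_handler(ex_cb_class_t cb_class, void *refcon, ex_cb_state_t *state)
{
	(void)cb_class;
	(void)refcon;
	(void)state;    /* carries the faulting address captured at invoke time */
	return EXCB_ACTION_NONE;
}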
2464
2465 /*
2466 * Called internally by the platform kernel to invoke the registered callback for a class.
2467 */
2468 ex_cb_action_t
2469 ex_cb_invoke(
2470 ex_cb_class_t cb_class,
2471 vm_offset_t far)
2472 {
2473 ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
2474 ex_cb_state_t state = {far};
2475
2476 if (cb_class >= EXCB_CLASS_MAX) {
2477 panic("Invalid exception callback class 0x%x", cb_class);
2478 }
2479
2480 if (pInfo->cb) {
2481 return pInfo->cb(cb_class, pInfo->refcon, &state);
2482 }
2483 return EXCB_ACTION_NONE;
2484 }
2485
2486 #if defined(HAS_APPLE_PAC)
2487 void
2488 ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop)
2489 {
2490 assert(task);
2491 task->disable_user_jop = disable_user_jop;
2492 }
2493
2494 void
2495 ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop)
2496 {
2497 assert(thread);
2498 if (disable_user_jop) {
2499 thread->machine.arm_machine_flags |= ARM_MACHINE_THREAD_DISABLE_USER_JOP;
2500 } else {
2501 thread->machine.arm_machine_flags &= ~ARM_MACHINE_THREAD_DISABLE_USER_JOP;
2502 }
2503 }
2504
2505 void
2506 ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit)
2507 {
2508 if (inherit) {
2509 task->rop_pid = parent_task->rop_pid;
2510 } else {
2511 task->rop_pid = early_random();
2512 }
2513 }
2514
2515 /**
2516 * jop_pid may be inherited from the parent task or generated inside the shared
2517 * region. Unfortunately these two parameters are available at very different
2518 * times during task creation, so we need to split this into two steps.
2519 */
2520 void
2521 ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit)
2522 {
2523 if (inherit) {
2524 task->jop_pid = parent_task->jop_pid;
2525 } else {
2526 task->jop_pid = ml_default_jop_pid();
2527 }
2528 }
2529
2530 void
2531 ml_task_set_jop_pid_from_shared_region(task_t task)
2532 {
2533 vm_shared_region_t sr = vm_shared_region_get(task);
2534 /*
2535 * If there's no shared region, we can assign the key arbitrarily. This
2536 * typically happens when Mach-O image activation failed part of the way
2537 * through, and this task is in the middle of dying with SIGKILL anyway.
2538 */
2539 if (__improbable(!sr)) {
2540 task->jop_pid = early_random();
2541 return;
2542 }
2543 vm_shared_region_deallocate(sr);
2544
2545 /*
2546 * Similarly we have to worry about jetsam having killed the task and
2547 * already cleared the shared_region_id.
2548 */
2549 task_lock(task);
2550 if (task->shared_region_id != NULL) {
2551 task->jop_pid = shared_region_find_key(task->shared_region_id);
2552 } else {
2553 task->jop_pid = early_random();
2554 }
2555 task_unlock(task);
2556 }
2557
2558 void
2559 ml_thread_set_jop_pid(thread_t thread, task_t task)
2560 {
2561 thread->machine.jop_pid = task->jop_pid;
2562 }
2563 #endif /* defined(HAS_APPLE_PAC) */
2564
2565 #if DEVELOPMENT || DEBUG
2566 static uint64_t minor_badness_suffered = 0;
2567 #endif
2568 void
2569 ml_report_minor_badness(uint32_t __unused badness_id)
2570 {
2571 #if DEVELOPMENT || DEBUG
2572 (void)os_atomic_or(&minor_badness_suffered, 1ULL << badness_id, relaxed);
2573 #endif
2574 }
2575
2576 #if defined(HAS_APPLE_PAC)
2577 #if __ARM_ARCH_8_6__ || APPLEVIRTUALPLATFORM
2578 /**
2579 * The ARMv8.6 implementation is also safe for non-FPAC CPUs, but less efficient;
2580 * guest kernels need to use it because they do not know at compile time whether
2581 * the host CPU supports FPAC.
2582 */
2583
2584 /**
2585 * Emulates the poisoning done by ARMv8.3-PAuth instructions on auth failure.
2586 */
2587 static void *
2588 ml_poison_ptr(void *ptr, ptrauth_key key)
2589 {
2590 bool b_key = key & (1ULL << 0);
2591 uint64_t error_code;
2592 if (b_key) {
2593 error_code = 2;
2594 } else {
2595 error_code = 1;
2596 }
2597
2598 bool kernel_pointer = (uintptr_t)ptr & (1ULL << 55);
2599 bool data_key = key & (1ULL << 1);
2600 /* When PAC is enabled, only userspace data pointers use TBI, regardless of boot parameters */
2601 bool tbi = data_key && !kernel_pointer;
2602 unsigned int poison_shift;
2603 if (tbi) {
2604 poison_shift = 53;
2605 } else {
2606 poison_shift = 61;
2607 }
2608
2609 uintptr_t poisoned = (uintptr_t)ptr;
2610 poisoned &= ~(3ULL << poison_shift);
2611 poisoned |= error_code << poison_shift;
2612 return (void *)poisoned;
2613 }
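
/*
 * Worked example (illustrative): authenticating a kernel data pointer with the
 * DB key (ptrauth_key_asdb = 3). b_key and data_key are both set, the pointer
 * has bit 55 set (kernel), so tbi is false and poison_shift is 61: bits
 * [62:61] of the returned pointer are forced to 0b10, mirroring the poisoning
 * an ARMv8.3 AUT* instruction performs on authentication failure.
 */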
2614
2615 /*
2616 * ptrauth_sign_unauthenticated() reimplemented using asm volatile, forcing the
2617 * compiler to assume this operation has side-effects and cannot be reordered
2618 */
2619 #define ptrauth_sign_volatile(__value, __suffix, __data) \
2620 ({ \
2621 void *__ret = __value; \
2622 asm volatile ( \
2623 "pac" #__suffix " %[value], %[data]" \
2624 : [value] "+r"(__ret) \
2625 : [data] "r"(__data) \
2626 ); \
2627 __ret; \
2628 })
2629
2630 #define ml_auth_ptr_unchecked_for_key(_ptr, _suffix, _key, _modifier) \
2631 do { \
2632 void *stripped = ptrauth_strip(_ptr, _key); \
2633 void *reauthed = ptrauth_sign_volatile(stripped, _suffix, _modifier); \
2634 if (__probable(_ptr == reauthed)) { \
2635 _ptr = stripped; \
2636 } else { \
2637 _ptr = ml_poison_ptr(stripped, _key); \
2638 } \
2639 } while (0)
2640
2641 #define _ml_auth_ptr_unchecked(_ptr, _suffix, _modifier) \
2642 ml_auth_ptr_unchecked_for_key(_ptr, _suffix, ptrauth_key_as ## _suffix, _modifier)
2643 #else
2644 #define _ml_auth_ptr_unchecked(_ptr, _suffix, _modifier) \
2645 asm volatile ("aut" #_suffix " %[ptr], %[modifier]" : [ptr] "+r"(_ptr) : [modifier] "r"(_modifier));
2646 #endif /* __ARM_ARCH_8_6__ || APPLEVIRTUALPLATFORM */
2647
2648 /**
2649 * Authenticates a signed pointer without trapping on failure.
2650 *
2651 * @warning This function must be called with interrupts disabled.
2652 *
2653 * @warning Pointer authentication failure should normally be treated as a fatal
2654 * error. This function is intended for a handful of callers that cannot panic
2655 * on failure, and that understand the risks in handling a poisoned return
2656 * value. Other code should generally use the trapping variant
2657 * ptrauth_auth_data() instead.
2658 *
2659 * @param ptr the pointer to authenticate
2660 * @param key which key to use for authentication
2661 * @param modifier a modifier to mix into the key
2662 * @return an authenticated version of ptr, possibly with poison bits set
2663 */
2664 void *
2665 ml_auth_ptr_unchecked(void *ptr, ptrauth_key key, uint64_t modifier)
2666 {
2667 switch (key & 0x3) {
2668 case ptrauth_key_asia:
2669 _ml_auth_ptr_unchecked(ptr, ia, modifier);
2670 break;
2671 case ptrauth_key_asib:
2672 _ml_auth_ptr_unchecked(ptr, ib, modifier);
2673 break;
2674 case ptrauth_key_asda:
2675 _ml_auth_ptr_unchecked(ptr, da, modifier);
2676 break;
2677 case ptrauth_key_asdb:
2678 _ml_auth_ptr_unchecked(ptr, db, modifier);
2679 break;
2680 }
2681
2682 return ptr;
2683 }
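
/*
 * Illustrative sketch (hypothetical wrapper, not part of xnu): authenticate a
 * DA-key-signed pointer with a caller-supplied discriminator. Interrupts must
 * already be disabled, and the result may carry poison bits rather than
 * trapping, per the warnings above.
 */
__unused static inline void *
ml_auth_ptr_unchecked_example(void *signed_ptr, uint64_t discriminator)
{
	return ml_auth_ptr_unchecked(signed_ptr, ptrauth_key_asda, discriminator);
}
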
2684 #endif /* defined(HAS_APPLE_PAC) */
2685
2686 #ifdef CONFIG_XNUPOST
2687 void
2688 ml_expect_fault_begin(expected_fault_handler_t expected_fault_handler, uintptr_t expected_fault_addr)
2689 {
2690 thread_t thread = current_thread();
2691 thread->machine.expected_fault_handler = expected_fault_handler;
2692 thread->machine.expected_fault_addr = expected_fault_addr;
2693 }
2694
2695 void
2696 ml_expect_fault_end(void)
2697 {
2698 thread_t thread = current_thread();
2699 thread->machine.expected_fault_handler = NULL;
2700 thread->machine.expected_fault_addr = 0;
2701 }
2702 #endif /* CONFIG_XNUPOST */
2703
2704 void
2705 ml_hibernate_active_pre(void)
2706 {
2707 #if HIBERNATION
2708 if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) {
2709
2710 hibernate_rebuild_vm_structs();
2711 }
2712 #endif /* HIBERNATION */
2713 }
2714
2715 void
2716 ml_hibernate_active_post(void)
2717 {
2718 #if HIBERNATION
2719 if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) {
2720 hibernate_machine_init();
2721 hibernate_vm_lock_end();
2722 current_cpu_datap()->cpu_hibernate = 0;
2723 }
2724 #endif /* HIBERNATION */
2725 }
2726
2727 /**
2728 * Return back a machine-dependent array of address space regions that should be
2729 * reserved by the VM (pre-mapped in the address space). This will prevent user
2730 * processes from allocating or deallocating from within these regions.
2731 *
2732 * @param vm_is64bit True if the process has a 64-bit address space.
2733 * @param regions An out parameter representing an array of regions to reserve.
2734 *
2735 * @return The number of reserved regions returned through `regions`.
2736 */
2737 size_t
2738 ml_get_vm_reserved_regions(bool vm_is64bit, const struct vm_reserved_region **regions)
2739 {
2740 assert(regions != NULL);
2741
2742 /**
2743 * Reserved regions only apply to 64-bit address spaces. This is because
2744 * we only expect to grow the maximum user VA address on 64-bit address spaces
2745 * (we've essentially already reached the max for 32-bit spaces). The reserved
2746 * regions should safely fall outside of the max user VA for 32-bit processes.
2747 */
2748 if (vm_is64bit) {
2749 *regions = vm_reserved_regions;
2750 return ARRAY_COUNT(vm_reserved_regions);
2751 } else {
2752 /* Don't reserve any VA regions on arm64_32 processes. */
2753 *regions = NULL;
2754 return 0;
2755 }
2756 }
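
/*
 * Usage sketch (illustrative): a 64-bit task-creation path could reserve the
 * returned ranges before the process starts mapping memory, e.g.
 *
 *	const struct vm_reserved_region *regions = NULL;
 *	size_t count = ml_get_vm_reserved_regions(TRUE, &regions);
 *	for (size_t i = 0; i < count; i++) {
 *		// pre-map / reserve regions[i] in the new vm_map
 *	}
 */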
2757
2758 /* These WFE recommendations are expected to be updated on a relatively
2759 * infrequent cadence, possibly from a different cluster, hence
2760 * false cacheline sharing isn't expected to be material
2761 */
2762 static uint64_t arm64_cluster_wfe_recs[MAX_CPU_CLUSTERS];
2763
2764 uint32_t
2765 ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, __unused uint64_t wfe_hint_flags)
2766 {
2767 assert(wfe_cluster_id < MAX_CPU_CLUSTERS);
2768 assert(wfe_timeout_abstime_interval <= ml_wfe_hint_max_interval);
2769 os_atomic_store(&arm64_cluster_wfe_recs[wfe_cluster_id], wfe_timeout_abstime_interval, relaxed);
2770 return 0; /* Success */
2771 }
2772
2773 #if DEVELOPMENT || DEBUG
2774 int wfe_rec_max = 0;
2775 int wfe_rec_none = 0;
2776 uint64_t wfe_rec_override_mat = 0;
2777 uint64_t wfe_rec_clamp = 0;
2778 #endif
2779
2780 uint64_t
2781 ml_cluster_wfe_timeout(uint32_t wfe_cluster_id)
2782 {
2783 /* This and its consumer do not synchronize vis-a-vis updates
2784 * of the recommendation; races are acceptable.
2785 */
2786 uint64_t wfet = os_atomic_load(&arm64_cluster_wfe_recs[wfe_cluster_id], relaxed);
2787 #if DEVELOPMENT || DEBUG
2788 if (wfe_rec_clamp) {
2789 wfet = MIN(wfe_rec_clamp, wfet);
2790 }
2791
2792 if (wfe_rec_max) {
2793 for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
2794 if (arm64_cluster_wfe_recs[i] > wfet) {
2795 wfet = arm64_cluster_wfe_recs[i];
2796 }
2797 }
2798 }
2799
2800 if (wfe_rec_none) {
2801 wfet = 0;
2802 }
2803
2804 if (wfe_rec_override_mat) {
2805 wfet = wfe_rec_override_mat;
2806 }
2807 #endif
2808 return wfet;
2809 }
2810
2811 __pure2 bool
2812 ml_addr_in_non_xnu_stack(__unused uintptr_t addr)
2813 {
2814 #if XNU_MONITOR
2815 return (addr >= (uintptr_t)pmap_stacks_start) && (addr < (uintptr_t)pmap_stacks_end);
2816 #else
2817 return false;
2818 #endif /* XNU_MONITOR */
2819 }
2820
2821 uint64_t
2822 ml_get_backtrace_pc(struct arm_saved_state *state)
2823 {
2824 assert((state != NULL) && is_saved_state64(state));
2825
2826
2827 return get_saved_state_pc(state);
2828 }
2829
2830