xref: /xnu-8020.140.41/iokit/Kernel/arm/AppleARMSMP.cpp (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 extern "C" {
30 #include <kern/debug.h>
31 #include <pexpert/pexpert.h>
32 };
33 
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kperf/kperf.h>
37 #include <machine/machine_routines.h>
38 #include <libkern/OSAtomic.h>
39 #include <libkern/c++/OSCollection.h>
40 #include <IOKit/IODeviceTreeSupport.h>
41 #include <IOKit/IOLib.h>
42 #include <IOKit/IOPlatformActions.h>
43 #include <IOKit/IOPMGR.h>
44 #include <IOKit/IOReturn.h>
45 #include <IOKit/IOService.h>
46 #include <IOKit/PassthruInterruptController.h>
47 #include <IOKit/pwr_mgt/RootDomain.h>
48 #include <IOKit/pwr_mgt/IOPMPrivate.h>
49 #include <Kernel/IOKitKernelInternal.h>
50 
#if USE_APPLEARMSMP

// FIXME: These are in <kern/misc_protos.h> but that file has other deps that aren't being resolved
extern "C" void console_suspend();
extern "C" void console_resume();

// Pass-through controller that receives external interrupts and forwards
// them to the child AIC driver (see PE_handle_ext_interrupt()).
static PassthruInterruptController *gCPUIC;
// Platform power-manager driver; drives CPU idle transitions and core
// power up/down (enableCPUCore/disableCPUCore).
static IOPMGR *gPMGR;
// The child interrupt controller obtained from gCPUIC; used to send IPIs
// when aic_ipis is true.
static IOInterruptController *gAIC;
// True when IPIs must go through the AIC driver (legacy 3-vector, !HAS_IPI
// chips); false when the fast ml_cpu_signal() path is available.
static bool aic_ipis = false;
// Immutable CPU/cluster topology from EDT, captured once in IOCPUInitialize().
static const ml_topology_info *topology_info;

// cpu_id of the boot processor
static unsigned int boot_cpu;

// array index is a cpu_id (so some elements may be NULL)
static processor_t *machProcessors;
68 
69 static void
processor_idle_wrapper(cpu_id_t,boolean_t enter,uint64_t * new_timeout_ticks)70 processor_idle_wrapper(cpu_id_t /*cpu_id*/, boolean_t enter, uint64_t *new_timeout_ticks)
71 {
72 	if (enter) {
73 		gPMGR->enterCPUIdle(new_timeout_ticks);
74 	} else {
75 		gPMGR->exitCPUIdle(new_timeout_ticks);
76 	}
77 }
78 
79 static void
idle_timer_wrapper(void *,uint64_t * new_timeout_ticks)80 idle_timer_wrapper(void */*refCon*/, uint64_t *new_timeout_ticks)
81 {
82 	gPMGR->updateCPUIdle(new_timeout_ticks);
83 }
84 
85 static OSDictionary *
matching_dict_for_cpu_id(unsigned int cpu_id)86 matching_dict_for_cpu_id(unsigned int cpu_id)
87 {
88 	// The cpu-id property in EDT doesn't necessarily match the dynamically
89 	// assigned logical ID in XNU, so look up the cpu node by the physical
90 	// (cluster/core) ID instead.
91 	OSSymbolConstPtr cpuTypeSymbol = OSSymbol::withCString("cpu");
92 	OSSymbolConstPtr cpuIdSymbol = OSSymbol::withCString("reg");
93 	OSDataPtr cpuId = OSData::withValue(topology_info->cpus[cpu_id].phys_id);
94 
95 	OSDictionary *propMatch = OSDictionary::withCapacity(4);
96 	propMatch->setObject(gIODTTypeKey, cpuTypeSymbol);
97 	propMatch->setObject(cpuIdSymbol, cpuId);
98 
99 	OSDictionary *matching = IOService::serviceMatching("IOPlatformDevice");
100 	matching->setObject(gIOPropertyMatchKey, propMatch);
101 
102 	propMatch->release();
103 	cpuTypeSymbol->release();
104 	cpuIdSymbol->release();
105 	cpuId->release();
106 
107 	return matching;
108 }
109 
/*
 * Wire up the per-CPU interrupt vectors for |cpu_info|'s IOPlatformDevice:
 * IPI handlers on vectors 0 and 2 (legacy 3-vector chips) and the PMI
 * handler on vector 1.  Panics on any registration failure.
 */
static void
register_aic_handlers(const ml_topology_cpu *cpu_info,
    ipi_handler_t ipi_handler,
    perfmon_interrupt_handler_func pmi_handler)
{
	// Blocks until the CPU nub is published by the platform driver.
	OSDictionary *matching = matching_dict_for_cpu_id(cpu_info->cpu_id);
	IOService *cpu = IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	OSArray *irqs = (OSArray *) cpu->getProperty(gIOInterruptSpecifiersKey);
	if (!irqs) {
		panic("Error finding interrupts for CPU %d", cpu_info->cpu_id);
	}

	// The number of interrupt specifiers distinguishes legacy from modern
	// configurations (3 vectors vs. 1).
	unsigned int irqcount = irqs->getCount();

	if (irqcount == 3) {
		// Legacy configuration, for !HAS_IPI chips (pre-Skye).
		if (cpu->registerInterrupt(0, NULL, (IOInterruptAction)ipi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(0) != kIOReturnSuccess ||
		    cpu->registerInterrupt(2, NULL, (IOInterruptAction)ipi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(2) != kIOReturnSuccess) {
			panic("Error registering IPIs");
		}
#if !defined(HAS_IPI)
		// Ideally this should be decided by EDT, but first we need to update EDT
		// to default to fast IPIs on modern platforms.
		aic_ipis = true;
#endif
	}

	// Conditional, because on Skye and later, we use an FIQ instead of an external IRQ.
	if (pmi_handler && irqcount == 1) {
		if (cpu->registerInterrupt(1, NULL, (IOInterruptAction)pmi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(1) != kIOReturnSuccess) {
			panic("Error registering PMI");
		}
	}
}
149 
/*
 * One-shot kernel thread (spawned by IOCPUInitialize) that performs SMP
 * bring-up: waits for the platform expert, interrupt controller, and power
 * manager to publish, then registers and starts every CPU described by the
 * EDT topology.
 */
static void
cpu_boot_thread(void * /*unused0*/, wait_result_t /*unused1*/)
{
	// Wait for the platform expert so the IOKit device tree is populated.
	OSDictionary *matching = IOService::serviceMatching("IOPlatformExpert");
	IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	gCPUIC = new PassthruInterruptController;
	if (!gCPUIC || !gCPUIC->init()) {
		panic("Can't initialize PassthruInterruptController");
	}
	// Blocks until the real AIC driver attaches beneath the passthru controller.
	gAIC = static_cast<IOInterruptController *>(gCPUIC->waitForChildController());

	ml_set_max_cpus(topology_info->max_cpu_id + 1);

	matching = IOService::serviceMatching("IOPMGR");
	gPMGR = OSDynamicCast(IOPMGR,
	    IOService::waitForMatchingService(matching, UINT64_MAX));
	matching->release();

	// Indexed by cpu_id and sized to max_cpu_id+1, so sparse cpu_ids leave
	// NULL holes.  zalloc_permanent memory is never freed.
	const size_t array_size = (topology_info->max_cpu_id + 1) * sizeof(*machProcessors);
	machProcessors = static_cast<processor_t *>(zalloc_permanent(array_size, ZALIGN_PTR));

	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
		const unsigned int cpu_id = cpu_info->cpu_id;
		ml_processor_info_t this_processor_info;
		ipi_handler_t ipi_handler;
		perfmon_interrupt_handler_func pmi_handler;

		memset(&this_processor_info, 0, sizeof(this_processor_info));
		// cpu_id_t is opaque; the logical ID is smuggled through it and
		// recovered by target_to_cpu_id().
		this_processor_info.cpu_id = reinterpret_cast<cpu_id_t>(cpu_id);
		this_processor_info.phys_id = cpu_info->phys_id;
		this_processor_info.log_id = cpu_id;
		this_processor_info.cluster_id = cpu_info->cluster_id;
		this_processor_info.cluster_type = cpu_info->cluster_type;
		this_processor_info.l2_cache_size = cpu_info->l2_cache_size;
		this_processor_info.l2_cache_id = cpu_info->l2_cache_id;
		this_processor_info.l3_cache_size = cpu_info->l3_cache_size;
		this_processor_info.l3_cache_id = cpu_info->l3_cache_id;

		// initCPUIdle must run before the idle callbacks are installed.
		gPMGR->initCPUIdle(&this_processor_info);
		this_processor_info.processor_idle = &processor_idle_wrapper;
		this_processor_info.idle_timer = &idle_timer_wrapper;

		// Registration hands back the Mach processor_t plus the IPI/PMI
		// handlers to hook into the AIC.
		kern_return_t result = ml_processor_register(&this_processor_info,
		    &machProcessors[cpu_id], &ipi_handler, &pmi_handler);
		// NOTE(review): only KERN_FAILURE is treated as fatal; confirm
		// ml_processor_register cannot return other non-success codes.
		if (result == KERN_FAILURE) {
			panic("ml_processor_register failed: %d", result);
		}
		register_aic_handlers(cpu_info, ipi_handler, pmi_handler);

		if (processor_start(machProcessors[cpu_id]) != KERN_SUCCESS) {
			panic("processor_start failed");
		}
	}
	ml_cpu_init_completed();
	// Signal to waiters (e.g. other drivers) that all CPUs are up.
	IOService::publishResource(gIOAllCPUInitializedKey, kOSBooleanTrue);
}
209 
210 void
IOCPUInitialize(void)211 IOCPUInitialize(void)
212 {
213 	topology_info = ml_get_topology_info();
214 	boot_cpu = topology_info->boot_cpu->cpu_id;
215 
216 	thread_t thread;
217 	kernel_thread_start(&cpu_boot_thread, NULL, &thread);
218 	thread_set_thread_name(thread, "cpu_boot_thread");
219 	thread_deallocate(thread);
220 }
221 
222 static unsigned int
target_to_cpu_id(cpu_id_t in)223 target_to_cpu_id(cpu_id_t in)
224 {
225 	return (unsigned int)(uintptr_t)in;
226 }
227 
228 // Release a secondary CPU from reset.  Runs from a different CPU (obviously).
229 kern_return_t
PE_cpu_start(cpu_id_t target,vm_offset_t,vm_offset_t)230 PE_cpu_start(cpu_id_t target,
231     vm_offset_t /*start_paddr*/, vm_offset_t /*arg_paddr*/)
232 {
233 	unsigned int cpu_id = target_to_cpu_id(target);
234 
235 	if (cpu_id != boot_cpu) {
236 		extern unsigned int LowResetVectorBase;
237 		gPMGR->enableCPUCore(cpu_id, ml_vtophys((vm_offset_t)&LowResetVectorBase));
238 	}
239 	return KERN_SUCCESS;
240 }
241 
// Initialize a CPU when it first comes up.  Runs on the target CPU.
// |bootb| is true on the initial boot, false on S2R resume.
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	// On resume (not first boot), the boot CPU replays the platform
	// "active" actions while the system is still quiescing.
	if (!bootb && cpu_id == boot_cpu && ml_is_quiescing()) {
		IOCPURunPlatformActiveActions();
	}

	ml_broadcast_cpu_event(CPU_BOOTED, cpu_id);

	// Send myself an IPI to clear SIGPdisabled.  Hang here if IPIs are broken.
	// (Probably only works on the boot CPU.)
	PE_cpu_signal(target, target);
	while (ml_get_interrupts_enabled() && !ml_cpu_signal_is_enabled()) {
		OSMemoryBarrier();
	}
}
262 
263 void
PE_cpu_halt(cpu_id_t target)264 PE_cpu_halt(cpu_id_t target)
265 {
266 	unsigned int cpu_id = target_to_cpu_id(target);
267 	processor_exit(machProcessors[cpu_id]);
268 }
269 
270 void
PE_cpu_signal(cpu_id_t,cpu_id_t target)271 PE_cpu_signal(cpu_id_t /*source*/, cpu_id_t target)
272 {
273 	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
274 	if (aic_ipis) {
275 		gAIC->sendIPI(cpu->cpu_id, false);
276 	} else {
277 		ml_cpu_signal(cpu->phys_id);
278 	}
279 }
280 
281 void
PE_cpu_signal_deferred(cpu_id_t,cpu_id_t target)282 PE_cpu_signal_deferred(cpu_id_t /*source*/, cpu_id_t target)
283 {
284 	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
285 	if (aic_ipis) {
286 		gAIC->sendIPI(cpu->cpu_id, true);
287 	} else {
288 		ml_cpu_signal_deferred(cpu->phys_id);
289 	}
290 }
291 
292 void
PE_cpu_signal_cancel(cpu_id_t,cpu_id_t target)293 PE_cpu_signal_cancel(cpu_id_t /*source*/, cpu_id_t target)
294 {
295 	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
296 	if (aic_ipis) {
297 		gAIC->cancelDeferredIPI(cpu->cpu_id);
298 	} else {
299 		ml_cpu_signal_retract(cpu->phys_id);
300 	}
301 }
302 
// Brings down one CPU core for S2R.  Runs on the target CPU.
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (cpu_id == boot_cpu) {
		// The boot CPU runs the platform quiesce actions; secondary
		// cores are instead powered down via the power manager.
		IOCPURunPlatformQuiesceActions();
	} else {
		gPMGR->disableCPUCore(cpu_id);
	}

	ml_broadcast_cpu_event(CPU_DOWN, cpu_id);
	// Enter the low-level ARM sleep path.
	ml_arm_sleep();
}
318 
319 // Takes one secondary CPU core offline at runtime.  Runs on the target CPU.
320 // Returns true if the platform code should go into deep sleep WFI, false otherwise.
321 bool
PE_cpu_down(cpu_id_t target)322 PE_cpu_down(cpu_id_t target)
323 {
324 	unsigned int cpu_id = target_to_cpu_id(target);
325 	assert(cpu_id != boot_cpu);
326 	gPMGR->disableCPUCore(cpu_id);
327 	return false;
328 }
329 
330 void
PE_handle_ext_interrupt(void)331 PE_handle_ext_interrupt(void)
332 {
333 	gCPUIC->externalInterrupt();
334 }
335 
/*
 * Put the whole machine to sleep for S2R: run pre-sleep platform actions,
 * take down the secondary CPUs, then sleep the boot CPU.  Execution resumes
 * here after wake, where the sequence is unwound (with OS-specific ordering).
 */
void
IOCPUSleepKernel(void)
{
	IOPMrootDomain  *rootDomain = IOService::getPMRootDomain();
	unsigned int i;

	printf("IOCPUSleepKernel enter\n");
#if defined(__arm64__)
	sched_override_recommended_cores_for_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the non-boot CPUs.
	ml_set_is_quiescing(true);
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_exit(machProcessors[cpu_id]);
		}
	}

	// Console must be quiesced after secondaries go down, before the boot CPU does.
	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */
	processor_exit(machProcessors[boot_cpu]);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	ml_set_is_quiescing(false);

#if defined(XNU_TARGET_OS_OSX)
	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Restart the secondary CPUs that were exited above.
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_start(machProcessors[cpu_id]);
		}
	}

#if defined(__arm64__)
	sched_restore_recommended_cores_after_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

#else /* defined(!XNU_TARGET_OS_OSX) */
	// Keep the old ordering around for iOS temporarily - rdar://88891040

	rootDomain->start_watchdog_timer();
	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

	console_resume();

	IOPlatformActionsPostResume();
	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Restart the secondary CPUs that were exited above.
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_start(machProcessors[cpu_id]);
		}
	}

#if defined(__arm64__)
	sched_restore_recommended_cores_after_sleep();
#endif

#endif /* defined(XNU_TARGET_OS_OSX) */

	// Restore the priority saved before the boost above.
	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}
438 
439 #endif /* USE_APPLEARMSMP */
440