xref: /xnu-8020.101.4/iokit/Kernel/IOCPU.cpp (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 1999-2016 Apple Inc.  All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #define IOKIT_ENABLE_SHARED_PTR
30 
31 extern "C" {
32 #include <pexpert/pexpert.h>
33 #include <kern/cpu_number.h>
34 extern void kperf_kernel_configure(char *);
35 }
36 
37 #include <machine/machine_routines.h>
38 #include <IOKit/IOLib.h>
39 #include <IOKit/IOPlatformExpert.h>
40 #include <IOKit/pwr_mgt/RootDomain.h>
41 #include <IOKit/pwr_mgt/IOPMPrivate.h>
42 #include <libkern/c++/OSSharedPtr.h>
43 #include <IOKit/IOUserClient.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45 #include <IOKit/IOCPU.h>
46 #include "IOKitKernelInternal.h"
47 
48 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
49 
50 #include <kern/queue.h>
51 #include <kern/sched_prim.h>
52 
53 extern "C" void console_suspend();
54 extern "C" void console_resume();
55 extern "C" void sched_override_recommended_cores_for_sleep(void);
56 extern "C" void sched_restore_recommended_cores_after_sleep(void);
57 
58 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
59 
60 static IOLock *gIOCPUsLock;
61 static OSSharedPtr<OSArray> gIOCPUs;
62 static OSSharedPtr<const OSSymbol> gIOCPUStateKey;
63 static OSSharedPtr<OSString> gIOCPUStateNames[kIOCPUStateCount];
64 
65 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
66 
67 #if !USE_APPLEARMSMP
68 
69 void
IOCPUInitialize(void)70 IOCPUInitialize(void)
71 {
72 	gIOCPUsLock = IOLockAlloc();
73 	gIOCPUs     = OSArray::withCapacity(1);
74 
75 	gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");
76 
77 	gIOCPUStateNames[kIOCPUStateUnregistered] =
78 	    OSString::withCStringNoCopy("Unregistered");
79 	gIOCPUStateNames[kIOCPUStateUninitalized] =
80 	    OSString::withCStringNoCopy("Uninitalized");
81 	gIOCPUStateNames[kIOCPUStateStopped] =
82 	    OSString::withCStringNoCopy("Stopped");
83 	gIOCPUStateNames[kIOCPUStateRunning] =
84 	    OSString::withCStringNoCopy("Running");
85 }
86 
87 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
88 
89 kern_return_t
PE_cpu_start(cpu_id_t target,vm_offset_t start_paddr,vm_offset_t arg_paddr)90 PE_cpu_start(cpu_id_t target,
91     vm_offset_t start_paddr, vm_offset_t arg_paddr)
92 {
93 	IOCPU *targetCPU = (IOCPU *)target;
94 
95 	if (targetCPU == NULL) {
96 		return KERN_FAILURE;
97 	}
98 	return targetCPU->startCPU(start_paddr, arg_paddr);
99 }
100 
void
PE_cpu_halt(cpu_id_t target)
{
	// C-linkage thunk from the platform expert to IOCPU::haltCPU().
	// NOTE(review): unlike PE_cpu_start(), there is no NULL check here;
	// a NULL target faults — presumably callers guarantee validity.
	IOCPU *targetCPU = (IOCPU *)target;

	targetCPU->haltCPU();
}
108 
109 void
PE_cpu_signal(cpu_id_t source,cpu_id_t target)110 PE_cpu_signal(cpu_id_t source, cpu_id_t target)
111 {
112 	IOCPU *sourceCPU = (IOCPU *)source;
113 	IOCPU *targetCPU = (IOCPU *)target;
114 
115 	sourceCPU->signalCPU(targetCPU);
116 }
117 
118 void
PE_cpu_signal_deferred(cpu_id_t source,cpu_id_t target)119 PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
120 {
121 	IOCPU *sourceCPU = (IOCPU *)source;
122 	IOCPU *targetCPU = (IOCPU *)target;
123 
124 	sourceCPU->signalCPUDeferred(targetCPU);
125 }
126 
127 void
PE_cpu_signal_cancel(cpu_id_t source,cpu_id_t target)128 PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
129 {
130 	IOCPU *sourceCPU = (IOCPU *)source;
131 	IOCPU *targetCPU = (IOCPU *)target;
132 
133 	sourceCPU->signalCPUCancel(targetCPU);
134 }
135 
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	// Validate that the opaque cpu_id_t really is an IOCPU before use.
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU == NULL) {
		panic("%s: invalid target CPU %p", __func__, target);
	}

	targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
	// On wake (not cold boot) of the boot processor, clear the quiescing
	// flag set earlier by PE_cpu_machine_quiesce()/IOCPUSleepKernel().
	if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
		ml_set_is_quiescing(false);
	}
#endif /* defined(__arm__) || defined(__arm64__) */
}
152 
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm__) || defined(__arm64__)
	// Entering quiescence on the boot processor: set the flag before the
	// CPU itself quiesces (mirrors the clear in PE_cpu_machine_init()).
	if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
		ml_set_is_quiescing(true);
	}
#endif /* defined(__arm__) || defined(__arm64__) */
	targetCPU->quiesceCPU();
}
164 
165 #if defined(__arm__) || defined(__arm64__)
166 static perfmon_interrupt_handler_func pmi_handler = NULL;
167 
168 kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)169 PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
170 {
171 	pmi_handler = handler;
172 
173 	return KERN_SUCCESS;
174 }
175 
176 void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target,boolean_t enable)177 PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
178 {
179 	IOCPU *targetCPU = (IOCPU*)target;
180 
181 	if (targetCPU == nullptr) {
182 		return;
183 	}
184 
185 	if (enable) {
186 		targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, NULL);
187 		targetCPU->getProvider()->enableInterrupt(1);
188 	} else {
189 		targetCPU->getProvider()->disableInterrupt(1);
190 	}
191 }
192 #endif
193 
194 #endif /* !USE_APPLEARMSMP */
195 
196 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
197 
#define super IOService

// IOCPU is abstract: concrete platform CPU drivers subclass it and
// implement startCPU/haltCPU/initCPU/quiesceCPU, etc.
OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
// Reserved vtable padding slots for future binary-compatible additions.
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
209 
210 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211 
212 #if !USE_APPLEARMSMP
void
IOCPUSleepKernel(void)
{
	// System-sleep entry point: runs platform pre-sleep actions, halts
	// every secondary CPU, sleeps the boot CPU (the system sleeps inside
	// bootCPU->haltCPU()), then restarts the secondaries on wake.
#if defined(__x86_64__)
	extern IOCPU *currentShutdownTarget;
#endif
	unsigned int cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain  *rootDomain = IOService::getPMRootDomain();

	printf("IOCPUSleepKernel enter\n");
#if defined(__arm64__)
	// Restrict the scheduler to the cores recommended during sleep.
	sched_override_recommended_cores_for_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
	currentShutdownTarget = NULL;
#endif

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the CPUs.
	ml_set_is_quiescing(true);
	cnt = numCPUs;
	while (cnt--) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPU's.
		if (target->getCPUNumber() == (UInt32)master_cpu) {
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
			currentShutdownTarget = target;
#endif
			target->haltCPU();
		}
	}

	// By now only the boot CPU should still be executing.
	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */

	bootCPU->haltCPU();
	ml_set_is_quiescing(false);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			// A secondary still marked Running never halted: fatal.
			if (target->getCPUState() == kIOCPUStateRunning) {
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
			}

			if (target->getCPUState() == kIOCPUStateStopped) {
				processor_start(target->getMachProcessor());
			}
		}
	}

#if defined(__arm64__)
	// Undo the sleep-time core-recommendation override.
	sched_restore_recommended_cores_after_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

	// Restore this thread's original priority.
	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}
322 
static bool
is_IOCPU_disabled(void)
{
	// Legacy (non-APPLEARMSMP) build: the IOCPU machinery is active.
	return false;
}
328 #else /* !USE_APPLEARMSMP */
static bool
is_IOCPU_disabled(void)
{
	// USE_APPLEARMSMP build: CPU management lives elsewhere, so IOCPU
	// instances refuse to start (see IOCPU::start).
	return true;
}
334 #endif /* !USE_APPLEARMSMP */
335 
bool
IOCPU::start(IOService *provider)
{
	// Register this CPU with the global list and publish the clock
	// frequencies from gPEClockFrequencyInfo onto the provider nub.
	// Returns false when IOCPU is compiled out or super::start fails.
	if (is_IOCPU_disabled()) {
		return false;
	}

	if (!super::start(provider)) {
		return false;
	}

	_cpuGroup = gIOCPUs;
	cpuNub = provider;

	// Add ourselves to the registry consumed by IOCPUSleepKernel().
	IOLockLock(gIOCPUsLock);
	gIOCPUs->setObject(this);
	IOLockUnlock(gIOCPUsLock);

	// Correct the bus, cpu and timebase frequencies in the device tree.
	// Values that fit in 32 bits are published from the 32-bit *_clock_rate_hz
	// fields; larger values use the 64-bit *_frequency_hz fields.
	if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
		provider->setProperty("bus-frequency", busFrequency.get());
	} else {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
		provider->setProperty("bus-frequency", busFrequency.get());
	}

	if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	} else {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	}

	// Timebase frequency is always published as a 32-bit quantity.
	OSSharedPtr<OSData> timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
	provider->setProperty("timebase-frequency", timebaseFrequency.get());

	super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);

	// Start unnumbered and unregistered; the platform driver fills these in.
	setCPUNumber(0);
	setCPUState(kIOCPUStateUnregistered);

	return true;
}
381 
382 void
detach(IOService * provider)383 IOCPU::detach(IOService *provider)
384 {
385 	if (is_IOCPU_disabled()) {
386 		return;
387 	}
388 
389 	super::detach(provider);
390 	IOLockLock(gIOCPUsLock);
391 	unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0);
392 	if (index != (unsigned int)-1) {
393 		gIOCPUs->removeObject(index);
394 	}
395 	IOLockUnlock(gIOCPUsLock);
396 }
397 
OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
	// Synthesize the IOCPUState property from the live _cpuState value;
	// every other key is answered by the superclass.
	if (aKey == gIOCPUStateKey) {
		return gIOCPUStateNames[_cpuState].get();
	}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	// Raw-pointer getProperty is deprecated under IOKIT_ENABLE_SHARED_PTR;
	// suppress the warning for this intentional pass-through override.
	return super::getProperty(aKey);
#pragma clang diagnostic pop
}
409 
410 bool
setProperty(const OSSymbol * aKey,OSObject * anObject)411 IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
412 {
413 	if (aKey == gIOCPUStateKey) {
414 		return false;
415 	}
416 
417 	return super::setProperty(aKey, anObject);
418 }
419 
420 bool
serializeProperties(OSSerialize * serialize) const421 IOCPU::serializeProperties(OSSerialize *serialize) const
422 {
423 	bool result;
424 	OSSharedPtr<OSDictionary> dict = dictionaryWithProperties();
425 	if (!dict) {
426 		return false;
427 	}
428 	dict->setObject(gIOCPUStateKey.get(), gIOCPUStateNames[_cpuState].get());
429 	result = dict->serialize(serialize);
430 	return result;
431 }
432 
IOReturn
IOCPU::setProperties(OSObject *properties)
{
	// User-client entry point: only the IOCPUState key is even considered,
	// and it requires administrator privilege.
	OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
	OSString     *stateStr;
	IOReturn     result;

	if (dict == NULL) {
		return kIOReturnUnsupported;
	}

	stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey.get()));
	if (stateStr != NULL) {
		result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
		if (result != kIOReturnSuccess) {
			return result;
		}

		// NOTE(review): IOCPU::setProperty() rejects gIOCPUStateKey, so
		// this call always fails and we fall through to unsupported —
		// the privilege check still gates the attempt.
		if (setProperty(gIOCPUStateKey.get(), stateStr)) {
			return kIOReturnSuccess;
		}

		return kIOReturnUnsupported;
	}

	return kIOReturnUnsupported;
}
460 
void
IOCPU::signalCPU(IOCPU */*target*/)
{
	// Default: no IPI support; platform subclasses override.
}
465 
466 void
signalCPUDeferred(IOCPU * target)467 IOCPU::signalCPUDeferred(IOCPU *target)
468 {
469 	// Our CPU may not support deferred IPIs,
470 	// so send a regular IPI by default
471 	signalCPU(target);
472 }
473 
void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by signalCPUDeferred();
	// unsupported by default — subclasses with deferred-IPI
	// hardware override this.
}
481 
void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
	// Default: no timebase control; platform subclasses override.
}
486 
UInt32
IOCPU::getCPUNumber(void)
{
	// Logical CPU number previously assigned via setCPUNumber().
	return _cpuNumber;
}
492 
void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
	_cpuNumber = cpuNumber;
	// Mirror the number into the registry as the IOCPUNumber property.
	super::setProperty("IOCPUNumber", _cpuNumber, 32);
}
499 
UInt32
IOCPU::getCPUState(void)
{
	// Current kIOCPUState* value for this CPU.
	return _cpuState;
}
505 
506 void
setCPUState(UInt32 cpuState)507 IOCPU::setCPUState(UInt32 cpuState)
508 {
509 	if (cpuState < kIOCPUStateCount) {
510 		_cpuState = cpuState;
511 	}
512 }
513 
OSArray *
IOCPU::getCPUGroup(void)
{
	// Borrowed pointer to the CPU group (the global gIOCPUs array).
	return _cpuGroup.get();
}
519 
UInt32
IOCPU::getCPUGroupSize(void)
{
	// Number of CPUs in this CPU's group.
	return _cpuGroup->getCount();
}
525 
processor_t
IOCPU::getMachProcessor(void)
{
	// Mach-layer processor object backing this IOCPU.
	return machProcessor;
}
531 
532 
533 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
534 
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

// Reserved vtable padding slots for binary compatibility.
// NOTE(review): slot 0 is not declared here — presumably consumed by an
// earlier API addition; confirm against IOCPU.h before renumbering.
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
545 
546 
547 
548 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
549 
550 IOReturn
initCPUInterruptController(int sources)551 IOCPUInterruptController::initCPUInterruptController(int sources)
552 {
553 	return initCPUInterruptController(sources, sources);
554 }
555 
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
	// Allocate the per-source vector table and a lock per vector.
	// Returns kIOReturnNoResources if any lock allocation fails.
	int cnt;

	if (!super::init()) {
		return kIOReturnInvalid;
	}

	numSources = sources;
	numCPUs = cpus;

	// Permanent (never-freed) zero-filled allocation: one vector per source.
	vectors = (IOInterruptVector *)zalloc_permanent(numSources *
	    sizeof(IOInterruptVector), ZALIGN(IOInterruptVector));

	// Allocate a lock for each vector
	for (cnt = 0; cnt < numSources; cnt++) {
		vectors[cnt].interruptLock = IOLockAlloc();
		if (vectors[cnt].interruptLock == NULL) {
			// Failure: free every lock allocated so far. Reusing cnt from 0
			// is safe because zalloc_permanent zero-fills, so slots never
			// allocated are NULL and skipped.
			for (cnt = 0; cnt < numSources; cnt++) {
				if (vectors[cnt].interruptLock != NULL) {
					IOLockFree(vectors[cnt].interruptLock);
				}
			}
			return kIOReturnNoResources;
		}
	}

	// NOTE(review): passes numSources, not numCPUs — in practice callers use
	// the sources==cpus overload; confirm before changing either count.
	ml_set_max_cpus(numSources);
	return kIOReturnSuccess;
}
587 
588 void
registerCPUInterruptController(void)589 IOCPUInterruptController::registerCPUInterruptController(void)
590 {
591 	setProperty(gPlatformInterruptControllerName, kOSBooleanTrue);
592 	registerService();
593 
594 	getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
595 	    this);
596 }
597 
598 void
setCPUInterruptProperties(IOService * service)599 IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
600 {
601 	int          cnt;
602 	OSSharedPtr<OSArray> specifier;
603 	OSSharedPtr<OSArray> controller;
604 	long         tmpLong;
605 
606 	if ((service->propertyExists(gIOInterruptControllersKey)) &&
607 	    (service->propertyExists(gIOInterruptSpecifiersKey))) {
608 		return;
609 	}
610 
611 	// Create the interrupt specifer array.
612 	specifier = OSArray::withCapacity(numSources);
613 	for (cnt = 0; cnt < numSources; cnt++) {
614 		tmpLong = cnt;
615 		OSSharedPtr<OSData> tmpData = OSData::withValue(tmpLong);
616 		specifier->setObject(tmpData.get());
617 	}
618 
619 	// Create the interrupt controller array.
620 	controller = OSArray::withCapacity(numSources);
621 	for (cnt = 0; cnt < numSources; cnt++) {
622 		controller->setObject(gPlatformInterruptControllerName);
623 	}
624 
625 	// Put the two arrays into the property table.
626 	service->setProperty(gIOInterruptControllersKey, controller.get());
627 	service->setProperty(gIOInterruptSpecifiersKey, specifier.get());
628 }
629 
void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
	// Called once per CPU during bring-up: installs the low-level dispatch
	// handler and counts the CPU as interrupt-enabled.
	IOInterruptHandler handler = OSMemberFunctionCast(
		IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

	assert(numCPUs > 0);

	ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL);

	// vectors[0].interruptLock doubles as the lock guarding enabledCPUs.
	IOTakeLock(vectors[0].interruptLock);
	++enabledCPUs;

	// Once every CPU has checked in, release threads blocked in
	// registerInterrupt() waiting for full bring-up.
	if (enabledCPUs == numCPUs) {
		IOService::cpusRunning();
		thread_wakeup(this);
	}
	IOUnlock(vectors[0].interruptLock);
}
649 
IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
	// Claim interrupt vector `source` for the given client, then block the
	// caller until every CPU has been brought up (see enableCPUInterrupt).
	IOInterruptVector *vector;

	// Interrupts must be enabled, as this can allocate memory.
	assert(ml_get_interrupts_enabled() == TRUE);

	if (source >= numSources) {
		return kIOReturnNoResources;
	}

	vector = &vectors[source];

	// Get the lock for this vector.
	IOTakeLock(vector->interruptLock);

	// Make sure the vector is not in use.
	if (vector->interruptRegistered) {
		IOUnlock(vector->interruptLock);
		return kIOReturnNoResources;
	}

	// Fill in vector with the client's info.
	vector->handler = handler;
	vector->nub     = nub;
	vector->source  = source;
	vector->target  = target;
	vector->refCon  = refCon;

	// Get the vector ready.  It starts hard disabled.
	vector->interruptDisabledHard = 1;
	vector->interruptDisabledSoft = 1;
	vector->interruptRegistered   = 1;

	IOUnlock(vector->interruptLock);

	// vectors[0].interruptLock guards enabledCPUs; if not all CPUs are up
	// yet, sleep on `this` until enableCPUInterrupt() issues the wakeup.
	IOTakeLock(vectors[0].interruptLock);
	if (enabledCPUs != numCPUs) {
		assert_wait(this, THREAD_UNINT);
		IOUnlock(vectors[0].interruptLock);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		IOUnlock(vectors[0].interruptLock);
	}

	return kIOReturnSuccess;
}
702 
703 IOReturn
getInterruptType(IOService *,int,int * interruptType)704 IOCPUInterruptController::getInterruptType(IOService */*nub*/,
705     int /*source*/,
706     int *interruptType)
707 {
708 	if (interruptType == NULL) {
709 		return kIOReturnBadArgument;
710 	}
711 
712 	*interruptType = kIOInterruptTypeLevel;
713 
714 	return kIOReturnSuccess;
715 }
716 
IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// CPU interrupt sources are always delivered; reported as success
	// without touching the hardware.
//  ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}
724 
IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// Intentionally a no-op (see the retained commented-out call);
	// reported as success without touching the hardware.
//  ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}
732 
IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// Trigger a software interrupt via the machine layer.
	ml_cause_interrupt();
	return kIOReturnSuccess;
}
740 
741 IOReturn
handleInterrupt(void *,IOService *,int source)742 IOCPUInterruptController::handleInterrupt(void */*refCon*/,
743     IOService */*nub*/,
744     int source)
745 {
746 	IOInterruptVector *vector;
747 
748 	vector = &vectors[source];
749 
750 	if (!vector->interruptRegistered) {
751 		return kIOReturnInvalid;
752 	}
753 
754 	vector->handler(vector->target, vector->refCon,
755 	    vector->nub, vector->source);
756 
757 	return kIOReturnSuccess;
758 }
759 
760 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
761