1 /*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define IOKIT_ENABLE_SHARED_PTR
30
31 extern "C" {
32 #include <pexpert/pexpert.h>
33 #include <kern/cpu_number.h>
34 extern void kperf_kernel_configure(char *);
35 }
36
37 #include <machine/machine_routines.h>
38 #include <IOKit/IOLib.h>
39 #include <IOKit/IOPlatformExpert.h>
40 #include <IOKit/pwr_mgt/RootDomain.h>
41 #include <IOKit/pwr_mgt/IOPMPrivate.h>
42 #include <libkern/c++/OSSharedPtr.h>
43 #include <IOKit/IOUserClient.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45 #include <IOKit/IOCPU.h>
46 #include "IOKitKernelInternal.h"
47
48 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
49
50 #include <kern/queue.h>
51 #include <kern/sched_prim.h>
52
53 extern "C" void console_suspend();
54 extern "C" void console_resume();
55 extern "C" void sched_override_available_cores_for_sleep(void);
56 extern "C" void sched_restore_available_cores_after_sleep(void);
57
58 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
59
60 static IOLock *gIOCPUsLock;
61 static OSSharedPtr<OSArray> gIOCPUs;
62 static OSSharedPtr<const OSSymbol> gIOCPUStateKey;
63 static OSSharedPtr<OSString> gIOCPUStateNames[kIOCPUStateCount];
64
65 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
66
67 #if !USE_APPLEARMSMP
68
69 void
IOCPUInitialize(void)70 IOCPUInitialize(void)
71 {
72 gIOCPUsLock = IOLockAlloc();
73 gIOCPUs = OSArray::withCapacity(1);
74
75 gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");
76
77 gIOCPUStateNames[kIOCPUStateUnregistered] =
78 OSString::withCStringNoCopy("Unregistered");
79 gIOCPUStateNames[kIOCPUStateUninitalized] =
80 OSString::withCStringNoCopy("Uninitalized");
81 gIOCPUStateNames[kIOCPUStateStopped] =
82 OSString::withCStringNoCopy("Stopped");
83 gIOCPUStateNames[kIOCPUStateRunning] =
84 OSString::withCStringNoCopy("Running");
85 }
86
87 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
88
89 kern_return_t
PE_cpu_start(cpu_id_t target,vm_offset_t start_paddr,vm_offset_t arg_paddr)90 PE_cpu_start(cpu_id_t target,
91 vm_offset_t start_paddr, vm_offset_t arg_paddr)
92 {
93 IOCPU *targetCPU = (IOCPU *)target;
94
95 if (targetCPU == NULL) {
96 return KERN_FAILURE;
97 }
98 return targetCPU->startCPU(start_paddr, arg_paddr);
99 }
100
101 void
PE_cpu_halt(cpu_id_t target)102 PE_cpu_halt(cpu_id_t target)
103 {
104 IOCPU *targetCPU = (IOCPU *)target;
105
106 targetCPU->haltCPU();
107 }
108
109 void
PE_cpu_signal(cpu_id_t source,cpu_id_t target)110 PE_cpu_signal(cpu_id_t source, cpu_id_t target)
111 {
112 IOCPU *sourceCPU = (IOCPU *)source;
113 IOCPU *targetCPU = (IOCPU *)target;
114
115 sourceCPU->signalCPU(targetCPU);
116 }
117
118 void
PE_cpu_signal_deferred(cpu_id_t source,cpu_id_t target)119 PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
120 {
121 IOCPU *sourceCPU = (IOCPU *)source;
122 IOCPU *targetCPU = (IOCPU *)target;
123
124 sourceCPU->signalCPUDeferred(targetCPU);
125 }
126
127 void
PE_cpu_signal_cancel(cpu_id_t source,cpu_id_t target)128 PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
129 {
130 IOCPU *sourceCPU = (IOCPU *)source;
131 IOCPU *targetCPU = (IOCPU *)target;
132
133 sourceCPU->signalCPUCancel(targetCPU);
134 }
135
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	// Per-CPU initialization hook called by the platform layer.
	// bootb is true on cold boot, false when re-initializing on wake.
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU == NULL) {
		panic("%s: invalid target CPU %p", __func__, target);
	}

	targetCPU->initCPU(bootb);
#if defined(__arm64__)
	// Re-initializing the boot CPU on wake marks the end of the
	// quiescent window opened in PE_cpu_machine_quiesce().
	if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
		ml_set_is_quiescing(false);
	}
#endif /* defined(__arm64__) */
}
152
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	// Final hook before a CPU is quiesced for system sleep.
	IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm64__)
	// Quiescing the boot CPU opens the system-wide quiescent window;
	// it is closed again on wake in PE_cpu_machine_init().  Must happen
	// before quiesceCPU(), which does not return until wake.
	if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
		ml_set_is_quiescing(true);
	}
#endif /* defined(__arm64__) */
	targetCPU->quiesceCPU();
}
164
165 #if defined(__arm64__)
// Most recently installed performance-monitor interrupt (PMI) callback;
// wired to a CPU's interrupt source in PE_cpu_perfmon_interrupt_enable().
static perfmon_interrupt_handler_func pmi_handler = NULL;

kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
	// Only records the callback; no interrupt is enabled here.
	pmi_handler = handler;

	return KERN_SUCCESS;
}
175
void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
	// Attach the previously installed PMI handler to interrupt source 1
	// of the target CPU's provider and enable it, or disable that source.
	IOCPU *targetCPU = (IOCPU*)target;

	if (targetCPU == nullptr) {
		return;
	}

	if (enable) {
		// NOTE(review): every enable registers the handler again, and the
		// disable path never unregisters it — presumably relies on
		// registerInterrupt() replacing/tolerating an existing
		// registration; confirm against the provider's implementation.
		targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)(void (*)(void))pmi_handler, NULL);
		targetCPU->getProvider()->enableInterrupt(1);
	} else {
		targetCPU->getProvider()->disableInterrupt(1);
	}
}
192 #endif
193
194 #endif /* !USE_APPLEARMSMP */
195
196 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
197
#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
// Reserved vtable slots so future methods can be added without breaking
// binary compatibility with existing subclasses.
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
209
210 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211
212 #if !USE_APPLEARMSMP
void
IOCPUSleepKernel(void)
{
	/*
	 * Orchestrates system sleep at the CPU level: runs pre-sleep platform
	 * actions, halts every running secondary CPU, then halts the boot CPU
	 * (the system actually sleeps inside that haltCPU() call), and on wake
	 * restarts the secondaries and runs post-resume platform actions.
	 * Executes on the boot CPU.
	 */
#if defined(__x86_64__)
	extern IOCPU *currentShutdownTarget;
#endif
	unsigned int cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

	printf("IOCPUSleepKernel enter\n");
	sched_override_available_cores_for_sleep();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
	currentShutdownTarget = NULL;
#endif

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the CPUs.  Iterate in reverse registration order.
	ml_set_is_quiescing(true);
	cnt = numCPUs;
	while (cnt--) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPU's.
		if (target->getCPUNumber() == (UInt32)master_cpu) {
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
			currentShutdownTarget = target;
#endif
			target->haltCPU();
		}
	}

	// From here on only the boot CPU is running.
	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */

	bootCPU->haltCPU();
	ml_set_is_quiescing(false);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			// A non-boot CPU already Running here never slept — that is a bug.
			if (target->getCPUState() == kIOCPUStateRunning) {
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
			}

			if (target->getCPUState() == kIOCPUStateStopped) {
				processor_start(target->getMachProcessor());
			}
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

	sched_restore_available_cores_after_sleep();

	// Restore the priority boosted at the top of this function.
	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}
318
static bool
is_IOCPU_disabled(void)
{
	// !USE_APPLEARMSMP: the legacy IOCPU path is active.
	return false;
}
324 #else /* !USE_APPLEARMSMP */
static bool
is_IOCPU_disabled(void)
{
	// USE_APPLEARMSMP: the IOCPU class is compiled in but functionally
	// disabled; its entry points (start/detach) bail out early.
	return true;
}
330 #endif /* !USE_APPLEARMSMP */
331
bool
IOCPU::start(IOService *provider)
{
	// Registers this CPU in the global gIOCPUs array and publishes the
	// bus/cpu/timebase frequencies from gPEClockFrequencyInfo on the
	// provider nub.  Returns false when IOCPU is compiled out or the
	// superclass fails to start.
	if (is_IOCPU_disabled()) {
		return false;
	}

	if (!super::start(provider)) {
		return false;
	}

	_cpuGroup = gIOCPUs;
	cpuNub = provider;

	IOLockLock(gIOCPUsLock);
	gIOCPUs->setObject(this);
	IOLockUnlock(gIOCPUsLock);

	// Correct the bus, cpu and timebase frequencies in the device tree.
	// Values below 2^32 Hz are published as 32-bit data (from the
	// *_clock_rate_hz fields), larger ones as 64-bit data.  The OSData
	// wraps the globals' storage without copying, so the properties alias
	// the live gPEClockFrequencyInfo fields.
	if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
		provider->setProperty("bus-frequency", busFrequency.get());
	} else {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
		provider->setProperty("bus-frequency", busFrequency.get());
	}

	if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	} else {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	}

	// NOTE(review): unlike the two cases above there is no 64-bit
	// fallback for the timebase — presumably it always fits in 32 bits;
	// confirm before reusing this pattern.
	OSSharedPtr<OSData> timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
	provider->setProperty("timebase-frequency", timebaseFrequency.get());

	super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);

	// Subclasses assign the real CPU number/state later.
	setCPUNumber(0);
	setCPUState(kIOCPUStateUnregistered);

	return true;
}
377
378 void
detach(IOService * provider)379 IOCPU::detach(IOService *provider)
380 {
381 if (is_IOCPU_disabled()) {
382 return;
383 }
384
385 super::detach(provider);
386 IOLockLock(gIOCPUsLock);
387 unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0);
388 if (index != (unsigned int)-1) {
389 gIOCPUs->removeObject(index);
390 }
391 IOLockUnlock(gIOCPUsLock);
392 }
393
OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
	// Synthesize the IOCPUState property from _cpuState; every other key
	// defers to IOService.
	if (aKey == gIOCPUStateKey) {
		return gIOCPUStateNames[_cpuState].get();
	}
	// NOTE(review): the super call trips -Wdeprecated-declarations —
	// presumably the raw-pointer getProperty overload is deprecated under
	// IOKIT_ENABLE_SHARED_PTR; the pragma suppresses just that warning.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	return super::getProperty(aKey);
#pragma clang diagnostic pop
}
405
406 bool
setProperty(const OSSymbol * aKey,OSObject * anObject)407 IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
408 {
409 if (aKey == gIOCPUStateKey) {
410 return false;
411 }
412
413 return super::setProperty(aKey, anObject);
414 }
415
416 bool
serializeProperties(OSSerialize * serialize) const417 IOCPU::serializeProperties(OSSerialize *serialize) const
418 {
419 bool result;
420 OSSharedPtr<OSDictionary> dict = dictionaryWithProperties();
421 if (!dict) {
422 return false;
423 }
424 dict->setObject(gIOCPUStateKey.get(), gIOCPUStateNames[_cpuState].get());
425 result = dict->serialize(serialize);
426 return result;
427 }
428
IOReturn
IOCPU::setProperties(OSObject *properties)
{
	// User-space entry point (IORegistry setProperties).  Only the
	// IOCPUState key is recognized, and touching it requires
	// administrator privilege.
	OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
	OSString *stateStr;
	IOReturn result;

	if (dict == NULL) {
		return kIOReturnUnsupported;
	}

	stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey.get()));
	if (stateStr != NULL) {
		result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
		if (result != kIOReturnSuccess) {
			return result;
		}

		// NOTE: IOCPU::setProperty() rejects gIOCPUStateKey, so this
		// succeeds only if a subclass overrides setProperty(); otherwise
		// it falls through to kIOReturnUnsupported.
		if (setProperty(gIOCPUStateKey.get(), stateStr)) {
			return kIOReturnSuccess;
		}

		return kIOReturnUnsupported;
	}

	return kIOReturnUnsupported;
}
456
void
IOCPU::signalCPU(IOCPU */*target*/)
{
	// Default implementation: no IPI support.  Subclasses override.
}
461
void
IOCPU::signalCPUDeferred(IOCPU *target)
{
	// Default implementation for CPUs without deferred-IPI support:
	// fall back to an immediate IPI via signalCPU().
	signalCPU(target);
}
469
void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by signalCPUDeferred(); unsupported
	// by default, so this is a no-op.  Subclasses override.
}
477
void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
	// Default no-op; subclasses that gate the CPU timebase override this.
}
482
UInt32
IOCPU::getCPUNumber(void)
{
	// Logical CPU number previously assigned via setCPUNumber().
	return _cpuNumber;
}
488
void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
	// Record the CPU number and mirror it into the registry as the
	// 32-bit "IOCPUNumber" property.
	_cpuNumber = cpuNumber;
	super::setProperty("IOCPUNumber", _cpuNumber, 32);
}
495
UInt32
IOCPU::getCPUState(void)
{
	// Current kIOCPUState* value for this CPU.
	return _cpuState;
}
501
502 void
setCPUState(UInt32 cpuState)503 IOCPU::setCPUState(UInt32 cpuState)
504 {
505 if (cpuState < kIOCPUStateCount) {
506 _cpuState = cpuState;
507 }
508 }
509
OSArray *
IOCPU::getCPUGroup(void)
{
	// The CPU group this object was registered into (set to gIOCPUs in
	// IOCPU::start()).  Non-owning pointer.
	return _cpuGroup.get();
}
515
UInt32
IOCPU::getCPUGroupSize(void)
{
	// Number of CPUs in this CPU's group.
	return _cpuGroup->getCount();
}
521
processor_t
IOCPU::getMachProcessor(void)
{
	// The Mach-layer processor backing this IOCPU (used for
	// processor_start() on wake).
	return machProcessor;
}
527
528
529 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
530
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

// Reserved vtable slots for future expansion without breaking binary
// compatibility.  NOTE(review): slot 0 is not declared here, unlike
// IOCPU above — presumably consumed or deliberately omitted; confirm
// against the class declaration before reusing it.
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
542
543
544 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
545
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources)
{
	// Convenience entry point: one interrupt source per CPU.
	return initCPUInterruptController(sources, sources);
}
551
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
	// Set up one interrupt vector (with its own lock) per source.  The
	// vector storage comes from zalloc_permanent: zero-filled and never
	// freed for the lifetime of the kernel.
	int cnt;

	if (!super::init()) {
		return kIOReturnInvalid;
	}

	numSources = sources;
	numCPUs = cpus;

	vectors = (IOInterruptVector *)zalloc_permanent(numSources *
	    sizeof(IOInterruptVector), ZALIGN(IOInterruptVector));

	// Allocate a lock for each vector
	for (cnt = 0; cnt < numSources; cnt++) {
		vectors[cnt].interruptLock = IOLockAlloc();
		if (vectors[cnt].interruptLock == NULL) {
			// Unwind: free every lock allocated so far.  Scanning the
			// full array is safe because zalloc_permanent zero-fills,
			// so not-yet-allocated slots are NULL.
			for (cnt = 0; cnt < numSources; cnt++) {
				if (vectors[cnt].interruptLock != NULL) {
					IOLockFree(vectors[cnt].interruptLock);
				}
			}
			return kIOReturnNoResources;
		}
	}

	// NOTE(review): passes numSources rather than numCPUs — the two are
	// equal on the common path (see the single-argument overload);
	// confirm before letting them diverge.
	ml_set_max_cpus(numSources);
	return kIOReturnSuccess;
}
583
584 void
registerCPUInterruptController(void)585 IOCPUInterruptController::registerCPUInterruptController(void)
586 {
587 setProperty(gPlatformInterruptControllerName, kOSBooleanTrue);
588 registerService();
589
590 getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
591 this);
592 }
593
void
IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
	// Publish the IOInterruptControllers/IOInterruptSpecifiers property
	// pair on a CPU nub so generic IOInterrupt plumbing routes each of
	// this controller's sources through it.
	int cnt;
	OSSharedPtr<OSArray> specifier;
	OSSharedPtr<OSArray> controller;
	long tmpLong;

	// Already populated — nothing to do.
	if ((service->propertyExists(gIOInterruptControllersKey)) &&
	    (service->propertyExists(gIOInterruptSpecifiersKey))) {
		return;
	}

	// Create the interrupt specifier array: entry N is the source number
	// N wrapped in an OSData.
	specifier = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		tmpLong = cnt;
		OSSharedPtr<OSData> tmpData = OSData::withValue(tmpLong);
		specifier->setObject(tmpData.get());
	}

	// Create the interrupt controller array: every source maps back to
	// this (the platform) controller.
	controller = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		controller->setObject(gPlatformInterruptControllerName);
	}

	// Put the two arrays into the property table.
	service->setProperty(gIOInterruptControllersKey, controller.get());
	service->setProperty(gIOInterruptSpecifiersKey, specifier.get());
}
625
void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
	// Called once per CPU as it comes up: install handleInterrupt() as
	// that CPU's low-level ML interrupt handler and count the CPU as
	// enabled.  When the last CPU checks in, wake threads blocked in
	// registerInterrupt() waiting for the full complement.
	IOInterruptHandler handler = OSMemberFunctionCast(
		IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

	assert(numCPUs > 0);

	ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL);

	// vectors[0].interruptLock doubles as the lock protecting enabledCPUs.
	IOTakeLock(vectors[0].interruptLock);
	++enabledCPUs;

	if (enabledCPUs == numCPUs) {
		IOService::cpusRunning();
		// Pairs with the assert_wait(this, ...) in registerInterrupt().
		thread_wakeup(this);
	}
	IOUnlock(vectors[0].interruptLock);
}
645
IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
	// Claim interrupt vector `source` for the given client, then block
	// until every CPU has checked in via enableCPUInterrupt().
	IOInterruptVector *vector;

	// Interrupts must be enabled, as this can allocate memory.
	assert(ml_get_interrupts_enabled() == TRUE);

	if (source >= numSources) {
		return kIOReturnNoResources;
	}

	vector = &vectors[source];

	// Get the lock for this vector.
	IOTakeLock(vector->interruptLock);

	// Make sure the vector is not in use.
	if (vector->interruptRegistered) {
		IOUnlock(vector->interruptLock);
		return kIOReturnNoResources;
	}

	// Fill in vector with the client's info.
	vector->handler = handler;
	vector->nub = nub;
	vector->source = source;
	vector->target = target;
	vector->refCon = refCon;

	// Get the vector ready.  It starts hard disabled.
	vector->interruptDisabledHard = 1;
	vector->interruptDisabledSoft = 1;
	vector->interruptRegistered = 1;

	IOUnlock(vector->interruptLock);

	// vectors[0].interruptLock protects enabledCPUs.  If not all CPUs
	// have checked in yet, sleep until enableCPUInterrupt() issues the
	// matching thread_wakeup(this).
	IOTakeLock(vectors[0].interruptLock);
	if (enabledCPUs != numCPUs) {
		assert_wait(this, THREAD_UNINT);
		IOUnlock(vectors[0].interruptLock);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		IOUnlock(vectors[0].interruptLock);
	}

	return kIOReturnSuccess;
}
698
699 IOReturn
getInterruptType(IOService *,int,int * interruptType)700 IOCPUInterruptController::getInterruptType(IOService */*nub*/,
701 int /*source*/,
702 int *interruptType)
703 {
704 if (interruptType == NULL) {
705 return kIOReturnBadArgument;
706 }
707
708 *interruptType = kIOInterruptTypeLevel;
709
710 return kIOReturnSuccess;
711 }
712
IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// Per-source enabling is a no-op here; the commented-out call below
	// would have toggled the CPU-global interrupt state, which is not a
	// per-source operation.
	// ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}
720
IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// Per-source disabling is a no-op here; see enableInterrupt() —
	// the global toggle below was deliberately left disabled.
	// ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}
728
IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// Trigger a software interrupt on the current CPU, regardless of the
	// requested source.
	ml_cause_interrupt();
	return kIOReturnSuccess;
}
736
737 IOReturn
handleInterrupt(void *,IOService *,int source)738 IOCPUInterruptController::handleInterrupt(void */*refCon*/,
739 IOService */*nub*/,
740 int source)
741 {
742 IOInterruptVector *vector;
743
744 vector = &vectors[source];
745
746 if (!vector->interruptRegistered) {
747 return kIOReturnInvalid;
748 }
749
750 vector->handler(vector->target, vector->refCon,
751 vector->nub, vector->source);
752
753 return kIOReturnSuccess;
754 }
755
756 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
757