1 /*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define IOKIT_ENABLE_SHARED_PTR
30
31 extern "C" {
32 #include <pexpert/pexpert.h>
33 #include <kern/cpu_number.h>
34 extern void kperf_kernel_configure(char *);
35 }
36
37 #include <machine/machine_routines.h>
38 #include <IOKit/IOLib.h>
39 #include <IOKit/IOPlatformExpert.h>
40 #include <IOKit/pwr_mgt/RootDomain.h>
41 #include <IOKit/pwr_mgt/IOPMPrivate.h>
42 #include <libkern/c++/OSSharedPtr.h>
43 #include <IOKit/IOUserClient.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45 #include <IOKit/IOCPU.h>
46 #include "IOKitKernelInternal.h"
47
48 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
49
50 #include <kern/queue.h>
51 #include <kern/sched_prim.h>
52
53 extern "C" void console_suspend();
54 extern "C" void console_resume();
55 extern "C" void sched_override_available_cores_for_sleep(void);
56 extern "C" void sched_restore_available_cores_after_sleep(void);
57
58 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
59
/* IOCPU bookkeeping shared across this file; created in IOCPUInitialize(). */
static IOLock *gIOCPUsLock;                                      // guards mutation of gIOCPUs
static OSSharedPtr<OSArray> gIOCPUs;                             // every registered IOCPU
static OSSharedPtr<const OSSymbol> gIOCPUStateKey;               // "IOCPUState" property key (special get/set handling)
static OSSharedPtr<OSString> gIOCPUStateNames[kIOCPUStateCount]; // display names, indexed by CPU state
64
65 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
66
67 #if !USE_APPLEARMSMP
68
/*
 * One-time setup for the IOCPU subsystem (non-APPLEARMSMP builds):
 * allocates the global CPU array and its lock, and interns the property
 * key and the per-state display strings used by getProperty/setProperty
 * and serializeProperties.
 */
void
IOCPUInitialize(void)
{
	gIOCPUsLock = IOLockAlloc();
	gIOCPUs = OSArray::withCapacity(1);

	gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

	/* Indexed by kIOCPUState*; the "Uninitalized" spelling is historical
	 * and matches the kIOCPUStateUninitalized enumerator. */
	gIOCPUStateNames[kIOCPUStateUnregistered] =
	    OSString::withCStringNoCopy("Unregistered");
	gIOCPUStateNames[kIOCPUStateUninitalized] =
	    OSString::withCStringNoCopy("Uninitalized");
	gIOCPUStateNames[kIOCPUStateStopped] =
	    OSString::withCStringNoCopy("Stopped");
	gIOCPUStateNames[kIOCPUStateRunning] =
	    OSString::withCStringNoCopy("Running");
}
86
87 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
88
89 kern_return_t
PE_cpu_start(cpu_id_t target,vm_offset_t start_paddr,vm_offset_t arg_paddr)90 PE_cpu_start(cpu_id_t target,
91 vm_offset_t start_paddr, vm_offset_t arg_paddr)
92 {
93 IOCPU *targetCPU = (IOCPU *)target;
94
95 if (targetCPU == NULL) {
96 return KERN_FAILURE;
97 }
98 return targetCPU->startCPU(start_paddr, arg_paddr);
99 }
100
101 void
PE_cpu_halt(cpu_id_t target)102 PE_cpu_halt(cpu_id_t target)
103 {
104 IOCPU *targetCPU = (IOCPU *)target;
105
106 targetCPU->haltCPU();
107 }
108
109 void
PE_cpu_signal(cpu_id_t source,cpu_id_t target)110 PE_cpu_signal(cpu_id_t source, cpu_id_t target)
111 {
112 IOCPU *sourceCPU = (IOCPU *)source;
113 IOCPU *targetCPU = (IOCPU *)target;
114
115 sourceCPU->signalCPU(targetCPU);
116 }
117
118 void
PE_cpu_signal_deferred(cpu_id_t source,cpu_id_t target)119 PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
120 {
121 IOCPU *sourceCPU = (IOCPU *)source;
122 IOCPU *targetCPU = (IOCPU *)target;
123
124 sourceCPU->signalCPUDeferred(targetCPU);
125 }
126
127 void
PE_cpu_signal_cancel(cpu_id_t source,cpu_id_t target)128 PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
129 {
130 IOCPU *sourceCPU = (IOCPU *)source;
131 IOCPU *targetCPU = (IOCPU *)target;
132
133 sourceCPU->signalCPUCancel(targetCPU);
134 }
135
/*
 * Platform-expert hook: (re)initialize a CPU.  bootb is true for the
 * initial boot-time call, false when re-initializing after sleep/wake.
 * Panics if target is not a registered IOCPU object.
 */
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU == NULL) {
		panic("%s: invalid target CPU %p", __func__, target);
	}

	targetCPU->initCPU(bootb);
#if defined(__arm64__)
	/* Re-initializing the boot CPU marks the end of the quiescent window
	 * opened by PE_cpu_machine_quiesce() on the way into sleep. */
	if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
		ml_set_is_quiescing(false);
	}
#endif /* defined(__arm64__) */
}
152
/*
 * Platform-expert hook: quiesce a CPU on the way into system sleep.
 * For the boot CPU on arm64 the quiescing flag is raised BEFORE
 * quiesceCPU(), since that call may not return until wake.
 */
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm64__)
	if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
		ml_set_is_quiescing(true);
	}
#endif /* defined(__arm64__) */
	targetCPU->quiesceCPU();
}
164
165 #if defined(__arm64__)
/* Single installed PMI (performance-monitor interrupt) handler; shared by
 * every CPU that later enables the interrupt. */
static perfmon_interrupt_handler_func pmi_handler = NULL;

/*
 * Record the PMI handler to be wired up by PE_cpu_perfmon_interrupt_enable().
 * Always succeeds; a subsequent call simply replaces the previous handler.
 */
kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
	pmi_handler = handler;

	return KERN_SUCCESS;
}
175
/*
 * Enable or disable the PMI on one CPU.  Enabling (re)registers the
 * previously installed pmi_handler as interrupt source 1 on the CPU's
 * provider and unmasks it; disabling only masks the source.
 */
void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
	IOCPU *targetCPU = (IOCPU*)target;

	if (targetCPU == nullptr) {
		return;
	}

	if (enable) {
		/* The double cast flattens the typed PMI handler to the generic
		 * IOInterruptAction signature expected by registerInterrupt(). */
		targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)(void (*)(void))pmi_handler, NULL);
		targetCPU->getProvider()->enableInterrupt(1);
	} else {
		/* NOTE(review): the handler registration is left in place on
		 * disable; only the source is masked — confirm the provider
		 * tolerates re-registration on the next enable. */
		targetCPU->getProvider()->disableInterrupt(1);
	}
}
192 #endif
193
/*
 * Kernel-debugger (kdp) query: is this CPU powered and debuggable?
 * Without APPLEARMSMP, core power-down is not tracked, so every CPU is
 * reported as available.
 */
bool
PE_cpu_power_check_kdp(int cpu_id)
{
	(void)cpu_id;
	return true;
}
199
200 #endif /* !USE_APPLEARMSMP */
201
202 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
203
#define super IOService

/* IOCPU is abstract: concrete platform CPU drivers subclass it.  The
 * eight reserved slots pad the vtable so future methods can be added
 * without breaking binary compatibility. */
OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
215
216 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
217
218 #if !USE_APPLEARMSMP
/*
 * Carry the system through a full sleep/wake cycle.  Runs on the boot
 * CPU: all non-boot CPUs are halted first, then the boot CPU's haltCPU()
 * call is where the system actually sleeps.  Execution resumes there on
 * wake, after which the secondary CPUs are restarted.
 */
void
IOCPUSleepKernel(void)
{
#if defined(__x86_64__)
	extern IOCPU *currentShutdownTarget;
#endif
	unsigned int cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

	printf("IOCPUSleepKernel enter\n");
	sched_override_available_cores_for_sleep();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
	currentShutdownTarget = NULL;
#endif

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the CPUs.
	ml_set_is_quiescing(true);
	/* Iterate in reverse registration order; only CPUs currently Running
	 * are halted. */
	cnt = numCPUs;
	while (cnt--) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPU's.
		if (target->getCPUNumber() == (UInt32)master_cpu) {
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
			currentShutdownTarget = target;
#endif
			target->haltCPU();
		}
	}

	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	/* The console must be quiesced before the boot CPU goes down. */
	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */

	bootCPU->haltCPU();
	ml_set_is_quiescing(false);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			/* A secondary CPU already Running at this point never slept —
			 * that indicates a spurious wakeup, which is fatal. */
			if (target->getCPUState() == kIOCPUStateRunning) {
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
			}

			if (target->getCPUState() == kIOCPUStateStopped) {
				processor_start(target->getMachProcessor());
			}
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

	sched_restore_available_cores_after_sleep();

	/* Drop back to the priority we had before the boost above. */
	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}
324
/* Without APPLEARMSMP, the IOCPU mechanism in this file is active. */
static bool
is_IOCPU_disabled(void)
{
	return false;
}
330 #else /* !USE_APPLEARMSMP */
/* With APPLEARMSMP, IOCPU is disabled: start()/detach() below bail out
 * early and this file's CPU management is not used. */
static bool
is_IOCPU_disabled(void)
{
	return true;
}
336 #endif /* !USE_APPLEARMSMP */
337
/*
 * IOService::start override: add this CPU to the global gIOCPUs list and
 * publish frequency properties on the provider nub.  Fails outright when
 * IOCPU is compiled out (USE_APPLEARMSMP) or the superclass start fails.
 */
bool
IOCPU::start(IOService *provider)
{
	if (is_IOCPU_disabled()) {
		return false;
	}

	if (!super::start(provider)) {
		return false;
	}

	_cpuGroup = gIOCPUs;
	cpuNub = provider;

	IOLockLock(gIOCPUsLock);
	gIOCPUs->setObject(this);
	IOLockUnlock(gIOCPUsLock);

	// Correct the bus, cpu and timebase frequencies in the device tree.
	/* Frequencies below 2^32 Hz are published as 4-byte properties taken
	 * from the 32-bit *_clock_rate_hz fields; larger values as 8-byte
	 * properties from the 64-bit *_frequency_hz fields. */
	if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
		provider->setProperty("bus-frequency", busFrequency.get());
	} else {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
		provider->setProperty("bus-frequency", busFrequency.get());
	}

	if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	} else {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	}

	/* Timebase frequency is always published as a 4-byte value. */
	OSSharedPtr<OSData> timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
	provider->setProperty("timebase-frequency", timebaseFrequency.get());

	super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);

	/* The real CPU number is assigned later; begin unregistered. */
	setCPUNumber(0);
	setCPUState(kIOCPUStateUnregistered);

	return true;
}
383
384 void
detach(IOService * provider)385 IOCPU::detach(IOService *provider)
386 {
387 if (is_IOCPU_disabled()) {
388 return;
389 }
390
391 super::detach(provider);
392 IOLockLock(gIOCPUsLock);
393 unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0);
394 if (index != (unsigned int)-1) {
395 gIOCPUs->removeObject(index);
396 }
397 IOLockUnlock(gIOCPUsLock);
398 }
399
/*
 * Synthesize the "IOCPUState" property from _cpuState; all other keys
 * fall through to IOService.  The pragmas silence the deprecation
 * warning on the raw-pointer getProperty override this class implements.
 */
OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
	if (aKey == gIOCPUStateKey) {
		return gIOCPUStateNames[_cpuState].get();
	}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	return super::getProperty(aKey);
#pragma clang diagnostic pop
}
411
412 bool
setProperty(const OSSymbol * aKey,OSObject * anObject)413 IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
414 {
415 if (aKey == gIOCPUStateKey) {
416 return false;
417 }
418
419 return super::setProperty(aKey, anObject);
420 }
421
422 bool
serializeProperties(OSSerialize * serialize) const423 IOCPU::serializeProperties(OSSerialize *serialize) const
424 {
425 bool result;
426 OSSharedPtr<OSDictionary> dict = dictionaryWithProperties();
427 if (!dict) {
428 return false;
429 }
430 dict->setObject(gIOCPUStateKey.get(), gIOCPUStateNames[_cpuState].get());
431 result = dict->serialize(serialize);
432 return result;
433 }
434
/*
 * User-space property setter.  Only the "IOCPUState" key is recognized,
 * and changing it requires administrator privilege.
 * NOTE(review): IOCPU::setProperty() above always rejects gIOCPUStateKey,
 * so the kIOReturnSuccess path can only be reached via a subclass
 * override of setProperty — confirm against concrete subclasses.
 */
IOReturn
IOCPU::setProperties(OSObject *properties)
{
	OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
	OSString *stateStr;
	IOReturn result;

	if (dict == NULL) {
		return kIOReturnUnsupported;
	}

	stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey.get()));
	if (stateStr != NULL) {
		/* Privilege check before any state change. */
		result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
		if (result != kIOReturnSuccess) {
			return result;
		}

		if (setProperty(gIOCPUStateKey.get(), stateStr)) {
			return kIOReturnSuccess;
		}

		return kIOReturnUnsupported;
	}

	return kIOReturnUnsupported;
}
462
/*
 * Send an inter-processor interrupt to `target`.  The base class does
 * nothing; platform subclasses override this with a real IPI.
 */
void
IOCPU::signalCPU(IOCPU */*target*/)
{
}
467
/*
 * Send a deferrable IPI to `target`.
 */
void
IOCPU::signalCPUDeferred(IOCPU *target)
{
	// Our CPU may not support deferred IPIs,
	// so send a regular IPI by default
	signalCPU(target);
}
475
/*
 * Cancel a pending deferred IPI to `target`.
 */
void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by
	// signalCPUDeferred; unsupported
	// by default
}
483
/*
 * Start or stop this CPU's timebase; no-op in the base class, overridden
 * by platform subclasses that manage the timebase per CPU.
 */
void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
488
/* Return the CPU number recorded by setCPUNumber(). */
UInt32
IOCPU::getCPUNumber(void)
{
	return _cpuNumber;
}
494
/*
 * Record this CPU's number and mirror it into the registry as the
 * 32-bit "IOCPUNumber" property.
 */
void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
	_cpuNumber = cpuNumber;
	super::setProperty("IOCPUNumber", _cpuNumber, 32);
}
501
/* Return the current kIOCPUState* value. */
UInt32
IOCPU::getCPUState(void)
{
	return _cpuState;
}
507
508 void
setCPUState(UInt32 cpuState)509 IOCPU::setCPUState(UInt32 cpuState)
510 {
511 if (cpuState < kIOCPUStateCount) {
512 _cpuState = cpuState;
513 }
514 }
515
/* Return the CPU group (the global gIOCPUs array) this CPU belongs to. */
OSArray *
IOCPU::getCPUGroup(void)
{
	return _cpuGroup.get();
}
521
/* Return the number of CPUs in this CPU's group. */
UInt32
IOCPU::getCPUGroupSize(void)
{
	return _cpuGroup->getCount();
}
527
/* Return the Mach processor_t associated with this CPU. */
processor_t
IOCPU::getMachProcessor(void)
{
	return machProcessor;
}
533
534
535 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
536
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

/* Reserved vtable padding for future binary-compatible expansion.
 * NOTE(review): slot 0 is not declared here — confirm against the class
 * header whether it was consumed or intentionally omitted. */
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
547
548
549
550 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
551
/*
 * Convenience initializer: one interrupt source per CPU.
 */
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources)
{
	return initCPUInterruptController(sources, sources);
}
557
558 IOReturn
initCPUInterruptController(int sources,int cpus)559 IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
560 {
561 int cnt;
562
563 if (!super::init()) {
564 return kIOReturnInvalid;
565 }
566
567 numSources = sources;
568 numCPUs = cpus;
569
570 vectors = (IOInterruptVector *)zalloc_permanent(numSources *
571 sizeof(IOInterruptVector), ZALIGN(IOInterruptVector));
572
573 // Allocate a lock for each vector
574 for (cnt = 0; cnt < numSources; cnt++) {
575 vectors[cnt].interruptLock = IOLockAlloc();
576 if (vectors[cnt].interruptLock == NULL) {
577 for (cnt = 0; cnt < numSources; cnt++) {
578 if (vectors[cnt].interruptLock != NULL) {
579 IOLockFree(vectors[cnt].interruptLock);
580 }
581 }
582 return kIOReturnNoResources;
583 }
584 }
585
586 ml_set_max_cpus(numSources);
587 return kIOReturnSuccess;
588 }
589
/*
 * Publish this controller: mark it with the platform interrupt-controller
 * name, register the service, and hand it to the platform expert so nubs
 * can route interrupts through it.
 */
void
IOCPUInterruptController::registerCPUInterruptController(void)
{
	setProperty(gPlatformInterruptControllerName, kOSBooleanTrue);
	registerService();

	getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
	    this);
}
599
/*
 * Attach interrupt-routing properties to `service` so generic IOKit
 * interrupt registration can find this controller: one specifier (the
 * source index) and one matching controller-name entry per source.
 * Idempotent — returns early if both properties are already present.
 */
void
IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
	int cnt;
	OSSharedPtr<OSArray> specifier;
	OSSharedPtr<OSArray> controller;
	long tmpLong;

	if ((service->propertyExists(gIOInterruptControllersKey)) &&
	    (service->propertyExists(gIOInterruptSpecifiersKey))) {
		return;
	}

	// Create the interrupt specifier array.
	specifier = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		tmpLong = cnt;
		OSSharedPtr<OSData> tmpData = OSData::withValue(tmpLong);
		specifier->setObject(tmpData.get());
	}

	// Create the interrupt controller array.
	controller = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		controller->setObject(gPlatformInterruptControllerName);
	}

	// Put the two arrays into the property table.
	service->setProperty(gIOInterruptControllersKey, controller.get());
	service->setProperty(gIOInterruptSpecifiersKey, specifier.get());
}
631
/*
 * Called as each CPU comes up: install handleInterrupt() as that CPU's
 * low-level interrupt handler and count the CPU as enabled.
 * vectors[0].interruptLock doubles as the lock protecting the
 * enabledCPUs counter; when the last CPU checks in, threads parked in
 * registerInterrupt() are woken via thread_wakeup(this).
 */
void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
	IOInterruptHandler handler = OSMemberFunctionCast(
		IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

	assert(numCPUs > 0);

	ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL);

	IOTakeLock(vectors[0].interruptLock);
	++enabledCPUs;

	if (enabledCPUs == numCPUs) {
		IOService::cpusRunning();
		thread_wakeup(this);
	}
	IOUnlock(vectors[0].interruptLock);
}
651
/*
 * Register a client handler for `source`.  May block: if not every CPU
 * has checked in via enableCPUInterrupt() yet, the caller sleeps until
 * the last CPU wakes it.  Returns kIOReturnNoResources for an
 * out-of-range source or one that already has a handler.
 */
IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
	IOInterruptVector *vector;

	// Interrupts must be enabled, as this can allocate memory.
	assert(ml_get_interrupts_enabled() == TRUE);

	if (source >= numSources) {
		return kIOReturnNoResources;
	}

	vector = &vectors[source];

	// Get the lock for this vector.
	IOTakeLock(vector->interruptLock);

	// Make sure the vector is not in use.
	if (vector->interruptRegistered) {
		IOUnlock(vector->interruptLock);
		return kIOReturnNoResources;
	}

	// Fill in vector with the client's info.
	vector->handler = handler;
	vector->nub = nub;
	vector->source = source;
	vector->target = target;
	vector->refCon = refCon;

	// Get the vector ready. It starts hard disabled.
	vector->interruptDisabledHard = 1;
	vector->interruptDisabledSoft = 1;
	vector->interruptRegistered = 1;

	IOUnlock(vector->interruptLock);

	/* vectors[0].interruptLock guards enabledCPUs (see
	 * enableCPUInterrupt()).  Wait until every CPU has installed its
	 * low-level handler before reporting success to the client. */
	IOTakeLock(vectors[0].interruptLock);
	if (enabledCPUs != numCPUs) {
		assert_wait(this, THREAD_UNINT);
		IOUnlock(vectors[0].interruptLock);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		IOUnlock(vectors[0].interruptLock);
	}

	return kIOReturnSuccess;
}
704
705 IOReturn
getInterruptType(IOService *,int,int * interruptType)706 IOCPUInterruptController::getInterruptType(IOService */*nub*/,
707 int /*source*/,
708 int *interruptType)
709 {
710 if (interruptType == NULL) {
711 return kIOReturnBadArgument;
712 }
713
714 *interruptType = kIOInterruptTypeLevel;
715
716 return kIOReturnSuccess;
717 }
718
/*
 * Per-source enable is a no-op for CPU interrupts; the retained
 * commented-out call shows the global mechanism deliberately not used
 * here.  Always reports success.
 */
IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
//  ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}
726
/*
 * Per-source disable is likewise a no-op; always reports success.
 */
IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}
734
/*
 * Trigger an interrupt via ml_cause_interrupt(); the nub and source
 * arguments are ignored.
 */
IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
	ml_cause_interrupt();
	return kIOReturnSuccess;
}
742
/*
 * Low-level dispatch: forward the interrupt for `source` to the client
 * handler registered for that vector; kIOReturnInvalid if no handler is
 * registered.
 * NOTE(review): `source` is not range-checked here — callers are trusted
 * to pass an index below numSources; confirm all call sites.
 */
IOReturn
IOCPUInterruptController::handleInterrupt(void */*refCon*/,
    IOService */*nub*/,
    int source)
{
	IOInterruptVector *vector;

	vector = &vectors[source];

	if (!vector->interruptRegistered) {
		return kIOReturnInvalid;
	}

	vector->handler(vector->target, vector->refCon,
	    vector->nub, vector->source);

	return kIOReturnSuccess;
}
761
762 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
763