1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/bits.h>
69 #include <kern/cpu_number.h>
70 #include <kern/host.h>
71 #include <kern/ipc_host.h>
72 #include <kern/ipc_tt.h>
73 #include <kern/kalloc.h>
74 #include <kern/kern_types.h>
75 #include <kern/machine.h>
76 #include <kern/misc_protos.h>
77 #include <kern/processor.h>
78 #include <kern/sched.h>
79 #include <kern/smr.h>
80 #include <kern/task.h>
81 #include <kern/thread.h>
82 #include <kern/timer.h>
83 #if KPERF
84 #include <kperf/kperf.h>
85 #endif /* KPERF */
86 #include <ipc/ipc_port.h>
87 #include <machine/commpage.h>
88
89 #include <security/mac_mach_internal.h>
90
91 #if defined(CONFIG_XNUPOST)
92
93 #include <tests/xnupost.h>
94
95 #endif /* CONFIG_XNUPOST */
96
97 /*
98 * Exported interface
99 */
100 #include <mach/mach_host_server.h>
101 #include <mach/processor_set_server.h>
102 #include <san/kcov.h>
103
#if __AMP__

/*
 * For AMP platforms, all psets of the same type are part of
 * the same pset_node. This allows for easier CPU selection logic.
 *
 * The nodes in pset_nodes are indexed in pset boot order and
 * initialization is protected by pset_node_lock.
 */
struct pset_node pset_nodes[MAX_AMP_CLUSTER_TYPES];
/* Index 0 is the boot node; dynamically created nodes start at index 1. */
static int next_pset_node_index = 1;
/* Maps (pset_cluster_type - 1) -> node; each slot is written exactly once. */
static _Atomic pset_node_t pset_nodes_by_cluster_type[MAX_AMP_CLUSTER_TYPES];
117 /* pset_node_lock must be held */
118 static void
pset_node_set_for_pset_cluster_type(pset_node_t node,pset_cluster_type_t pset_cluster_type)119 pset_node_set_for_pset_cluster_type(pset_node_t node, pset_cluster_type_t pset_cluster_type)
120 {
121 assert3p(os_atomic_load(&pset_nodes_by_cluster_type[pset_cluster_type - 1], relaxed), ==, PSET_NODE_NULL);
122 os_atomic_store(&pset_nodes_by_cluster_type[pset_cluster_type - 1], node, release);
123 }
124
125 pset_node_t
pset_node_for_pset_cluster_type(pset_cluster_type_t pset_cluster_type)126 pset_node_for_pset_cluster_type(pset_cluster_type_t pset_cluster_type)
127 {
128 assert3u(pset_cluster_type, !=, PSET_SMP);
129 return os_atomic_load(&pset_nodes_by_cluster_type[pset_cluster_type - 1], acquire);
130 }
131
#else /* !__AMP__ */

/* The boot node */
struct pset_node pset_node0;

#endif /* !__AMP__ */

/* The boot pset */
struct processor_set pset0;

/* Protects pset node list/map state during pset creation and lookup. */
LCK_SPIN_DECLARE(pset_node_lock, LCK_GRP_NULL);

LCK_GRP_DECLARE(pset_lck_grp, "pset");

/* Global task/thread bookkeeping queues, guarded by tasks_threads_lock. */
queue_head_t tasks;
queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */
queue_head_t corpse_tasks;
int tasks_count;
int terminated_tasks_count;
queue_head_t threads;
queue_head_t terminated_threads;
int threads_count;
int terminated_threads_count;
LCK_GRP_DECLARE(task_lck_grp, "task");
LCK_ATTR_DECLARE(task_lck_attr, 0, 0);
LCK_MTX_DECLARE_ATTR(tasks_threads_lock, &task_lck_grp, &task_lck_attr);
LCK_MTX_DECLARE_ATTR(tasks_corpse_lock, &task_lck_grp, &task_lck_attr);

/*
 * Singly-linked list of all processors in boot order;
 * list and count are guarded by processor_list_lock.
 */
processor_t processor_list;
unsigned int processor_count;
static processor_t processor_list_tail;
SIMPLE_LOCK_DECLARE(processor_list_lock, 0);
SIMPLE_LOCK_DECLARE(processor_start_state_lock, 0);

uint32_t processor_avail_count;
uint32_t processor_avail_count_user;
#if CONFIG_SCHED_SMT
uint32_t primary_processor_avail_count_user;
#endif /* CONFIG_SCHED_SMT */

/* Whether shutting down the boot CPU is supported on this configuration. */
#if XNU_SUPPORT_BOOTCPU_SHUTDOWN
TUNABLE(bool, support_bootcpu_shutdown, "support_bootcpu_shutdown", true);
#else
TUNABLE(bool, support_bootcpu_shutdown, "support_bootcpu_shutdown", false);
#endif

/* Whether fully exiting (unregistering) a processor is permitted. */
#if __x86_64__ || XNU_ENABLE_PROCESSOR_EXIT
TUNABLE(bool, enable_processor_exit, "processor_exit", true);
#else
TUNABLE(bool, enable_processor_exit, "processor_exit", false);
#endif

SECURITY_READ_ONLY_LATE(int) master_cpu = 0;

struct processor PERCPU_DATA(processor);
processor_t processor_array[MAX_SCHED_CPUS] = { 0 };
processor_set_t pset_array[MAX_PSETS] = { 0 };

/* Per-CPU running-timer expiration handlers, indexed by running timer kind. */
static timer_call_func_t running_timer_funcs[] = {
	[RUNNING_TIMER_QUANTUM] = thread_quantum_expire,
	[RUNNING_TIMER_PREEMPT] = thread_preempt_expire,
	[RUNNING_TIMER_KPERF] = kperf_timer_expire,
	[RUNNING_TIMER_PERFCONTROL] = perfcontrol_timer_expire,
};
static_assert(sizeof(running_timer_funcs) / sizeof(running_timer_funcs[0])
    == RUNNING_TIMER_MAX, "missing running timer function");
#if defined(CONFIG_XNUPOST)
kern_return_t ipi_test(void);
extern void arm64_ipi_test(void);

/*
 * XNUPOST: exercise the IPI path by binding the calling thread to each
 * processor in turn and running the arm64 IPI self-test there.
 * Always returns KERN_SUCCESS; failures surface via the test harness.
 *
 * Fix: define with an explicit (void) parameter list to match the
 * prototype above; an empty () in a definition is an old-style
 * (unprototyped) declarator.
 */
kern_return_t
ipi_test(void)
{
#if __arm64__
	processor_t p;

	for (p = processor_list; p != NULL; p = p->processor_list) {
		/* Migrate onto processor p before firing the test. */
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running IPI test on cpu %d\n", p->cpu_id);
		arm64_ipi_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running IPI tests");
#else
	T_PASS("Unsupported platform. Not running IPI tests");

#endif /* __arm64__ */

	return KERN_SUCCESS;
}
#endif /* defined(CONFIG_XNUPOST) */
229
/* Whether SMT secondary processors may be used by the scheduler. */
int sched_enable_smt = 1;

/* Bitmap of CPUs in each offline state; guarded by sched_available_cores_lock. */
cpumap_t processor_offline_state_map[PROCESSOR_OFFLINE_MAX];
233
234 void
processor_update_offline_state_locked(processor_t processor,processor_offline_state_t new_state)235 processor_update_offline_state_locked(processor_t processor,
236 processor_offline_state_t new_state)
237 {
238 simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
239
240 processor_offline_state_t old_state = processor->processor_offline_state;
241
242 uint cpuid = (uint)processor->cpu_id;
243
244 assert(old_state < PROCESSOR_OFFLINE_MAX);
245 assert(new_state < PROCESSOR_OFFLINE_MAX);
246
247 processor->processor_offline_state = new_state;
248
249 bit_clear(processor_offline_state_map[old_state], cpuid);
250 bit_set(processor_offline_state_map[new_state], cpuid);
251 }
252
253 void
processor_update_offline_state(processor_t processor,processor_offline_state_t new_state)254 processor_update_offline_state(processor_t processor,
255 processor_offline_state_t new_state)
256 {
257 spl_t s = splsched();
258 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
259 processor_update_offline_state_locked(processor, new_state);
260 simple_unlock(&sched_available_cores_lock);
261 splx(s);
262 }
263
/*
 * processor_bootstrap:
 *
 * Early boot initialization: set up the scheduler locks, the boot
 * pset/node, the global task/thread queues, and the boot processor.
 */
void
processor_bootstrap(void)
{
	simple_lock_init(&sched_available_cores_lock, 0);
	simple_lock_init(&processor_start_state_lock, 0);

	/* Initialize boot pset and node */
#if __AMP__
	/*
	 * Since this is an AMP system, fill up cluster type and ID information; this should do the
	 * same kind of initialization done via ml_processor_register()
	 */
	const ml_topology_info_t *topology_info = ml_get_topology_info();
	ml_topology_cluster_t *boot_cluster = topology_info->boot_cluster;
	pset_cluster_type_t boot_cluster_type = cluster_type_to_pset_cluster_type(boot_cluster->cluster_type);
	pset0.pset_id = boot_cluster->cluster_id;
	pset0.pset_cluster_id = boot_cluster->cluster_id;
	pset_node0.pset_cluster_type = boot_cluster_type;
	pset0.pset_cluster_type = boot_cluster_type;
	pset_node_set_for_pset_cluster_type(&pset_node0, boot_cluster_type);
#else /* !__AMP__ */
	/* SMP: a single node and pset of type PSET_SMP, both id 0. */
	pset0.pset_id = 0;
	pset0.pset_cluster_id = 0;
	pset_node0.pset_cluster_type = PSET_SMP;
	pset0.pset_cluster_type = PSET_SMP;
#endif /* !__AMP__ */

	pset_init(&pset0, &pset_node0);
	/* Global task/thread bookkeeping queues (see tasks_threads_lock). */
	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);
	queue_init(&terminated_threads);
	queue_init(&corpse_tasks);

	/* The boot CPU joins the boot pset. */
	processor_init(master_processor, master_cpu, &pset0);
}
300
/*
 * Initialize the given processor for the cpu
 * indicated by cpu_id, and assign to the
 * specified processor set.
 */
void
processor_init(
	processor_t processor,
	int cpu_id,
	processor_set_t pset)
{
	spl_t s;

	assert(cpu_id < MAX_SCHED_CPUS);
	processor->cpu_id = cpu_id;

	if (processor != master_processor) {
		/* Scheduler state for master_processor initialized in sched_init() */
		SCHED(processor_init)(processor);
		smr_cpu_init(processor);
	}

	/* Default per-processor state: offline, idle, no threads attached. */
	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->startup_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor_state_update_idle(processor);
	processor->starting_pri = MINPRI;
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_online = false;
#if CONFIG_SCHED_SMT
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = false;
#endif /* CONFIG_SCHED_SMT */
	processor->processor_self = IP_NULL;
	processor->processor_list = NULL;
	processor->must_idle = false;
	processor->next_idle_short = false;
	processor->last_startup_reason = REASON_SYSTEM;
	processor->last_shutdown_reason = REASON_NONE;
	processor->shutdown_temporary = false;
	processor->processor_inshutdown = false;
	processor->processor_instartup = false;
	processor->last_derecommend_reason = REASON_NONE;
	/* Arm the per-CPU running timers with their expiration handlers. */
	processor->running_timers_active = false;
	for (int i = 0; i < RUNNING_TIMER_MAX; i++) {
		timer_call_setup(&processor->running_timers[i],
		    running_timer_funcs[i], processor);
		running_timer_clear(processor, i);
	}
	recount_processor_init(processor);

#if CONFIG_SCHED_EDGE
	os_atomic_init(&processor->stir_the_pot_inbox_cpu, -1);
#endif /* CONFIG_SCHED_EDGE */

	s = splsched();
	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);

	/* Publish this CPU in its pset's bitmaps under the pset lock. */
	pset_lock(pset);
	bit_set(pset->cpu_bitmask, cpu_id);
	bit_set(pset->recommended_bitmask, cpu_id);
	atomic_bit_set(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
#if CONFIG_SCHED_SMT
	bit_set(pset->primary_map, cpu_id);
#endif /* CONFIG_SCHED_SMT */
	bit_set(pset->cpu_state_map[PROCESSOR_OFF_LINE], cpu_id);
	/* Track the lowest and highest cpu_id present in this pset. */
	if (pset->cpu_set_count++ == 0) {
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	} else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}

	processor->last_recommend_reason = REASON_SYSTEM;
	sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, true);
	pset_unlock(pset);

	/* Every CPU starts out not yet booted. */
	processor->processor_offline_state = PROCESSOR_OFFLINE_NOT_BOOTED;
	bit_set(processor_offline_state_map[processor->processor_offline_state], cpu_id);

	if (processor == master_processor) {
		/* The boot CPU is already in the middle of being brought up. */
		processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_STARTING);
	}

	simple_unlock(&sched_available_cores_lock);
	splx(s);

	/* Append to the global processor list (maintained in boot order). */
	simple_lock(&processor_list_lock, LCK_GRP_NULL);
	if (processor_list == NULL) {
		processor_list = processor;
	} else {
		processor_list_tail->processor_list = processor;
	}
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
	processor_array[cpu_id] = processor;
}
402
#if CONFIG_SCHED_SMT
bool system_is_SMT = false;

/*
 * processor_set_primary:
 *
 * Record the SMT relationship between a (possibly) secondary processor
 * and its primary. Assumes a 2-way SMT model. On the first SMT pair
 * discovered, marks the system and pset as SMT and raises the realtime
 * backup processor count.
 *
 * Fix: assign `true` to the bool `is_SMT` fields instead of the Mach
 * `TRUE` macro, matching how these fields are initialized elsewhere in
 * this file (processor_init uses `false`).
 */
void
processor_set_primary(
	processor_t processor,
	processor_t primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust primary point for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link primary to secondary, assumes a 2-way SMT model
		 * We'll need to move to a queue if any future architecture
		 * requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = true;
		processor->is_SMT = true;

		if (!system_is_SMT) {
			system_is_SMT = true;
			sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS_SMT;
		}

		/* A secondary is never a primary: drop it from the pset's primary map. */
		processor_set_t pset = processor->processor_set;
		spl_t s = splsched();
		pset_lock(pset);
		if (!pset->is_SMT) {
			pset->is_SMT = true;
		}
		bit_clear(pset->primary_map, processor->cpu_id);
		pset_unlock(pset);
		splx(s);
	}
}
#endif /* CONFIG_SCHED_SMT */
444
445 processor_set_t
processor_pset(processor_t processor)446 processor_pset(
447 processor_t processor)
448 {
449 return processor->processor_set;
450 }
451
452 cpumap_t
pset_available_cpumap(processor_set_t pset)453 pset_available_cpumap(processor_set_t pset)
454 {
455 return pset->cpu_available_map & pset->recommended_bitmask;
456 }
457
#if CONFIG_SCHED_EDGE

/* Returns the scheduling type for the pset */
cluster_type_t
pset_type_for_id(uint32_t cluster_id)
{
	/* cluster_id must index a pset already published in pset_array. */
	return pset_array[cluster_id]->pset_type;
}
466
467 /*
468 * Processor foreign threads
469 *
470 * With the Edge scheduler, each pset maintains a bitmap of processors running threads
471 * which are foreign to the pset/cluster. A thread is defined as foreign for a cluster
472 * if its of a different type than its preferred cluster type (E/P). The bitmap should
473 * be updated every time a new thread is assigned to run on a processor. Cluster shared
474 * resource intensive threads are also not counted as foreign threads since these
475 * threads should not be rebalanced when running on non-preferred clusters.
476 *
477 * This bitmap allows the Edge scheduler to quickly find CPUs running foreign threads
478 * for rebalancing.
479 */
480 static void
processor_state_update_running_foreign(processor_t processor,thread_t thread)481 processor_state_update_running_foreign(processor_t processor, thread_t thread)
482 {
483 cluster_type_t current_processor_type = pset_type_for_id(processor->processor_set->pset_cluster_id);
484 cluster_type_t thread_type = pset_type_for_id(sched_edge_thread_preferred_cluster(thread));
485
486 boolean_t non_rt_thr = (processor->current_pri < BASEPRI_RTQUEUES);
487 boolean_t non_bound_thr = (thread->bound_processor == PROCESSOR_NULL);
488 if (non_rt_thr && non_bound_thr && (current_processor_type != thread_type)) {
489 bit_set(processor->processor_set->cpu_running_foreign, processor->cpu_id);
490 } else {
491 bit_clear(processor->processor_set->cpu_running_foreign, processor->cpu_id);
492 }
493 }
494
495 /*
496 * Cluster shared resource intensive threads
497 *
498 * With the Edge scheduler, each pset maintains a bitmap of processors running
499 * threads that are shared resource intensive. This per-thread property is set
500 * by the performance controller or explicitly via dispatch SPIs. The bitmap
501 * allows the Edge scheduler to calculate the cluster shared resource load on
502 * any given cluster and load balance intensive threads accordingly.
503 */
504 static void
processor_state_update_running_cluster_shared_rsrc(processor_t processor,thread_t thread)505 processor_state_update_running_cluster_shared_rsrc(processor_t processor, thread_t thread)
506 {
507 if (thread_shared_rsrc_policy_get(thread, CLUSTER_SHARED_RSRC_TYPE_RR)) {
508 bit_set(processor->processor_set->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_RR], processor->cpu_id);
509 } else {
510 bit_clear(processor->processor_set->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_RR], processor->cpu_id);
511 }
512 if (thread_shared_rsrc_policy_get(thread, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST)) {
513 bit_set(processor->processor_set->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST], processor->cpu_id);
514 } else {
515 bit_clear(processor->processor_set->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST], processor->cpu_id);
516 }
517 }
518
519 #endif /* CONFIG_SCHED_EDGE */
520
/*
 * processor_state_update_idle:
 *
 * Reset the processor's snapshot of its "currently running thread"
 * (priority, SFI class, urgency, etc.) to idle defaults, then refresh
 * the owning pset's load average.
 */
void
processor_state_update_idle(processor_t processor)
{
	processor->current_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->current_recommended_pset_type = PSET_SMP;
#if CONFIG_THREAD_GROUPS
	processor->current_thread_group = NULL;
#endif
	processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE;
	processor->current_urgency = THREAD_URGENCY_NONE;
#if CONFIG_SCHED_SMT
	processor->current_is_NO_SMT = false;
#endif /* CONFIG_SCHED_SMT */
	processor->current_is_bound = false;
	processor->current_is_eagerpreempt = false;
#if CONFIG_SCHED_EDGE
	/* Idle CPUs are excluded from the edge scheduler's running-bucket
	 * and shared-resource tracking. */
	os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], TH_BUCKET_SCHED_MAX, relaxed);
	bit_clear(processor->processor_set->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_RR], processor->cpu_id);
	bit_clear(processor->processor_set->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST], processor->cpu_id);
	sched_edge_stir_the_pot_clear_registry_entry();
#endif /* CONFIG_SCHED_EDGE */
	sched_update_pset_load_average(processor->processor_set, 0);
}
545
/*
 * processor_state_update_from_thread:
 *
 * Refresh the processor's snapshot fields from the thread it is about
 * to run (priority, SFI class, thread group, urgency, binding, etc.).
 *
 * pset_lock_held: caller indicates whether it holds the pset lock; the
 * pset load average is only updated when it does.
 */
void
processor_state_update_from_thread(processor_t processor, thread_t thread, boolean_t pset_lock_held)
{
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class;
	processor->current_recommended_pset_type = recommended_pset_type(thread);
#if CONFIG_SCHED_EDGE
	processor_state_update_running_foreign(processor, thread);
	processor_state_update_running_cluster_shared_rsrc(processor, thread);
	/* Since idle and bound threads are not tracked by the edge scheduler, ignore when those threads go on-core */
	sched_bucket_t bucket = ((thread->state & TH_IDLE) || (thread->bound_processor != PROCESSOR_NULL)) ? TH_BUCKET_SCHED_MAX : thread->th_sched_bucket;
	os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], bucket, relaxed);
	sched_edge_stir_the_pot_update_registry_state(thread);
#endif /* CONFIG_SCHED_EDGE */

#if CONFIG_THREAD_GROUPS
	processor->current_thread_group = thread_group_get(thread);
#endif
	processor->current_perfctl_class = thread_get_perfcontrol_class(thread);
	processor->current_urgency = thread_get_urgency(thread, NULL, NULL);
#if CONFIG_SCHED_SMT
	processor->current_is_NO_SMT = thread_no_smt(thread);
#endif /* CONFIG_SCHED_SMT */
	processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL;
	processor->current_is_eagerpreempt = thread_is_eager_preempt(thread);
	if (pset_lock_held) {
		/* Only update the pset load average when the pset lock is held */
		sched_update_pset_load_average(processor->processor_set, 0);
	}
}
576
577 pset_node_t
pset_node_root(void)578 pset_node_root(void)
579 {
580 return &pset_node0;
581 }
582
#if __AMP__

/*
 * Only need to dynamically initialize pset nodes when
 * there are multiple cluster types.
 */
static pset_node_t
pset_node_create(cluster_type_t cluster_type)
{
	lck_spin_lock(&pset_node_lock);
	assert3u(cluster_type, !=, CLUSTER_TYPE_SMP);

	pset_node_t node;
	pset_cluster_type_t pset_cluster_type = cluster_type_to_pset_cluster_type(cluster_type);
	/*
	 * Check if we raced with another booting pset of the same type,
	 * and this node has already been created.
	 */
	if ((node = pset_node_for_pset_cluster_type(pset_cluster_type)) != PSET_NODE_NULL) {
		lck_spin_unlock(&pset_node_lock);
		return node;
	}

	assert3u(next_pset_node_index, <, MAX_AMP_CLUSTER_TYPES);
	node = &pset_nodes[next_pset_node_index++];
	node->psets = PROCESSOR_SET_NULL;
	node->pset_cluster_type = pset_cluster_type;
	/* Insert into node linked list */
	/*
	 * After the increment above the new node sits at index
	 * (next_pset_node_index - 1), so the previous node is at
	 * (next_pset_node_index - 2); link the new node after it.
	 */
	pset_nodes[next_pset_node_index - 2].node_list = node;
	pset_node_set_for_pset_cluster_type(node, pset_cluster_type);

	lck_spin_unlock(&pset_node_lock);
	return node;
}

#endif /* __AMP__*/
619
/* Serializes pset creation (used by callers of pset_create/pset_find). */
LCK_GRP_DECLARE(pset_create_grp, "pset_create");
LCK_MTX_DECLARE(pset_create_lock, &pset_create_grp);
622
/*
 * pset_create:
 *
 * Allocate and initialize a new processor set for the given cluster,
 * attaching it to the node matching its cluster type (created on
 * demand on AMP platforms). Schedulers without multi-pset support
 * get the master processor's pset instead.
 */
processor_set_t
pset_create(
	cluster_type_t cluster_type,
	uint32_t pset_cluster_id,
	int pset_id)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE) {
		return processor_pset(master_processor);
	}

	pset_node_t node;
	pset_cluster_type_t pset_cluster_type;
#if __AMP__
	pset_cluster_type = cluster_type_to_pset_cluster_type(cluster_type);
	node = pset_node_for_pset_cluster_type(pset_cluster_type);
	if (node == PSET_NODE_NULL) {
		/* First pset of this cluster type */
		node = pset_node_create(cluster_type);
	}
#else /* !__AMP__ */
	pset_cluster_type = PSET_SMP;
	node = &pset_node0;
	(void)cluster_type;
#endif /* !__AMP__ */

	/* Permanent allocation: psets live for the lifetime of the system. */
	processor_set_t pset = zalloc_permanent_type(struct processor_set);
	if (pset == PROCESSOR_SET_NULL) {
		panic("Failed to allocate struct processor_set");
	}
	pset->pset_cluster_type = pset_cluster_type;
	pset->pset_cluster_id = pset_cluster_id;
	pset->pset_id = pset_id;
	pset_init(pset, node);

	return pset;
}
660
661 /*
662 * Find processor set with specified cluster_id.
663 * Returns default_pset if not found.
664 */
665 processor_set_t
pset_find(uint32_t cluster_id,processor_set_t default_pset)666 pset_find(
667 uint32_t cluster_id,
668 processor_set_t default_pset)
669 {
670 lck_spin_lock(&pset_node_lock);
671 pset_node_t node = &pset_node0;
672 processor_set_t pset = NULL;
673
674 do {
675 pset = node->psets;
676 while (pset != NULL) {
677 if (pset->pset_cluster_id == cluster_id) {
678 break;
679 }
680 pset = pset->pset_list;
681 }
682 } while (pset == NULL && (node = node->node_list) != NULL);
683 lck_spin_unlock(&pset_node_lock);
684 if (pset == NULL) {
685 return default_pset;
686 }
687 return pset;
688 }
689
/*
 * Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t pset,
	pset_node_t node)
{
	pset->online_processor_count = 0;
#if CONFIG_SCHED_EDGE
	bzero(&pset->pset_load_average, sizeof(pset->pset_load_average));
	bzero(&pset->pset_runnable_depth, sizeof(pset->pset_runnable_depth));
#else /* !CONFIG_SCHED_EDGE */
	pset->load_average = 0;
#endif /* CONFIG_SCHED_EDGE */
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->last_chosen = -1;
	/* Clear all per-CPU membership/recommendation bitmaps. */
	pset->cpu_bitmask = 0;
	pset->recommended_bitmask = 0;
#if CONFIG_SCHED_SMT
	pset->primary_map = 0;
#endif /* CONFIG_SCHED_SMT */
	pset->realtime_map = 0;
	pset->cpu_available_map = 0;

	for (uint i = 0; i < PROCESSOR_STATE_LEN; i++) {
		pset->cpu_state_map[i] = 0;
	}
	/* No ASTs or spills pending on a freshly created pset. */
	pset->pending_AST_URGENT_cpu_mask = 0;
	pset->pending_AST_PREEMPT_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset->pending_spill_cpu_mask = 0;
	pset->rt_pending_spill_cpu_mask = 0;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
#if CONFIG_SCHED_SMT
	pset->is_SMT = false;
#endif /* CONFIG_SCHED_SMT */
#if CONFIG_SCHED_EDGE
	bzero(&pset->pset_execution_time, sizeof(pset->pset_execution_time));
	pset->cpu_running_foreign = 0;
	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
		pset->cpu_running_cluster_shared_rsrc_thread[shared_rsrc_type] = 0;
		pset->pset_cluster_shared_rsrc_load[shared_rsrc_type] = 0;
	}
#endif /* CONFIG_SCHED_EDGE */

	/*
	 * No initial preferences or forced migrations, so use the least numbered
	 * available idle core when picking amongst idle cores in a cluster.
	 */
	pset->perfcontrol_cpu_preferred_bitmask = 0;
	pset->perfcontrol_cpu_migration_bitmask = 0;
	pset->cpu_preferred_last_chosen = -1;

	if (pset != &pset0) {
		/*
		 * Scheduler runqueue initialization for non-boot psets.
		 * This initialization for pset0 happens in sched_init().
		 */
		SCHED(pset_init)(pset);
		SCHED(rt_init_pset)(pset);
	}

	/*
	 * Because the pset_node_lock is not taken by every client of the pset_map,
	 * we need to make sure that the initialized pset contents are visible to any
	 * client that loads a non-NULL value from pset_array.
	 */
	os_atomic_store(&pset_array[pset->pset_id], pset, release);

	lck_spin_lock(&pset_node_lock);

	/* Initialize pset node state regarding this pset */
	bit_set(node->pset_map, pset->pset_id);
	pset->node = node;

	/* Append to the node's pset list (tail insertion). */
	processor_set_t *prev = &node->psets;
	while (*prev != PROCESSOR_SET_NULL) {
		prev = &(*prev)->pset_list;
	}
	*prev = pset;

	lck_spin_unlock(&pset_node_lock);
}
780
781 kern_return_t
processor_info_count(processor_flavor_t flavor,mach_msg_type_number_t * count)782 processor_info_count(
783 processor_flavor_t flavor,
784 mach_msg_type_number_t *count)
785 {
786 switch (flavor) {
787 case PROCESSOR_BASIC_INFO:
788 *count = PROCESSOR_BASIC_INFO_COUNT;
789 break;
790
791 case PROCESSOR_CPU_LOAD_INFO:
792 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
793 break;
794
795 default:
796 return cpu_info_count(flavor, count);
797 }
798
799 return KERN_SUCCESS;
800 }
801
/*
 * processor_cpu_load_info:
 *
 * Accumulate this processor's user/system/idle time, converted from
 * Mach time to scheduler ticks, into the caller-supplied array.
 * Note: values are added to the existing entries, not stored over them.
 */
void
processor_cpu_load_info(processor_t processor,
    natural_t ticks[static CPU_STATE_MAX])
{
	struct recount_usage usage = { 0 };
	uint64_t idle_time = 0;
	recount_processor_usage(&processor->pr_recount, &usage, &idle_time);

	ticks[CPU_STATE_USER] += (uint32_t)(usage.ru_metrics[RCT_LVL_USER].rm_time_mach /
	    hz_tick_interval);
	ticks[CPU_STATE_SYSTEM] += (uint32_t)(
	    recount_usage_system_time_mach(&usage) / hz_tick_interval);
	ticks[CPU_STATE_IDLE] += (uint32_t)(idle_time / hz_tick_interval);
}
816
/*
 * processor_info:
 *
 * Mach interface: return information about a processor for the
 * requested flavor. Fills `info`/`count` and sets `host` to the
 * real host on success; machine-dependent flavors are delegated
 * to cpu_info().
 */
kern_return_t
processor_info(
	processor_t processor,
	processor_flavor_t flavor,
	host_t *host,
	processor_info_t info,
	mach_msg_type_number_t *count)
{
	int cpu_id, state;
	kern_return_t result;

	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	cpu_id = processor->cpu_id;

	switch (flavor) {
	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t basic_info;

		/* Caller's buffer must be large enough for this flavor. */
		if (*count < PROCESSOR_BASIC_INFO_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/*
		 * A CPU counts as "running" unless it is (or is becoming)
		 * offline for a non-temporary reason; on x86 a derecommended
		 * CPU is also reported as not running.
		 */
		if (((state == PROCESSOR_OFF_LINE || state == PROCESSOR_PENDING_OFFLINE) && !processor->shutdown_temporary)
#if defined(__x86_64__)
		    || !processor->is_recommended
#endif
		    ) {
			basic_info->running = FALSE;
		} else {
			basic_info->running = TRUE;
		}
		basic_info->slot_num = cpu_id;
		if (processor == master_processor) {
			basic_info->is_master = TRUE;
		} else {
			basic_info->is_master = FALSE;
		}

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		cpu_load_info = (processor_cpu_load_info_t) info;

		/* processor_cpu_load_info() accumulates, so zero first. */
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		processor_cpu_load_info(processor, cpu_load_info->cpu_ticks);
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return KERN_SUCCESS;
	}

	default:
		/* Machine-dependent flavor. */
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS) {
			*host = &realhost;
		}

		return result;
	}
}
900
/*
 * Now that we're enforcing all CPUs actually boot, we may need a way to
 * relax the timeout.
 */
/* Boot-arg tunable: how long processor_wait_for_start() waits for a CPU. */
TUNABLE(uint32_t, cpu_boot_timeout_secs, "cpu_boot_timeout_secs", 1); /* seconds, default to 1 second */

/*
 * Panic-message fragments, indexed by processor_start_kind_t, describing
 * what the CPU failed to do when processor_wait_for_start() times out.
 */
static const char *
processor_start_panic_strings[] = {
    [PROCESSOR_FIRST_BOOT] = "boot for the first time",
    [PROCESSOR_BEFORE_ENTERING_SLEEP] = "come online while entering system sleep",
    [PROCESSOR_WAKE_FROM_SLEEP] = "come online after returning from system sleep",
    [PROCESSOR_CLUSTER_POWERDOWN_SUSPEND] = "come online while disabling cluster powerdown",
    [PROCESSOR_CLUSTER_POWERDOWN_RESUME] = "come online before enabling cluster powerdown",
    [PROCESSOR_POWERED_CORES_CHANGE] = "come online during dynamic cluster power state change",
};
916
/*
 * Block until `processor` has finished starting up.
 *
 * Sleeps uninterruptibly (with a timeout) until the startup path clears
 * processor_instartup, then validates that the processor is online and, if
 * this is the first wait after startup, advances its offline state to
 * PROCESSOR_OFFLINE_RUNNING.
 *
 * Panics if the CPU was never booted, fails to come up within
 * cpu_boot_timeout_secs (scaled by the debug performance-degradation
 * factor), is still marked in shutdown, or is not online.  `start_kind`
 * selects the panic-message fragment describing what was being waited on.
 */
void
processor_wait_for_start(processor_t processor, processor_start_kind_t start_kind)
{
    if (!processor->processor_booted) {
        panic("processor_boot() missing for cpu %d", processor->cpu_id);
    }

    /* Stretch the timeout when debugging deliberately slows CPUs down. */
    uint32_t boot_timeout_extended = cpu_boot_timeout_secs *
        debug_cpu_performance_degradation_factor;

    spl_t s = splsched();
    simple_lock(&processor_start_state_lock, LCK_GRP_NULL);
    while (processor->processor_instartup) {
        /*
         * Wait on the instartup flag's address; the flag is re-checked
         * under the lock on every wakeup in case it was spurious.
         */
        assert_wait_timeout((event_t)&processor->processor_instartup,
            THREAD_UNINT, boot_timeout_extended, NSEC_PER_SEC);
        simple_unlock(&processor_start_state_lock);
        splx(s);

        wait_result_t wait_result = thread_block(THREAD_CONTINUE_NULL);
        if (wait_result == THREAD_TIMED_OUT) {
            panic("cpu %d failed to %s, waited %d seconds\n",
                processor->cpu_id,
                processor_start_panic_strings[start_kind],
                boot_timeout_extended);
        }

        s = splsched();
        simple_lock(&processor_start_state_lock, LCK_GRP_NULL);
    }

    if (processor->processor_inshutdown) {
        panic("%s>cpu %d still in shutdown",
            __func__, processor->cpu_id);
    }

    simple_unlock(&processor_start_state_lock);

    simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);

    if (!processor->processor_online) {
        panic("%s>cpu %d not online",
            __func__, processor->cpu_id);
    }

    /* First waiter after a start moves the CPU to fully RUNNING. */
    if (processor->processor_offline_state == PROCESSOR_OFFLINE_STARTED_NOT_WAITED) {
        processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_RUNNING);
    } else {
        assert(processor->processor_offline_state == PROCESSOR_OFFLINE_RUNNING);
    }

    simple_unlock(&sched_available_cores_lock);
    splx(s);
}
970
/* Mutex (and its lock group) serializing processor bring-up and shutdown. */
LCK_GRP_DECLARE(processor_updown_grp, "processor_updown");
LCK_MTX_DECLARE(processor_updown_lock, &processor_updown_grp);
973
/*
 * Start (or restart) a processor without waiting for it to come up.
 *
 * `first_boot` distinguishes a cold boot (offline state NOT_BOOTED) from a
 * restart after shutdown (offline state FULLY_OFFLINE).  Callers must hold
 * both cluster_powerdown_lock and processor_updown_lock (asserted below).
 */
static void
processor_dostartup(
    processor_t processor,
    bool first_boot)
{
    if (!processor->processor_booted && !first_boot) {
        panic("processor %d not booted", processor->cpu_id);
    }

    lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_assert(&processor_updown_lock, LCK_MTX_ASSERT_OWNED);

    processor_set_t pset = processor->processor_set;

    /* The processor's IPC port must already exist. */
    assert(processor->processor_self);

    spl_t s = splsched();

    /*
     * Flip the shutdown/startup flags: the CPU is now "in startup" until
     * the startup path clears processor_instartup (waited on by
     * processor_wait_for_start()).
     */
    simple_lock(&processor_start_state_lock, LCK_GRP_NULL);
    assert(processor->processor_inshutdown || first_boot);
    processor->processor_inshutdown = false;
    assert(processor->processor_instartup == false);
    processor->processor_instartup = true;
    simple_unlock(&processor_start_state_lock);

    simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);

    pset_lock(pset);

    if (first_boot) {
        assert(processor->processor_offline_state == PROCESSOR_OFFLINE_NOT_BOOTED);
    } else {
        assert(processor->processor_offline_state == PROCESSOR_OFFLINE_FULLY_OFFLINE);
    }

    processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_STARTING);

    assert(processor->state == PROCESSOR_OFF_LINE);

    pset_update_processor_state(pset, processor, PROCESSOR_START);
    pset_unlock(pset);

    simple_unlock(&sched_available_cores_lock);

    splx(s);

    /* Power the core and kick off the machine-level boot sequence. */
    ml_cpu_power_enable(processor->cpu_id);
    ml_cpu_begin_state_transition(processor->cpu_id);
    ml_broadcast_cpu_event(CPU_BOOT_REQUESTED, processor->cpu_id);

    cpu_start(processor->cpu_id);

    s = splsched();
    simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);

    /*
     * If the CPU already advanced itself past STARTING (to
     * STARTED_NOT_WAITED), leave its state alone.
     */
    if (processor->processor_offline_state == PROCESSOR_OFFLINE_STARTING) {
        processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_STARTED_NOT_RUNNING);
    } else {
        assert(processor->processor_offline_state == PROCESSOR_OFFLINE_STARTED_NOT_WAITED);
    }

    simple_unlock(&sched_available_cores_lock);
    splx(s);

    ml_cpu_end_state_transition(processor->cpu_id);
    /*
     * Note: Because the actual wait-for-start happens sometime later,
     * this races with processor_up calling CPU_BOOTED.
     * To fix that, this should happen after the first wait for start
     * confirms the CPU has booted.
     */
    ml_broadcast_cpu_event(CPU_ACTIVE, processor->cpu_id);
}
1047
/*
 * Shut a processor down, recording `reason` as the shutdown cause.
 *
 * `is_system_sleep` marks shutdowns done as part of full-system sleep;
 * when the target is the master processor, that is the final shutdown
 * before the system sleeps.  Only REASON_SYSTEM is legal while sleep is
 * in progress, and the master processor may only be shut down for
 * REASON_SYSTEM unless support_bootcpu_shutdown is set.
 */
void
processor_exit_reason(processor_t processor, processor_reason_t reason, bool is_system_sleep)
{
    assert(processor);
    assert(processor->processor_set);

    lck_mtx_lock(&processor_updown_lock);

    if (sched_is_in_sleep()) {
        assert(reason == REASON_SYSTEM);
    }

    assert((processor != master_processor) || (reason == REASON_SYSTEM) || support_bootcpu_shutdown);

    processor->last_shutdown_reason = reason;

    bool is_final_system_sleep = is_system_sleep && (processor == master_processor);

    processor_doshutdown(processor, is_final_system_sleep);

    lck_mtx_unlock(&processor_updown_lock);
}
1070
1071 /*
1072 * Called `processor_exit` in Unsupported KPI.
1073 * AppleARMCPU and AppleACPIPlatform call this in response to haltCPU().
1074 *
1075 * Behavior change: on both platforms, now xnu does the processor_sleep,
1076 * and ignores processor_exit calls from kexts.
1077 */
1078 kern_return_t
processor_exit_from_kext(__unused processor_t processor)1079 processor_exit_from_kext(
1080 __unused processor_t processor)
1081 {
1082 /* This is a no-op now. */
1083 return KERN_FAILURE;
1084 }
1085
/*
 * System-sleep path: shut this processor down on behalf of the system.
 * Caller must hold cluster_powerdown_lock (asserted).
 */
void
processor_sleep(
    processor_t processor)
{
    lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);

    processor_exit_reason(processor, REASON_SYSTEM, true);
}
1094
1095 kern_return_t
processor_exit_from_user(processor_t processor)1096 processor_exit_from_user(
1097 processor_t processor)
1098 {
1099 if (processor == PROCESSOR_NULL) {
1100 return KERN_INVALID_ARGUMENT;
1101 }
1102
1103 kern_return_t result;
1104
1105 lck_mtx_lock(&cluster_powerdown_lock);
1106
1107 result = sched_processor_exit_user(processor);
1108
1109 lck_mtx_unlock(&cluster_powerdown_lock);
1110
1111 return result;
1112 }
1113
/*
 * Restart an already-booted processor, recording `reason` as the startup
 * cause.  Requires REASON_SYSTEM while the system is in sleep; the caller
 * must hold cluster_powerdown_lock (asserted in processor_dostartup()).
 */
void
processor_start_reason(processor_t processor, processor_reason_t reason)
{
    lck_mtx_lock(&processor_updown_lock);

    assert(processor);
    assert(processor->processor_set);
    assert(processor->processor_booted);

    if (sched_is_in_sleep()) {
        /* During system sleep only the system itself may start CPUs. */
        assert(reason == REASON_SYSTEM);
    }

    processor->last_startup_reason = reason;

    processor_dostartup(processor, false);

    lck_mtx_unlock(&processor_updown_lock);
}
1133
1134 /*
1135 * Called `processor_start` in Unsupported KPI.
1136 * AppleARMCPU calls this to boot processors.
1137 * AppleACPIPlatform expects ml_processor_register to call processor_boot.
1138 *
1139 * Behavior change: now ml_processor_register also boots CPUs on ARM, and xnu
1140 * ignores processor_start calls from kexts.
1141 */
1142 kern_return_t
processor_start_from_kext(__unused processor_t processor)1143 processor_start_from_kext(
1144 __unused processor_t processor)
1145 {
1146 /* This is a no-op now. */
1147 return KERN_FAILURE;
1148 }
1149
1150 kern_return_t
processor_start_from_user(processor_t processor)1151 processor_start_from_user(
1152 processor_t processor)
1153 {
1154 if (processor == PROCESSOR_NULL) {
1155 return KERN_INVALID_ARGUMENT;
1156 }
1157
1158 kern_return_t result;
1159
1160 lck_mtx_lock(&cluster_powerdown_lock);
1161
1162 result = sched_processor_start_user(processor);
1163
1164 lck_mtx_unlock(&cluster_powerdown_lock);
1165
1166 return result;
1167 }
1168
/*
 * Boot up a processor for the first time.
 *
 * This will also be called against the main processor during system boot,
 * even though it's already running.  Takes cluster_powerdown_lock and
 * processor_updown_lock; panics on double boot.
 */
void
processor_boot(
    processor_t processor)
{
    lck_mtx_lock(&cluster_powerdown_lock);
    lck_mtx_lock(&processor_updown_lock);

    assert(!sched_is_in_sleep());
    assert(!sched_is_cpu_init_completed());

    if (processor->processor_booted) {
        panic("processor %d already booted", processor->cpu_id);
    }

    /* The master processor is already running, so its state differs. */
    if (processor == master_processor) {
        assert(processor->processor_offline_state == PROCESSOR_OFFLINE_STARTED_NOT_WAITED);
    } else {
        assert(processor->processor_offline_state == PROCESSOR_OFFLINE_NOT_BOOTED);
    }

    /*
     * Create the idle processor thread.
     */
    if (processor->idle_thread == THREAD_NULL) {
        idle_thread_create(processor, processor_start_thread);
    }

    /* Create the processor's IPC port if it doesn't exist yet. */
    if (processor->processor_self == IP_NULL) {
        ipc_processor_init(processor);
    }

    if (processor == master_processor) {
        processor->last_startup_reason = REASON_SYSTEM;

        ml_cpu_power_enable(processor->cpu_id);

        /*
         * Bind to the master processor so cpu_start() runs there, then
         * restore the previous binding afterwards.
         */
        processor_t prev = thread_bind(processor);
        thread_block(THREAD_CONTINUE_NULL);

        cpu_start(processor->cpu_id);

        assert(processor->state == PROCESSOR_RUNNING);
        processor_update_offline_state(processor, PROCESSOR_OFFLINE_RUNNING);

        thread_bind(prev);
    } else {
        processor->last_startup_reason = REASON_SYSTEM;

        /*
         * We don't wait for startup to finish, so all CPUs can start
         * in parallel.
         */
        processor_dostartup(processor, true);
    }

    processor->processor_booted = true;

    lck_mtx_unlock(&processor_updown_lock);
    lck_mtx_unlock(&cluster_powerdown_lock);
}
1235
/*
 * Wake a previously booted processor from a temporarily powered off state.
 * Caller must hold cluster_powerdown_lock (asserted).
 */
void
processor_wake(
    processor_t processor)
{
    lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);

    assert(processor->processor_booted);
    processor_start_reason(processor, REASON_SYSTEM);
}
1248
#if CONFIG_SCHED_SMT
/*
 * Enable or disable all secondary SMT logical processors.
 *
 * Walks every logical CPU except cpu 0 and starts (enable) or exits
 * (disable) each one that is not its own primary, then verifies via
 * host_info() that the machine reached the expected logical CPU count.
 *
 * Returns KERN_INVALID_ARGUMENT on non-SMT systems, KERN_FAILURE when the
 * post-condition check fails, and KERN_SUCCESS otherwise.
 */
kern_return_t
enable_smt_processors(bool enable)
{
    if (machine_info.logical_cpu_max == machine_info.physical_cpu_max) {
        /* Not an SMT system */
        return KERN_INVALID_ARGUMENT;
    }

    int ncpus = machine_info.logical_cpu_max;

    for (int i = 1; i < ncpus; i++) {
        processor_t processor = processor_array[i];

        /* Secondary logical CPUs point at a different primary. */
        if (processor->processor_primary != processor) {
            if (enable) {
                /*
                 * NOTE(review): per-CPU return values are ignored here;
                 * presumably the host_info() check below catches
                 * failures — confirm.
                 */
                processor_start_from_user(processor);
            } else { /* Disable */
                processor_exit_from_user(processor);
            }
        }
    }

#define BSD_HOST 1
    host_basic_info_data_t hinfo;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    kern_return_t kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
    if (kret != KERN_SUCCESS) {
        return kret;
    }

    /* When enabling, every logical CPU should now be up... */
    if (enable && (hinfo.logical_cpu != hinfo.logical_cpu_max)) {
        return KERN_FAILURE;
    }

    /* ...when disabling, only the physical CPUs should remain. */
    if (!enable && (hinfo.logical_cpu != hinfo.physical_cpu)) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
#endif /* CONFIG_SCHED_SMT */
1291
1292 bool
processor_should_kprintf(processor_t processor,bool starting)1293 processor_should_kprintf(processor_t processor, bool starting)
1294 {
1295 processor_reason_t reason = starting ? processor->last_startup_reason : processor->last_shutdown_reason;
1296
1297 return reason != REASON_CLPC_SYSTEM;
1298 }
1299
1300 kern_return_t
processor_control(processor_t processor,processor_info_t info,mach_msg_type_number_t count)1301 processor_control(
1302 processor_t processor,
1303 processor_info_t info,
1304 mach_msg_type_number_t count)
1305 {
1306 if (processor == PROCESSOR_NULL) {
1307 return KERN_INVALID_ARGUMENT;
1308 }
1309
1310 return cpu_control(processor->cpu_id, info, count);
1311 }
1312
1313 kern_return_t
processor_get_assignment(processor_t processor,processor_set_t * pset)1314 processor_get_assignment(
1315 processor_t processor,
1316 processor_set_t *pset)
1317 {
1318 int state;
1319
1320 if (processor == PROCESSOR_NULL) {
1321 return KERN_INVALID_ARGUMENT;
1322 }
1323
1324 state = processor->state;
1325 if (state == PROCESSOR_OFF_LINE || state == PROCESSOR_PENDING_OFFLINE) {
1326 return KERN_FAILURE;
1327 }
1328
1329 *pset = &pset0;
1330
1331 return KERN_SUCCESS;
1332 }
1333
1334 kern_return_t
processor_set_info(processor_set_t pset,int flavor,host_t * host,processor_set_info_t info,mach_msg_type_number_t * count)1335 processor_set_info(
1336 processor_set_t pset,
1337 int flavor,
1338 host_t *host,
1339 processor_set_info_t info,
1340 mach_msg_type_number_t *count)
1341 {
1342 if (pset == PROCESSOR_SET_NULL) {
1343 return KERN_INVALID_ARGUMENT;
1344 }
1345
1346 if (flavor == PROCESSOR_SET_BASIC_INFO) {
1347 processor_set_basic_info_t basic_info;
1348
1349 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT) {
1350 return KERN_FAILURE;
1351 }
1352
1353 basic_info = (processor_set_basic_info_t) info;
1354 #if defined(__x86_64__)
1355 basic_info->processor_count = processor_avail_count_user;
1356 #else
1357 basic_info->processor_count = processor_avail_count;
1358 #endif
1359 basic_info->default_policy = POLICY_TIMESHARE;
1360
1361 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
1362 *host = &realhost;
1363 return KERN_SUCCESS;
1364 } else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
1365 policy_timeshare_base_t ts_base;
1366
1367 if (*count < POLICY_TIMESHARE_BASE_COUNT) {
1368 return KERN_FAILURE;
1369 }
1370
1371 ts_base = (policy_timeshare_base_t) info;
1372 ts_base->base_priority = BASEPRI_DEFAULT;
1373
1374 *count = POLICY_TIMESHARE_BASE_COUNT;
1375 *host = &realhost;
1376 return KERN_SUCCESS;
1377 } else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
1378 policy_fifo_base_t fifo_base;
1379
1380 if (*count < POLICY_FIFO_BASE_COUNT) {
1381 return KERN_FAILURE;
1382 }
1383
1384 fifo_base = (policy_fifo_base_t) info;
1385 fifo_base->base_priority = BASEPRI_DEFAULT;
1386
1387 *count = POLICY_FIFO_BASE_COUNT;
1388 *host = &realhost;
1389 return KERN_SUCCESS;
1390 } else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
1391 policy_rr_base_t rr_base;
1392
1393 if (*count < POLICY_RR_BASE_COUNT) {
1394 return KERN_FAILURE;
1395 }
1396
1397 rr_base = (policy_rr_base_t) info;
1398 rr_base->base_priority = BASEPRI_DEFAULT;
1399 rr_base->quantum = 1;
1400
1401 *count = POLICY_RR_BASE_COUNT;
1402 *host = &realhost;
1403 return KERN_SUCCESS;
1404 } else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
1405 policy_timeshare_limit_t ts_limit;
1406
1407 if (*count < POLICY_TIMESHARE_LIMIT_COUNT) {
1408 return KERN_FAILURE;
1409 }
1410
1411 ts_limit = (policy_timeshare_limit_t) info;
1412 ts_limit->max_priority = MAXPRI_KERNEL;
1413
1414 *count = POLICY_TIMESHARE_LIMIT_COUNT;
1415 *host = &realhost;
1416 return KERN_SUCCESS;
1417 } else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
1418 policy_fifo_limit_t fifo_limit;
1419
1420 if (*count < POLICY_FIFO_LIMIT_COUNT) {
1421 return KERN_FAILURE;
1422 }
1423
1424 fifo_limit = (policy_fifo_limit_t) info;
1425 fifo_limit->max_priority = MAXPRI_KERNEL;
1426
1427 *count = POLICY_FIFO_LIMIT_COUNT;
1428 *host = &realhost;
1429 return KERN_SUCCESS;
1430 } else if (flavor == PROCESSOR_SET_RR_LIMITS) {
1431 policy_rr_limit_t rr_limit;
1432
1433 if (*count < POLICY_RR_LIMIT_COUNT) {
1434 return KERN_FAILURE;
1435 }
1436
1437 rr_limit = (policy_rr_limit_t) info;
1438 rr_limit->max_priority = MAXPRI_KERNEL;
1439
1440 *count = POLICY_RR_LIMIT_COUNT;
1441 *host = &realhost;
1442 return KERN_SUCCESS;
1443 } else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
1444 int *enabled;
1445
1446 if (*count < (sizeof(*enabled) / sizeof(int))) {
1447 return KERN_FAILURE;
1448 }
1449
1450 enabled = (int *) info;
1451 *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
1452
1453 *count = sizeof(*enabled) / sizeof(int);
1454 *host = &realhost;
1455 return KERN_SUCCESS;
1456 }
1457
1458
1459 *host = HOST_NULL;
1460 return KERN_INVALID_ARGUMENT;
1461 }
1462
1463 /*
1464 * processor_set_statistics
1465 *
1466 * Returns scheduling statistics for a processor set.
1467 */
1468 kern_return_t
processor_set_statistics(processor_set_t pset,int flavor,processor_set_info_t info,mach_msg_type_number_t * count)1469 processor_set_statistics(
1470 processor_set_t pset,
1471 int flavor,
1472 processor_set_info_t info,
1473 mach_msg_type_number_t *count)
1474 {
1475 if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
1476 return KERN_INVALID_PROCESSOR_SET;
1477 }
1478
1479 if (flavor == PROCESSOR_SET_LOAD_INFO) {
1480 processor_set_load_info_t load_info;
1481
1482 if (*count < PROCESSOR_SET_LOAD_INFO_COUNT) {
1483 return KERN_FAILURE;
1484 }
1485
1486 load_info = (processor_set_load_info_t) info;
1487
1488 load_info->mach_factor = sched_mach_factor;
1489 load_info->load_average = sched_load_average;
1490
1491 load_info->task_count = tasks_count;
1492 load_info->thread_count = threads_count;
1493
1494 *count = PROCESSOR_SET_LOAD_INFO_COUNT;
1495 return KERN_SUCCESS;
1496 }
1497
1498 return KERN_INVALID_ARGUMENT;
1499 }
1500
/*
 * processor_set_things:
 *
 * Common internals for processor_set_{threads,tasks}.
 *
 * Builds an array of referenced tasks or threads (selected by `type`)
 * belonging to the default pset, filtered by the MAC policy for `flavor`
 * when CONFIG_MACF is enabled.  On success, *thing_list is an array the
 * caller owns (elements still hold task/thread references) and *countp is
 * the element count.
 */
static kern_return_t
processor_set_things(
    processor_set_t pset,
    mach_port_array_t *thing_list,
    mach_msg_type_number_t *countp,
    int type,
    mach_task_flavor_t flavor)
{
    unsigned int i;
    task_t task;
    thread_t thread;

    mach_port_array_t task_addr;
    task_t *task_list;
    vm_size_t actual_tasks, task_count_cur, task_count_needed;

    mach_port_array_t thread_addr;
    thread_t *thread_list;
    vm_size_t actual_threads, thread_count_cur, thread_count_needed;

    mach_port_array_t addr, newaddr;
    vm_size_t count, count_needed;

    /* Only the single default pset is supported. */
    if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
        return KERN_INVALID_ARGUMENT;
    }

    task_count_cur = 0;
    task_count_needed = 0;
    task_list = NULL;
    task_addr = NULL;
    actual_tasks = 0;

    thread_count_cur = 0;
    thread_count_needed = 0;
    thread_list = NULL;
    thread_addr = NULL;
    actual_threads = 0;

    /*
     * Allocate-and-retry loop: the needed sizes can only be sampled under
     * tasks_threads_lock, but allocation must happen with it dropped, so
     * loop until the buffers are large enough for the current counts.
     */
    for (;;) {
        lck_mtx_lock(&tasks_threads_lock);

        /* do we have the memory we need? */
        if (type == PSET_THING_THREAD) {
            thread_count_needed = threads_count;
        }
#if !CONFIG_MACF
        else
#endif
        /* with MACF, the task list is always needed to filter threads */
        task_count_needed = tasks_count;

        if (task_count_needed <= task_count_cur &&
            thread_count_needed <= thread_count_cur) {
            break;
        }

        /* unlock and allocate more memory */
        lck_mtx_unlock(&tasks_threads_lock);

        /* grow task array */
        if (task_count_needed > task_count_cur) {
            mach_port_array_free(task_addr, task_count_cur);
            assert(task_count_needed > 0);
            task_count_cur = task_count_needed;

            task_addr = mach_port_array_alloc(task_count_cur,
                Z_WAITOK | Z_ZERO);
            if (task_addr == NULL) {
                mach_port_array_free(thread_addr, thread_count_cur);
                return KERN_RESOURCE_SHORTAGE;
            }
            task_list = (task_t *)task_addr;
        }

        /* grow thread array */
        if (thread_count_needed > thread_count_cur) {
            mach_port_array_free(thread_addr, thread_count_cur);
            assert(thread_count_needed > 0);
            thread_count_cur = thread_count_needed;

            thread_addr = mach_port_array_alloc(thread_count_cur,
                Z_WAITOK | Z_ZERO);
            if (thread_addr == NULL) {
                mach_port_array_free(task_addr, task_count_cur);
                return KERN_RESOURCE_SHORTAGE;
            }
            thread_list = (thread_t *)thread_addr;
        }
    }

    /* OK, have memory and the list locked */

    /* If we need it, get the thread list */
    if (type == PSET_THING_THREAD) {
        queue_iterate(&threads, thread, thread_t, threads) {
            task = get_threadtask(thread);
#if defined(SECURE_KERNEL)
            if (task == kernel_task) {
                /* skip threads belonging to kernel_task */
                continue;
            }
#endif
            if (!task->ipc_active || task_is_exec_copy(task)) {
                /* skip threads in inactive tasks (in the middle of exec/fork/spawn) */
                continue;
            }

            /* take a reference that the caller (or the MACF pass) drops */
            thread_reference(thread);
            thread_list[actual_threads++] = thread;
        }
    }
#if !CONFIG_MACF
    else
#endif
    {
        /* get a list of the tasks */
        queue_iterate(&tasks, task, task_t, tasks) {
#if defined(SECURE_KERNEL)
            if (task == kernel_task) {
                /* skip kernel_task */
                continue;
            }
#endif
            if (!task->ipc_active || task_is_exec_copy(task)) {
                /* skip inactive tasks (in the middle of exec/fork/spawn) */
                continue;
            }

            task_reference(task);
            task_list[actual_tasks++] = task;
        }
    }

    lck_mtx_unlock(&tasks_threads_lock);

#if CONFIG_MACF
    unsigned int j, used;

    /* for each task, make sure we are allowed to examine it */
    for (i = used = 0; i < actual_tasks; i++) {
        if (mac_task_check_expose_task(task_list[i], flavor)) {
            task_deallocate(task_list[i]);
            continue;
        }
        task_list[used++] = task_list[i];
    }
    actual_tasks = used;
    task_count_needed = actual_tasks;

    if (type == PSET_THING_THREAD) {
        /* for each thread (if any), make sure it's task is in the allowed list */
        for (i = used = 0; i < actual_threads; i++) {
            boolean_t found_task = FALSE;

            task = get_threadtask(thread_list[i]);
            for (j = 0; j < actual_tasks; j++) {
                if (task_list[j] == task) {
                    found_task = TRUE;
                    break;
                }
            }
            if (found_task) {
                thread_list[used++] = thread_list[i];
            } else {
                /* thread's task was filtered out; drop the thread too */
                thread_deallocate(thread_list[i]);
            }
        }
        actual_threads = used;
        thread_count_needed = actual_threads;

        /* done with the task list */
        for (i = 0; i < actual_tasks; i++) {
            task_deallocate(task_list[i]);
        }
        mach_port_array_free(task_addr, task_count_cur);
        task_list = NULL;
        task_count_cur = 0;
        actual_tasks = 0;
    }
#endif

    if (type == PSET_THING_THREAD) {
        if (actual_threads == 0) {
            /* no threads available to return */
            assert(task_count_cur == 0);
            mach_port_array_free(thread_addr, thread_count_cur);
            thread_list = NULL;
            *thing_list = NULL;
            *countp = 0;
            return KERN_SUCCESS;
        }
        count_needed = actual_threads;
        count = thread_count_cur;
        addr = thread_addr;
    } else {
        if (actual_tasks == 0) {
            /* no tasks available to return */
            assert(thread_count_cur == 0);
            mach_port_array_free(task_addr, task_count_cur);
            *thing_list = NULL;
            *countp = 0;
            return KERN_SUCCESS;
        }
        count_needed = actual_tasks;
        count = task_count_cur;
        addr = task_addr;
    }

    /* if we allocated too much, must copy */
    if (count_needed < count) {
        newaddr = mach_port_array_alloc(count_needed, Z_WAITOK | Z_ZERO);
        if (newaddr == NULL) {
            /* drop every reference taken above before failing */
            for (i = 0; i < actual_tasks; i++) {
                if (type == PSET_THING_THREAD) {
                    thread_deallocate(thread_list[i]);
                } else {
                    task_deallocate(task_list[i]);
                }
            }
            mach_port_array_free(addr, count);
            return KERN_RESOURCE_SHORTAGE;
        }

        bcopy(addr, newaddr, count_needed * sizeof(void *));
        mach_port_array_free(addr, count);

        addr = newaddr;
        count = count_needed;
    }

    *thing_list = addr;
    *countp = (mach_msg_type_number_t)count;

    return KERN_SUCCESS;
}
1741
1742 /*
1743 * processor_set_tasks:
1744 *
1745 * List all tasks in the processor set.
1746 */
1747 static kern_return_t
processor_set_tasks_internal(processor_set_t pset,task_array_t * task_list,mach_msg_type_number_t * count,mach_task_flavor_t flavor)1748 processor_set_tasks_internal(
1749 processor_set_t pset,
1750 task_array_t *task_list,
1751 mach_msg_type_number_t *count,
1752 mach_task_flavor_t flavor)
1753 {
1754 kern_return_t ret;
1755
1756 ret = processor_set_things(pset, task_list, count, PSET_THING_TASK, flavor);
1757 if (ret != KERN_SUCCESS) {
1758 return ret;
1759 }
1760
1761 /* do the conversion that Mig should handle */
1762 convert_task_array_to_ports(*task_list, *count, flavor);
1763 return KERN_SUCCESS;
1764 }
1765
1766 kern_return_t
processor_set_tasks(processor_set_t pset,task_array_t * task_list,mach_msg_type_number_t * count)1767 processor_set_tasks(
1768 processor_set_t pset,
1769 task_array_t *task_list,
1770 mach_msg_type_number_t *count)
1771 {
1772 return processor_set_tasks_internal(pset, task_list, count, TASK_FLAVOR_CONTROL);
1773 }
1774
1775 /*
1776 * processor_set_tasks_with_flavor:
1777 *
1778 * Based on flavor, return task/inspect/read port to all tasks in the processor set.
1779 */
1780 kern_return_t
processor_set_tasks_with_flavor(processor_set_t pset,mach_task_flavor_t flavor,task_array_t * task_list,mach_msg_type_number_t * count)1781 processor_set_tasks_with_flavor(
1782 processor_set_t pset,
1783 mach_task_flavor_t flavor,
1784 task_array_t *task_list,
1785 mach_msg_type_number_t *count)
1786 {
1787 switch (flavor) {
1788 case TASK_FLAVOR_CONTROL:
1789 case TASK_FLAVOR_READ:
1790 case TASK_FLAVOR_INSPECT:
1791 case TASK_FLAVOR_NAME:
1792 return processor_set_tasks_internal(pset, task_list, count, flavor);
1793 default:
1794 return KERN_INVALID_ARGUMENT;
1795 }
1796 }
1797
/*
 * processor_set_threads:
 *
 * List all threads in the processor set.
 *
 * Disabled (KERN_FAILURE) on SECURE_KERNEL builds and unsupported
 * (KERN_NOT_SUPPORTED) on non-macOS targets; only the macOS build walks
 * the thread list.
 */
#if defined(SECURE_KERNEL)
/* Secure kernels never expose the thread list. */
kern_return_t
processor_set_threads(
    __unused processor_set_t pset,
    __unused thread_act_array_t *thread_list,
    __unused mach_msg_type_number_t *count)
{
    return KERN_FAILURE;
}
#elif !defined(XNU_TARGET_OS_OSX)
/* Embedded platforms report the call as unsupported. */
kern_return_t
processor_set_threads(
    __unused processor_set_t pset,
    __unused thread_act_array_t *thread_list,
    __unused mach_msg_type_number_t *count)
{
    return KERN_NOT_SUPPORTED;
}
#else
kern_return_t
processor_set_threads(
    processor_set_t pset,
    thread_act_array_t *thread_list,
    mach_msg_type_number_t *count)
{
    kern_return_t ret;

    ret = processor_set_things(pset, thread_list, count,
        PSET_THING_THREAD, TASK_FLAVOR_CONTROL);
    if (ret != KERN_SUCCESS) {
        return ret;
    }

    /* do the conversion that Mig should handle */
    convert_thread_array_to_ports(*thread_list, *count, TASK_FLAVOR_CONTROL);
    return KERN_SUCCESS;
}
#endif
1841
/*
 * Choose which cluster type the AMP scheduler should prefer for `thread`.
 *
 * Only meaningful for the non-Edge AMP scheduler policy; on every other
 * configuration this always returns PSET_SMP.
 */
pset_cluster_type_t
recommended_pset_type(thread_t thread)
{
    /* Only used by the AMP scheduler policy */
#if CONFIG_THREAD_GROUPS && __AMP__ && !CONFIG_SCHED_EDGE
    /* No thread context: default to efficiency cores. */
    if (thread == THREAD_NULL) {
        return PSET_AMP_E;
    }

#if DEVELOPMENT || DEBUG
    /* Debug overrides: task pset hint, or force-everything-to-E mode. */
    extern bool system_ecore_only;
    extern int enable_task_set_cluster_type;
    task_t task = get_threadtask(thread);
    if (enable_task_set_cluster_type && (task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE)) {
        processor_set_t pset_hint = task->pset_hint;
        if (pset_hint) {
            return pset_hint->pset_cluster_type;
        }
    }

    if (system_ecore_only) {
        return PSET_AMP_E;
    }
#endif

    /* Explicit cluster binding wins over policy. */
    if (thread->th_bound_cluster_id != THREAD_BOUND_CLUSTER_NONE) {
        return pset_array[thread->th_bound_cluster_id]->pset_cluster_type;
    }

    /*
     * Low-priority (background / utility) threads go to E-cores unless
     * the perfctl policy says to follow the thread group instead.
     */
    if (thread->base_pri <= MAXPRI_THROTTLE) {
        if (os_atomic_load(&sched_perfctl_policy_bg, relaxed) != SCHED_PERFCTL_POLICY_FOLLOW_GROUP) {
            return PSET_AMP_E;
        }
    } else if (thread->base_pri <= BASEPRI_UTILITY) {
        if (os_atomic_load(&sched_perfctl_policy_util, relaxed) != SCHED_PERFCTL_POLICY_FOLLOW_GROUP) {
            return PSET_AMP_E;
        }
    }

    /* Otherwise follow the thread group's cluster recommendation. */
    struct thread_group *tg = thread_group_get(thread);
    cluster_type_t recommendation = thread_group_recommendation(tg);
    switch (recommendation) {
    case CLUSTER_TYPE_SMP:
    default:
        /* No specific recommendation: kernel threads to E, others to P. */
        if (get_threadtask(thread) == kernel_task) {
            return PSET_AMP_E;
        }
        return PSET_AMP_P;
    case CLUSTER_TYPE_E:
        return PSET_AMP_E;
    case CLUSTER_TYPE_P:
        return PSET_AMP_P;
    }
#else /* !CONFIG_THREAD_GROUPS || !__AMP__ || CONFIG_SCHED_EDGE */
    (void)thread;
    return PSET_SMP;
#endif /* !CONFIG_THREAD_GROUPS || !__AMP__ || CONFIG_SCHED_EDGE */
}
1900
#if __arm64__

/*
 * Translate a scheduler pset cluster type into the machine-layer
 * cluster_type_t.  Panics on values with no mapping.
 */
cluster_type_t
pset_cluster_type_to_cluster_type(pset_cluster_type_t pset_cluster_type)
{
    switch (pset_cluster_type) {
#if __AMP__
    case PSET_AMP_E:
        return CLUSTER_TYPE_E;
    case PSET_AMP_P:
        return CLUSTER_TYPE_P;
#endif /* __AMP__ */
    case PSET_SMP:
        return CLUSTER_TYPE_SMP;
    default:
        panic("Unexpected pset cluster type %d", pset_cluster_type);
    }
}

/*
 * Inverse mapping: machine-layer cluster_type_t to scheduler pset cluster
 * type.  Panics on values with no mapping.
 */
pset_cluster_type_t
cluster_type_to_pset_cluster_type(cluster_type_t cluster_type)
{
    switch (cluster_type) {
#if __AMP__
    case CLUSTER_TYPE_E:
        return PSET_AMP_E;
    case CLUSTER_TYPE_P:
        return PSET_AMP_P;
#endif /* __AMP__ */
    case CLUSTER_TYPE_SMP:
        return PSET_SMP;
    default:
        panic("Unexpected cluster type %d", cluster_type);
    }
}

#endif /* __arm64__ */
1938
#if CONFIG_THREAD_GROUPS && __AMP__ && !CONFIG_SCHED_EDGE

/*
 * Perfcontrol (CLPC) interface: choose whether threads of the given
 * perfctl class follow their thread group's cluster recommendation
 * (inherit == TRUE) or are restricted to E-cores.  Consumed by
 * recommended_pset_type() via the sched_perfctl_policy_* atomics.
 */
void
sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit)
{
    sched_perfctl_class_policy_t sched_policy = inherit ? SCHED_PERFCTL_POLICY_FOLLOW_GROUP : SCHED_PERFCTL_POLICY_RESTRICT_E;

    /* Trace the policy change for debugging. */
    KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_PERFCTL_POLICY_CHANGE) | DBG_FUNC_NONE, perfctl_class, sched_policy, 0, 0);

    switch (perfctl_class) {
    case PERFCONTROL_CLASS_UTILITY:
        os_atomic_store(&sched_perfctl_policy_util, sched_policy, relaxed);
        break;
    case PERFCONTROL_CLASS_BACKGROUND:
        os_atomic_store(&sched_perfctl_policy_bg, sched_policy, relaxed);
        break;
    default:
        panic("perfctl_class invalid");
        break;
    }
}

#elif defined(__arm64__)

/* Define a stub routine since this symbol is exported on all arm64 platforms */
void
sched_perfcontrol_inherit_recommendation_from_tg(__unused perfcontrol_class_t perfctl_class, __unused boolean_t inherit)
{
}

#endif /* defined(__arm64__) */
1970