xref: /xnu-8792.81.2/osfmk/kern/sched_amp.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/sched_amp_common.h>

#include <sys/kdebug.h>

#if __AMP__

static thread_t
sched_amp_steal_thread(processor_set_t pset);

static void
sched_amp_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_amp_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_amp_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_amp_processor_csw_check(processor_t processor);

static boolean_t
sched_amp_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_amp_runq_count(processor_t processor);

static boolean_t
sched_amp_processor_queue_empty(processor_t processor);

static uint64_t
sched_amp_runq_stats_count_sum(processor_t processor);

static int
sched_amp_processor_bound_count(processor_t processor);

static void
sched_amp_pset_init(processor_set_t pset);

static void
sched_amp_processor_init(processor_t processor);

static thread_t
sched_amp_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_amp_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_amp_initial_thread_sched_mode(task_t parent_task);

static processor_t
sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread);

static bool
sched_amp_thread_avoid_processor(processor_t processor, thread_t thread);

static bool
sched_amp_thread_should_yield(processor_t processor, thread_t thread);

static void
sched_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation);

static bool
sched_amp_thread_eligible_for_pset(thread_t thread, processor_set_t pset);

static void
sched_amp_cpu_init_completed(void);

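/*
 * Dispatch table for the two-cluster AMP scheduler.  The cluster-aware
 * policy hooks (spill, steal, rebalance, IPI selection) come from this
 * file and sched_amp_common.c; the remaining entries reuse the standard
 * timeshare scheduler implementations.
 */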
const struct sched_dispatch_table sched_amp_dispatch = {
	.sched_name                                     = "amp",
	.init                                           = sched_amp_init,
	.timebase_init                                  = sched_timeshare_timebase_init,
	.processor_init                                 = sched_amp_processor_init,
	.pset_init                                      = sched_amp_pset_init,
	.maintenance_continuation                       = sched_timeshare_maintenance_continue,
	.choose_thread                                  = sched_amp_choose_thread,
	.steal_thread_enabled                           = sched_amp_steal_thread_enabled,
	.steal_thread                                   = sched_amp_steal_thread,
	.compute_timeshare_priority                     = sched_compute_timeshare_priority,
	.choose_node                                    = sched_amp_choose_node,
	.choose_processor                               = sched_amp_choose_processor,
	.processor_enqueue                              = sched_amp_processor_enqueue,
	.processor_queue_shutdown                       = sched_amp_processor_queue_shutdown,
	.processor_queue_remove                         = sched_amp_processor_queue_remove,
	.processor_queue_empty                          = sched_amp_processor_queue_empty,
	.priority_is_urgent                             = priority_is_urgent,
	.processor_csw_check                            = sched_amp_processor_csw_check,
	.processor_queue_has_priority                   = sched_amp_processor_queue_has_priority,
	.initial_quantum_size                           = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode                      = sched_amp_initial_thread_sched_mode,
	.can_update_priority                            = can_update_priority,
	.update_priority                                = update_priority,
	.lightweight_update_priority                    = lightweight_update_priority,
	.quantum_expire                                 = sched_default_quantum_expire,
	.processor_runq_count                           = sched_amp_runq_count,
	.processor_runq_stats_count_sum                 = sched_amp_runq_stats_count_sum,
	.processor_bound_count                          = sched_amp_processor_bound_count,
	.thread_update_scan                             = sched_amp_thread_update_scan,
	.multiple_psets_enabled                         = TRUE,
	.sched_groups_enabled                           = FALSE,
	.avoid_processor_enabled                        = TRUE,
	.thread_avoid_processor                         = sched_amp_thread_avoid_processor,
	.processor_balance                              = sched_amp_balance,

	.rt_runq                                        = sched_rtlocal_runq,
	.rt_init                                        = sched_rtlocal_init,
	.rt_queue_shutdown                              = sched_rtlocal_queue_shutdown,
	.rt_runq_scan                                   = sched_rtlocal_runq_scan,
	.rt_runq_count_sum                              = sched_rtlocal_runq_count_sum,
	.rt_steal_thread                                = sched_rtlocal_steal_thread,

	.qos_max_parallelism                            = sched_amp_qos_max_parallelism,
	.check_spill                                    = sched_amp_check_spill,
	.ipi_policy                                     = sched_amp_ipi_policy,
	.thread_should_yield                            = sched_amp_thread_should_yield,
	.run_count_incr                                 = sched_run_incr,
	.run_count_decr                                 = sched_run_decr,
	.update_thread_bucket                           = sched_update_thread_bucket,
	.pset_made_schedulable                          = sched_pset_made_schedulable,
	.thread_group_recommendation_change             = sched_amp_thread_group_recommendation_change,
	.cpu_init_completed                             = sched_amp_cpu_init_completed,
	.thread_eligible_for_pset                       = sched_amp_thread_eligible_for_pset,
};

extern processor_set_t ecore_set;
extern processor_set_t pcore_set;

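/*
 * Run queue helpers: unbound threads live on the shared per-pset run
 * queue, while threads bound to a specific processor use that
 * processor's local run queue.
 */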
__attribute__((always_inline))
static inline run_queue_t
amp_main_runq(processor_t processor)
{
	return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t
amp_bound_runq(processor_t processor)
{
	return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t
amp_runq_for_thread(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return amp_main_runq(processor);
	} else {
		assert(thread->bound_processor == processor);
		return amp_bound_runq(processor);
	}
}

static sched_mode_t
sched_amp_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static void
sched_amp_processor_init(processor_t processor)
{
	run_queue_init(&processor->runq);
}

static void
sched_amp_pset_init(processor_set_t pset)
{
	if (pset->pset_cluster_type == PSET_AMP_P) {
		pset->pset_type = CLUSTER_TYPE_P;
		pcore_set = pset;
	} else {
		assert(pset->pset_cluster_type == PSET_AMP_E);
		pset->pset_type = CLUSTER_TYPE_E;
		ecore_set = pset;
	}
	run_queue_init(&pset->pset_runq);
}

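/*
 * sched_amp_choose_thread()
 *
 * Pick the next thread for this processor from its bound and pset run
 * queues.  On an E-core with a pending spill from the P-cluster, a
 * higher-priority thread waiting on the P-core run queue causes
 * THREAD_NULL to be returned so that thread_select() falls back to
 * sched_amp_steal_thread().
 */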
static thread_t
sched_amp_choose_thread(
	processor_t      processor,
	int              priority,
	__unused ast_t            reason)
{
	processor_set_t pset = processor->processor_set;
	bool spill_pending = false;
	int spill_pri = -1;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
	}

	run_queue_t main_runq  = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);
	run_queue_t chosen_runq;

	if ((bound_runq->highq < priority) &&
	    (main_runq->highq < priority) &&
	    (spill_pri < priority)) {
		return THREAD_NULL;
	}

	if ((spill_pri > bound_runq->highq) &&
	    (spill_pri > main_runq->highq)) {
		/*
		 * There is a higher priority thread on the P-core runq,
		 * so returning THREAD_NULL here will cause thread_select()
		 * to call sched_amp_steal_thread() to try to get it.
		 */
		return THREAD_NULL;
	}

	if (bound_runq->highq >= main_runq->highq) {
		chosen_runq = bound_runq;
	} else {
		chosen_runq = main_runq;
	}

	return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

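/*
 * Enqueue the thread on the run queue chosen by amp_runq_for_thread()
 * and record the processor whose run queue now holds it.
 */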
static boolean_t
sched_amp_processor_enqueue(
	processor_t       processor,
	thread_t          thread,
	sched_options_t   options)
{
	run_queue_t     rq = amp_runq_for_thread(processor, thread);
	boolean_t       result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;

	return result;
}

static boolean_t
sched_amp_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id);

	return (amp_main_runq(processor)->count == 0) &&
	       (amp_bound_runq(processor)->count == 0) &&
	       !spill_pending;
}

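/*
 * A thread should yield if this processor has runnable work queued, or
 * if it is a P-recommended thread running on an E-core while the
 * P-cluster run queue is non-empty.
 */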
static bool
sched_amp_thread_should_yield(processor_t processor, thread_t thread)
{
	if (!sched_amp_processor_queue_empty(processor) || (rt_runq_count(processor->processor_set) > 0)) {
		return true;
	}

	if ((processor->processor_set->pset_cluster_type == PSET_AMP_E) && (recommended_pset_type(thread) == PSET_AMP_P)) {
		return pcore_set && pcore_set->pset_runq.count > 0;
	}

	return false;
}

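/*
 * Decide whether the thread running on this processor should be asked
 * to preempt (and whether urgently).  Pending spill work on the
 * P-cluster is treated as if it were queued locally when comparing
 * priorities.
 */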
static ast_t
sched_amp_processor_csw_check(processor_t processor)
{
	boolean_t       has_higher;
	int             pri;

	run_queue_t main_runq  = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);

	assert(processor->active_thread != NULL);

	processor_set_t pset = processor->processor_set;
	bool spill_pending = false;
	int spill_pri = -1;
	int spill_urgency = 0;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
		spill_urgency = pcore_set->pset_runq.urgency;
	}

	pri = MAX(main_runq->highq, bound_runq->highq);
	if (spill_pending) {
		pri = MAX(pri, spill_pri);
	}

	if (processor->first_timeslice) {
		has_higher = (pri > processor->current_pri);
	} else {
		has_higher = (pri >= processor->current_pri);
	}

	if (has_higher) {
		if (main_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (bound_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (spill_urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}

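/*
 * Return whether this processor can see a runnable thread above (or at,
 * if gte) the given priority, including pending P-cluster spill work
 * for E-cores.
 */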
static boolean_t
sched_amp_processor_queue_has_priority(processor_t    processor,
    int            priority,
    boolean_t      gte)
{
	bool spill_pending = false;
	int spill_pri = -1;
	processor_set_t pset = processor->processor_set;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
	}
	run_queue_t main_runq  = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);

	int qpri = MAX(main_runq->highq, bound_runq->highq);
	if (spill_pending) {
		qpri = MAX(qpri, spill_pri);
	}

	if (gte) {
		return qpri >= priority;
	} else {
		return qpri > priority;
	}
}

static int
sched_amp_runq_count(processor_t processor)
{
	return amp_main_runq(processor)->count + amp_bound_runq(processor)->count;
}

static uint64_t
sched_amp_runq_stats_count_sum(processor_t processor)
{
	uint64_t bound_sum = amp_bound_runq(processor)->runq_stats.count_sum;

	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return bound_sum + amp_main_runq(processor)->runq_stats.count_sum;
	} else {
		return bound_sum;
	}
}

static int
sched_amp_processor_bound_count(processor_t processor)
{
	return amp_bound_runq(processor)->count;
}

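/*
 * Called with the pset locked when a processor is shut down.  If this is
 * the last active, recommended processor in the pset, drain the pset run
 * queue and let thread_setrun() place the threads elsewhere.  Returns
 * with the pset unlocked.
 */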
static void
sched_amp_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t     rq   = amp_main_runq(processor);
	thread_t        thread;
	queue_head_t    tqueue;

	/* We only need to migrate threads if this is the last active or last recommended processor in the pset */
	if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	while (rq->count > 0) {
		thread = run_queue_dequeue(rq, SCHED_HEADQ);
		enqueue_tail(&tqueue, &thread->runq_links);
	}

	pset_unlock(pset);

	qe_foreach_element_safe(thread, &tqueue, runq_links) {
		remqueue(&thread->runq_links);

		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_amp_processor_queue_remove(
	processor_t processor,
	thread_t    thread)
{
	run_queue_t             rq;
	processor_set_t         pset = processor->processor_set;

	pset_lock(pset);

	rq = amp_runq_for_thread(processor, thread);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}

/*
 * sched_amp_steal_thread()
 *
 * Invoked with the pset locked by an E-core that found nothing to run
 * locally.  If the P-cluster is loaded beyond its steal threshold and
 * has no idle recommended cores, pull a thread off the P-cluster run
 * queue instead.  Returns with the pset lock dropped.
 */
thread_t
sched_amp_steal_thread(processor_set_t pset)
{
	thread_t thread = THREAD_NULL;
	processor_set_t nset = pset;

	assert(pset->pset_cluster_type != PSET_AMP_P);

	processor_t processor = current_processor();
	assert(pset == processor->processor_set);

	bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id);
	bit_clear(pset->pending_spill_cpu_mask, processor->cpu_id);

	if (!pcore_set) {
		/* No P-cluster to steal from; drop the pset lock as the caller expects */
		pset_unlock(pset);
		return THREAD_NULL;
	}

	nset = pcore_set;

	assert(nset != pset);

	if (sched_get_pset_load_average(nset, 0) >= sched_amp_steal_threshold(nset, spill_pending)) {
		pset_unlock(pset);

		pset = nset;

		pset_lock(pset);

		/* Allow steal if load average still OK, no idle cores, and more threads on runq than active cores DISPATCHING */
		if ((sched_get_pset_load_average(pset, 0) >= sched_amp_steal_threshold(pset, spill_pending)) &&
		    (pset->pset_runq.count > bit_count(pset->cpu_state_map[PROCESSOR_DISPATCHING])) &&
		    (bit_count(pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) == 0)) {
			thread = run_queue_dequeue(&pset->pset_runq, SCHED_HEADQ);
			KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_STEAL) | DBG_FUNC_NONE, spill_pending, 0, 0, 0);
			sched_update_pset_load_average(pset, 0);
		}
	}

	pset_unlock(pset);
	return thread;
}

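/*
 * Periodic scan on behalf of the timeshare priority maintenance code:
 * collect stale timesharing threads from each processor's bound run
 * queue and idle thread, then from every pset run queue, and hand them
 * to thread_update_process_threads() for priority recomputation.
 */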
static void
sched_amp_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t               restart_needed = FALSE;
	processor_t             processor;
	processor_set_t         pset;
	thread_t                thread;
	spl_t                   s;

	/*
	 *  We update the threads associated with each processor (bound and idle threads)
	 *  and then update the threads in each pset runqueue.
	 */

	do {
		for (int i = 0; i < machine_info.logical_cpu_max; i++) {
			processor = processor_array[i];
			if (processor == NULL) {
				continue;
			}

			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(amp_bound_runq(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		}

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);

	pset_node_t node = &pset_node0;
	pset = node->psets;

	do {
		do {
			restart_needed = FALSE;
			while (pset != NULL) {
				s = splsched();
				pset_lock(pset);

				restart_needed = runq_scan(&pset->pset_runq, scan_context);

				pset_unlock(pset);
				splx(s);

				if (restart_needed) {
					break;
				}

				pset = pset->pset_list;
			}

			if (restart_needed) {
				break;
			}
		} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}

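/*
 * Should this thread run on a P-core?  True when no E-cores are
 * recommended, or when the thread is P-recommended and P-cores are
 * available and recommended.
 */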
static bool
pcores_recommended(thread_t thread)
{
	if (!pcore_set) {
		return false;
	}

	if (pcore_set->online_processor_count == 0) {
		/* No pcores available */
		return false;
	}

	if (!pset_is_recommended(ecore_set)) {
		/* No E cores recommended, must use P cores */
		return true;
	}

	if (recommended_pset_type(thread) == PSET_AMP_E) {
		return false;
	}

	return pset_is_recommended(pcore_set);
}

/* Return true if this thread should not continue running on this processor */
static bool
sched_amp_thread_avoid_processor(processor_t processor, thread_t thread)
{
	if (processor->processor_set->pset_cluster_type == PSET_AMP_E) {
		if (pcores_recommended(thread)) {
			return true;
		}
	} else if (processor->processor_set->pset_cluster_type == PSET_AMP_P) {
		if (!pcores_recommended(thread)) {
			return true;
		}
	}

	return false;
}

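/*
 * Steer the thread to the cluster indicated by pcores_recommended(),
 * dropping the old pset lock and taking the new one if the target pset
 * differs.  Returns PROCESSOR_NULL if the chosen pset is no longer
 * recommended by the time it is locked.
 */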
static processor_t
sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread)
{
	/* Bound threads don't call this function */
	assert(thread->bound_processor == PROCESSOR_NULL);

	processor_set_t nset = pset;
	bool choose_pcores;

	choose_pcores = pcores_recommended(thread);

	if (choose_pcores && (pset->pset_cluster_type != PSET_AMP_P)) {
		nset = pcore_set;
		assert(nset != NULL);
	} else if (!choose_pcores && (pset->pset_cluster_type != PSET_AMP_E)) {
		nset = ecore_set;
		assert(nset != NULL);
	}

	if (nset != pset) {
		pset_unlock(pset);
		pset_lock(nset);
	}

	/* Now that the chosen pset is definitely locked, make sure nothing important has changed */
	if (!pset_is_recommended(nset)) {
		pset_unlock(nset);
		return PROCESSOR_NULL;
	}

	return choose_processor(nset, processor, thread);
}

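/*
 * Record a new cluster recommendation for the thread group.  When a
 * group becomes P-recommended, bounce its threads off the E-core pset
 * so they can be rescheduled onto P-cores.
 */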
void
sched_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation)
{
	thread_group_update_recommendation(tg, new_recommendation);

	if (new_recommendation != CLUSTER_TYPE_P) {
		return;
	}

	sched_amp_bounce_thread_group_from_ecores(ecore_set, tg);
}

static bool
sched_amp_thread_eligible_for_pset(thread_t thread, processor_set_t pset)
{
	if (recommended_pset_type(thread) == PSET_AMP_P) {
		/* P-recommended threads are eligible to execute on either E or P clusters */
		return true;
	} else {
		/* E-recommended threads are eligible to execute on E clusters only */
		return pset->pset_cluster_type == PSET_AMP_E;
	}
}

static char *pct_name[] = {
	"PSET_SMP",
	"PSET_AMP_E",
	"PSET_AMP_P"
};

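/*
 * Once all CPUs have registered, sanity-check the AMP topology: the E
 * and P psets both exist, occupy the first two pset_array slots, and
 * every processor belongs to a cluster whose pset type matches.
 */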
static void
sched_amp_cpu_init_completed(void)
{
	assert(pset_array[0] != NULL);
	assert(pset_array[1] != NULL);

	assert(ecore_set != NULL);
	assert(pcore_set != NULL);

	if (pset_array[0] == ecore_set) {
		assert(pset_array[1] == pcore_set);
	} else {
		assert(pset_array[0] == pcore_set);
		assert(pset_array[1] == ecore_set);
	}

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		processor_set_t pset = p->processor_set;
		kprintf("%s>cpu_id %02d in pset_id %02d type %s\n", __FUNCTION__, p->cpu_id, pset->pset_id,
		    pct_name[pset->pset_cluster_type]);

		assert(p == processor_array[p->cpu_id]);
		assert(pset->pset_cluster_type != PSET_SMP);
		if (pset->pset_cluster_type == PSET_AMP_E) {
			assert(pset->pset_type == CLUSTER_TYPE_E);
			assert(pset == ecore_set);
		} else {
			assert(pset->pset_cluster_type == PSET_AMP_P);
			assert(pset->pset_type == CLUSTER_TYPE_P);
			assert(pset == pcore_set);
		}
	}
}

#if DEVELOPMENT || DEBUG

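/*
 * DEVELOPMENT/DEBUG-only helpers backing the sysctls that report and
 * change the calling thread's cluster binding ('E', 'P', or '0' for
 * unbound).
 */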
extern char sysctl_get_bound_cluster_type(void);
char
sysctl_get_bound_cluster_type(void)
{
	thread_t self = current_thread();

	if (self->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) {
		return '0';
	} else if (pset_array[self->th_bound_cluster_id]->pset_cluster_type == PSET_AMP_E) {
		return 'E';
	} else {
		return 'P';
	}
}

extern void sysctl_thread_bind_cluster_type(char cluster_type);
void
sysctl_thread_bind_cluster_type(char cluster_type)
{
	thread_bind_cluster_type(current_thread(), cluster_type, false);
}

#endif /* DEVELOPMENT || DEBUG */

#endif /* __AMP__ */