/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/sched_amp_common.h>

#include <sys/kdebug.h>

#if __AMP__

static thread_t
sched_amp_steal_thread(processor_set_t pset);

static void
sched_amp_thread_update_scan(sched_update_scan_context_t scan_context);

static boolean_t
sched_amp_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_amp_processor_queue_remove(processor_t processor, thread_t thread);

static ast_t
sched_amp_processor_csw_check(processor_t processor);

static boolean_t
sched_amp_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_amp_runq_count(processor_t processor);

static boolean_t
sched_amp_processor_queue_empty(processor_t processor);

static uint64_t
sched_amp_runq_stats_count_sum(processor_t processor);

static int
sched_amp_processor_bound_count(processor_t processor);

static void
sched_amp_pset_init(processor_set_t pset);

static void
sched_amp_processor_init(processor_t processor);

static thread_t
sched_amp_choose_thread(processor_t processor, int priority, ast_t reason);

static void
sched_amp_processor_queue_shutdown(processor_t processor);

static sched_mode_t
sched_amp_initial_thread_sched_mode(task_t parent_task);

static processor_t
sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread);

static bool
sched_amp_thread_avoid_processor(processor_t processor, thread_t thread);

static bool
sched_amp_thread_should_yield(processor_t processor, thread_t thread);

static void
sched_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation);

static bool
sched_amp_thread_eligible_for_pset(thread_t thread, processor_set_t pset);

const struct sched_dispatch_table sched_amp_dispatch = {
	.sched_name                                     = "amp",
	.init                                           = sched_amp_init,
	.timebase_init                                  = sched_timeshare_timebase_init,
	.processor_init                                 = sched_amp_processor_init,
	.pset_init                                      = sched_amp_pset_init,
	.maintenance_continuation                       = sched_timeshare_maintenance_continue,
	.choose_thread                                  = sched_amp_choose_thread,
	.steal_thread_enabled                           = sched_amp_steal_thread_enabled,
	.steal_thread                                   = sched_amp_steal_thread,
	.compute_timeshare_priority                     = sched_compute_timeshare_priority,
	.choose_node                                    = sched_amp_choose_node,
	.choose_processor                               = sched_amp_choose_processor,
	.processor_enqueue                              = sched_amp_processor_enqueue,
	.processor_queue_shutdown                       = sched_amp_processor_queue_shutdown,
	.processor_queue_remove                         = sched_amp_processor_queue_remove,
	.processor_queue_empty                          = sched_amp_processor_queue_empty,
	.priority_is_urgent                             = priority_is_urgent,
	.processor_csw_check                            = sched_amp_processor_csw_check,
	.processor_queue_has_priority                   = sched_amp_processor_queue_has_priority,
	.initial_quantum_size                           = sched_timeshare_initial_quantum_size,
	.initial_thread_sched_mode                      = sched_amp_initial_thread_sched_mode,
	.can_update_priority                            = can_update_priority,
	.update_priority                                = update_priority,
	.lightweight_update_priority                    = lightweight_update_priority,
	.quantum_expire                                 = sched_default_quantum_expire,
	.processor_runq_count                           = sched_amp_runq_count,
	.processor_runq_stats_count_sum                 = sched_amp_runq_stats_count_sum,
	.processor_bound_count                          = sched_amp_processor_bound_count,
	.thread_update_scan                             = sched_amp_thread_update_scan,
	.multiple_psets_enabled                         = TRUE,
	.sched_groups_enabled                           = FALSE,
	.avoid_processor_enabled                        = TRUE,
	.thread_avoid_processor                         = sched_amp_thread_avoid_processor,
	.processor_balance                              = sched_amp_balance,

	.rt_runq                                        = sched_rtlocal_runq,
	.rt_init                                        = sched_rtlocal_init,
	.rt_queue_shutdown                              = sched_rtlocal_queue_shutdown,
	.rt_runq_scan                                   = sched_rtlocal_runq_scan,
	.rt_runq_count_sum                              = sched_rtlocal_runq_count_sum,
	.rt_steal_thread                                = sched_rtlocal_steal_thread,

	.qos_max_parallelism                            = sched_amp_qos_max_parallelism,
	.check_spill                                    = sched_amp_check_spill,
	.ipi_policy                                     = sched_amp_ipi_policy,
	.thread_should_yield                            = sched_amp_thread_should_yield,
	.run_count_incr                                 = sched_run_incr,
	.run_count_decr                                 = sched_run_decr,
	.update_thread_bucket                           = sched_update_thread_bucket,
	.pset_made_schedulable                          = sched_pset_made_schedulable,
	.thread_group_recommendation_change             = sched_amp_thread_group_recommendation_change,
	.cpu_init_completed                             = NULL,
	.thread_eligible_for_pset                       = sched_amp_thread_eligible_for_pset,
};

extern processor_set_t ecore_set;
extern processor_set_t pcore_set;
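
/*
 * Each AMP pset has a single shared run queue (pset_runq) for unbound
 * threads; threads bound to a specific processor go on that processor's
 * own runq.  These helpers pick the right queue for a given thread.
 */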
__attribute__((always_inline))
static inline run_queue_t
amp_main_runq(processor_t processor)
{
	return &processor->processor_set->pset_runq;
}

__attribute__((always_inline))
static inline run_queue_t
amp_bound_runq(processor_t processor)
{
	return &processor->runq;
}

__attribute__((always_inline))
static inline run_queue_t
amp_runq_for_thread(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL) {
		return amp_main_runq(processor);
	} else {
		assert(thread->bound_processor == processor);
		return amp_bound_runq(processor);
	}
}

static sched_mode_t
sched_amp_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static void
sched_amp_processor_init(processor_t processor)
{
	run_queue_init(&processor->runq);
}

static void
sched_amp_pset_init(processor_set_t pset)
{
	if (pset->pset_cluster_type == PSET_AMP_P) {
		pcore_set = pset;
	} else {
		ecore_set = pset;
	}
	run_queue_init(&pset->pset_runq);
}
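
/*
 * sched_amp_choose_thread()
 *
 * Pick the next thread for this processor from its bound and pset run
 * queues.  On an E-core with a spill pending, a higher-priority thread
 * waiting on the P-core run queue makes us return THREAD_NULL so that
 * thread_select() falls through to sched_amp_steal_thread().
 */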
static thread_t
sched_amp_choose_thread(
	processor_t      processor,
	int              priority,
	__unused ast_t            reason)
{
	processor_set_t pset = processor->processor_set;
	bool spill_pending = false;
	int spill_pri = -1;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
	}

	run_queue_t main_runq  = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);
	run_queue_t chosen_runq;

	if ((bound_runq->highq < priority) &&
	    (main_runq->highq < priority) &&
	    (spill_pri < priority)) {
		return THREAD_NULL;
	}

	if ((spill_pri > bound_runq->highq) &&
	    (spill_pri > main_runq->highq)) {
		/*
		 * There is a higher priority thread on the P-core runq,
		 * so returning THREAD_NULL here will cause thread_select()
		 * to call sched_amp_steal_thread() to try to get it.
		 */
		return THREAD_NULL;
	}

	if (bound_runq->highq >= main_runq->highq) {
		chosen_runq = bound_runq;
	} else {
		chosen_runq = main_runq;
	}

	return run_queue_dequeue(chosen_runq, SCHED_HEADQ);
}

static boolean_t
sched_amp_processor_enqueue(
	processor_t       processor,
	thread_t          thread,
	sched_options_t   options)
{
	run_queue_t     rq = amp_runq_for_thread(processor, thread);
	boolean_t       result;

	result = run_queue_enqueue(rq, thread, options);
	thread->runq = processor;

	return result;
}

static boolean_t
sched_amp_processor_queue_empty(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id);

	return (amp_main_runq(processor)->count == 0) &&
	       (amp_bound_runq(processor)->count == 0) &&
	       !spill_pending;
}
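
/*
 * Yield if other runnable work is pending locally, or if this thread is
 * P-recommended but running on an E-core while threads are waiting on
 * the P-core run queue.
 */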
static bool
sched_amp_thread_should_yield(processor_t processor, thread_t thread)
{
	if (!sched_amp_processor_queue_empty(processor) || (rt_runq_count(processor->processor_set) > 0)) {
		return true;
	}

	if ((processor->processor_set->pset_cluster_type == PSET_AMP_E) && (recommended_pset_type(thread) == PSET_AMP_P)) {
		return pcore_set && pcore_set->pset_runq.count > 0;
	}

	return false;
}
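
/*
 * sched_amp_processor_csw_check()
 *
 * Decide whether the running thread should be preempted, based on the
 * highest priority waiting on the bound/pset run queues and, on an
 * E-core with a spill pending, on the P-core run queue.
 */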
static ast_t
sched_amp_processor_csw_check(processor_t processor)
{
	boolean_t       has_higher;
	int             pri;

	run_queue_t main_runq  = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);

	assert(processor->active_thread != NULL);

	processor_set_t pset = processor->processor_set;
	bool spill_pending = false;
	int spill_pri = -1;
	int spill_urgency = 0;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
		spill_urgency = pcore_set->pset_runq.urgency;
	}

	pri = MAX(main_runq->highq, bound_runq->highq);
	if (spill_pending) {
		pri = MAX(pri, spill_pri);
	}

	if (processor->first_timeslice) {
		has_higher = (pri > processor->current_pri);
	} else {
		has_higher = (pri >= processor->current_pri);
	}

	if (has_higher) {
		if (main_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (bound_runq->urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		if (spill_urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	return AST_NONE;
}

static boolean_t
sched_amp_processor_queue_has_priority(processor_t    processor,
    int            priority,
    boolean_t      gte)
{
	bool spill_pending = false;
	int spill_pri = -1;
	processor_set_t pset = processor->processor_set;

	if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
		spill_pending = true;
		spill_pri = pcore_set->pset_runq.highq;
	}
	run_queue_t main_runq  = amp_main_runq(processor);
	run_queue_t bound_runq = amp_bound_runq(processor);

	int qpri = MAX(main_runq->highq, bound_runq->highq);
	if (spill_pending) {
		qpri = MAX(qpri, spill_pri);
	}

	if (gte) {
		return qpri >= priority;
	} else {
		return qpri > priority;
	}
}

static int
sched_amp_runq_count(processor_t processor)
{
	return amp_main_runq(processor)->count + amp_bound_runq(processor)->count;
}

static uint64_t
sched_amp_runq_stats_count_sum(processor_t processor)
{
	uint64_t bound_sum = amp_bound_runq(processor)->runq_stats.count_sum;

	if (processor->cpu_id == processor->processor_set->cpu_set_low) {
		return bound_sum + amp_main_runq(processor)->runq_stats.count_sum;
	} else {
		return bound_sum;
	}
}

static int
sched_amp_processor_bound_count(processor_t processor)
{
	return amp_bound_runq(processor)->count;
}
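
/*
 * sched_amp_processor_queue_shutdown()
 *
 * Drain the pset run queue when the last recommended processor in the
 * pset goes away; entered with the pset locked, returns with it unlocked.
 */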
static void
sched_amp_processor_queue_shutdown(processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	run_queue_t     rq   = amp_main_runq(processor);
	thread_t        thread;
	queue_head_t    tqueue;

	/* We only need to migrate threads if this is the last active or last recommended processor in the pset */
	if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) {
		pset_unlock(pset);
		return;
	}

	queue_init(&tqueue);

	while (rq->count > 0) {
		thread = run_queue_dequeue(rq, SCHED_HEADQ);
		enqueue_tail(&tqueue, &thread->runq_links);
	}

	pset_unlock(pset);

	qe_foreach_element_safe(thread, &tqueue, runq_links) {
		remqueue(&thread->runq_links);

		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}

static boolean_t
sched_amp_processor_queue_remove(
	processor_t processor,
	thread_t    thread)
{
	run_queue_t             rq;
	processor_set_t         pset = processor->processor_set;

	pset_lock(pset);

	rq = amp_runq_for_thread(processor, thread);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		run_queue_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	return processor != PROCESSOR_NULL;
}
/*
 * sched_amp_steal_thread()
 *
 * Called (with the pset locked) by an E-core looking for work: if the
 * P-cluster load is above the steal threshold, it has no recommended
 * idle cores, and more threads are runnable than cores dispatching,
 * pull a thread from the P-core run queue.  Returns with the pset
 * unlocked.
 */
thread_t
sched_amp_steal_thread(processor_set_t pset)
{
	thread_t thread = THREAD_NULL;
	processor_set_t nset = pset;

	assert(pset->pset_cluster_type != PSET_AMP_P);

	processor_t processor = current_processor();
	assert(pset == processor->processor_set);

	bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id);
	bit_clear(pset->pending_spill_cpu_mask, processor->cpu_id);

	if (!pcore_set) {
		return THREAD_NULL;
	}

	nset = pcore_set;

	assert(nset != pset);

	if (sched_get_pset_load_average(nset, 0) >= sched_amp_steal_threshold(nset, spill_pending)) {
		pset_unlock(pset);

		pset = nset;

		pset_lock(pset);

		/* Allow steal if load average still OK, no idle cores, and more threads on runq than active cores DISPATCHING */
		if ((sched_get_pset_load_average(pset, 0) >= sched_amp_steal_threshold(pset, spill_pending)) &&
		    (pset->pset_runq.count > bit_count(pset->cpu_state_map[PROCESSOR_DISPATCHING])) &&
		    (bit_count(pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) == 0)) {
			thread = run_queue_dequeue(&pset->pset_runq, SCHED_HEADQ);
			KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_STEAL) | DBG_FUNC_NONE, spill_pending, 0, 0, 0);
			sched_update_pset_load_average(pset, 0);
		}
	}

	pset_unlock(pset);
	return thread;
}

static void
sched_amp_thread_update_scan(sched_update_scan_context_t scan_context)
{
	boolean_t               restart_needed = FALSE;
	processor_t             processor;
	processor_set_t         pset;
	thread_t                thread;
	spl_t                   s;

	/*
	 *  We update the threads associated with each processor (bound and idle threads)
	 *  and then update the threads in each pset runqueue.
	 */

	do {
		for (int i = 0; i < machine_info.logical_cpu_max; i++) {
			processor = processor_array[i];
			if (processor == NULL) {
				continue;
			}

			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(amp_bound_runq(processor), scan_context);

			pset_unlock(pset);
			splx(s);

			if (restart_needed) {
				break;
			}

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_add_thread(thread) == FALSE) {
					restart_needed = TRUE;
					break;
				}
			}
		}

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);

	pset_node_t node = &pset_node0;
	pset = node->psets;

	do {
		do {
			restart_needed = FALSE;
			while (pset != NULL) {
				s = splsched();
				pset_lock(pset);

				restart_needed = runq_scan(&pset->pset_runq, scan_context);

				pset_unlock(pset);
				splx(s);

				if (restart_needed) {
					break;
				}

				pset = pset->pset_list;
			}

			if (restart_needed) {
				break;
			}
		} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));

		/* Ok, we now have a collection of candidates -- fix them. */
		thread_update_process_threads();
	} while (restart_needed);
}
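
/*
 * pcores_recommended()
 *
 * Return true if this thread should run on the P-cluster: P-cores must
 * exist and be online, and either the E-cores are not recommended (so
 * P-cores must be used) or the thread is P-recommended and the
 * P-cluster is recommended.
 */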
static bool
pcores_recommended(thread_t thread)
{
	if (!pcore_set) {
		return false;
	}

	if (pcore_set->online_processor_count == 0) {
		/* No pcores available */
		return false;
	}

	if (!pset_is_recommended(ecore_set)) {
		/* No E cores recommended, must use P cores */
		return true;
	}

	if (recommended_pset_type(thread) == PSET_AMP_E) {
		return false;
	}

	return pset_is_recommended(pcore_set);
}

/* Return true if this thread should not continue running on this processor */
static bool
sched_amp_thread_avoid_processor(processor_t processor, thread_t thread)
{
	if (processor->processor_set->pset_cluster_type == PSET_AMP_E) {
		if (pcores_recommended(thread)) {
			return true;
		}
	} else if (processor->processor_set->pset_cluster_type == PSET_AMP_P) {
		if (!pcores_recommended(thread)) {
			return true;
		}
	}

	return false;
}
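
/*
 * sched_amp_choose_processor()
 *
 * Redirect the thread to the recommended cluster (P or E), dropping the
 * original pset lock and taking the target's, then defer to the generic
 * choose_processor() within that pset.
 */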
static processor_t
sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread)
{
	/* Bound threads don't call this function */
	assert(thread->bound_processor == PROCESSOR_NULL);

	processor_set_t nset = pset;
	bool choose_pcores;


again:
	choose_pcores = pcores_recommended(thread);

	if (choose_pcores && (pset->pset_cluster_type != PSET_AMP_P)) {
		nset = pcore_set;
		assert(nset != NULL);
	} else if (!choose_pcores && (pset->pset_cluster_type != PSET_AMP_E)) {
		nset = ecore_set;
		assert(nset != NULL);
	}

	if (nset != pset) {
		pset_unlock(pset);
		pset_lock(nset);
	}

	/* Now that the chosen pset is definitely locked, make sure nothing important has changed */
	if (!pset_is_recommended(nset)) {
		pset = nset;
		goto again;
	}

	return choose_processor(nset, processor, thread);
}
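
/*
 * When a thread group becomes P-recommended, kick its threads off the
 * E-cores so they can migrate to the P-cluster.
 */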
void
sched_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation)
{
	thread_group_update_recommendation(tg, new_recommendation);

	if (new_recommendation != CLUSTER_TYPE_P) {
		return;
	}

	sched_amp_bounce_thread_group_from_ecores(ecore_set, tg);
}

static bool
sched_amp_thread_eligible_for_pset(thread_t thread, processor_set_t pset)
{
	if (recommended_pset_type(thread) == PSET_AMP_P) {
		/* P-recommended threads are eligible to execute on either E or P clusters */
		return true;
	} else {
		/* E-recommended threads are eligible to execute on E clusters only */
		return pset->pset_type == PSET_AMP_E;
	}
}

#if DEVELOPMENT || DEBUG

extern char sysctl_get_bound_cluster_type(void);
char
sysctl_get_bound_cluster_type(void)
{
	thread_t self = current_thread();

	if (self->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) {
		return '0';
	} else if (pset_array[self->th_bound_cluster_id]->pset_cluster_type == PSET_AMP_E) {
		return 'E';
	} else {
		return 'P';
	}
}

extern void sysctl_thread_bind_cluster_type(char cluster_type);
void
sysctl_thread_bind_cluster_type(char cluster_type)
{
	thread_bind_cluster_type(current_thread(), cluster_type, false);
}

#endif /* DEVELOPMENT || DEBUG */

#endif /* __AMP__ */