/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <kern/kern_types.h>
#include <kern/debug.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <machine/atomic.h>
#include <sys/kdebug.h>
#include <kern/sched_amp_common.h>
#include <stdatomic.h>

#if __AMP__

/* Exported globals */
processor_set_t ecore_set = NULL;
processor_set_t pcore_set = NULL;

/*
 * sched_amp_init()
 *
 * Initialize the pcore_set and ecore_set globals which describe the
 * P/E processor sets.
 */
void
sched_amp_init(void)
{
	sched_timeshare_init();
}

/* Spill threshold load average is ncpus in pset + (sched_amp_spill_count / (1 << PSET_LOAD_FRACTIONAL_SHIFT)) */
int sched_amp_spill_count = 3;
int sched_amp_idle_steal = 1;
int sched_amp_spill_steal = 1;

/*
 * We see performance gains from sending immediate IPIs to P-cores to run
 * P-eligible threads, and fewer P-to-E migrations from using deferred IPIs
 * for spill.
 */
int sched_amp_spill_deferred_ipi = 1;
int sched_amp_pcores_preempt_immediate_ipi = 1;

/*
 * sched_perfcontrol_inherit_recommendation_from_tg changes the AMP
 * scheduling policy away from the default and allows the policy to be
 * modified at run-time.
 *
 * Once modified from the default, the policy toggles between "follow
 * thread group" and "restrict to E".
 */

_Atomic sched_perfctl_class_policy_t sched_perfctl_policy_util = SCHED_PERFCTL_POLICY_DEFAULT;
_Atomic sched_perfctl_class_policy_t sched_perfctl_policy_bg = SCHED_PERFCTL_POLICY_DEFAULT;

/*
 * sched_amp_spill_threshold()
 *
 * Routine to calculate the spill threshold which decides if a cluster should spill.
 */
int
sched_amp_spill_threshold(processor_set_t pset)
{
	int recommended_processor_count = bit_count(pset->recommended_bitmask & pset->cpu_bitmask);

	return (recommended_processor_count << PSET_LOAD_FRACTIONAL_SHIFT) + sched_amp_spill_count;
}
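
/*
 * Illustrative example (assumed numbers): with 4 recommended CPUs in the pset
 * and the default sched_amp_spill_count of 3, the threshold is
 * (4 << PSET_LOAD_FRACTIONAL_SHIFT) + 3, so spill is considered once the
 * fixed-point load average returned by sched_get_pset_load_average() climbs
 * just above 4.0 (i.e. 4 + 3/(1 << PSET_LOAD_FRACTIONAL_SHIFT)).
 */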

/*
 * pset_signal_spill()
 *
 * Routine to signal a running/idle CPU to cause a spill onto that CPU.
 * Called with pset locked, returns unlocked
 */
void
pset_signal_spill(processor_set_t pset, int spilled_thread_priority)
{
	processor_t processor;
	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

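	/*
	 * First preference (descriptive note): an idle recommended CPU, which can
	 * pick up the spilled thread immediately via a dispatch/IPI.
	 */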
	uint64_t idle_map = pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE];
	for (int cpuid = lsb_first(idle_map); cpuid >= 0; cpuid = lsb_next(idle_map, cpuid)) {
		processor = processor_array[cpuid];
		if (bit_set_if_clear(pset->pending_spill_cpu_mask, processor->cpu_id)) {
			KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_SIGNAL_SPILL) | DBG_FUNC_NONE, processor->cpu_id, 0, 0, 0);

			processor->deadline = UINT64_MAX;

			if (processor == current_processor()) {
				pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
				if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
					KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
					    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 6);
				}
			} else {
				ipi_type = sched_ipi_action(processor, NULL, SCHED_IPI_EVENT_SPILL);
			}
			pset_unlock(pset);
			sched_ipi_perform(processor, ipi_type);
			return;
		}
	}

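	/*
	 * Otherwise (descriptive note): fall back to a running recommended CPU that
	 * is not already handling a spill and is running a lower-priority thread,
	 * and ask it to preempt.
	 */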
	processor_t ast_processor = NULL;
	uint64_t running_map = pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_RUNNING];
	for (int cpuid = lsb_first(running_map); cpuid >= 0; cpuid = lsb_next(running_map, cpuid)) {
		processor = processor_array[cpuid];
		if (processor->current_recommended_pset_type == PSET_AMP_P) {
			/* Already running a spilled P-core recommended thread */
			continue;
		}
		if (bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) {
			/* Already received a spill signal */
			continue;
		}
		if (processor->current_pri >= spilled_thread_priority) {
			/* Already running a higher or equal priority thread */
			continue;
		}

		/* Found a suitable processor */
		bit_set(pset->pending_spill_cpu_mask, processor->cpu_id);
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_SIGNAL_SPILL) | DBG_FUNC_NONE, processor->cpu_id, 1, 0, 0);
		if (processor == current_processor()) {
			ast_on(AST_PREEMPT);
		}
		ipi_type = sched_ipi_action(processor, NULL, SCHED_IPI_EVENT_SPILL);
		if (ipi_type != SCHED_IPI_NONE) {
			ast_processor = processor;
		}
		break;
	}

	pset_unlock(pset);
	sched_ipi_perform(ast_processor, ipi_type);
}

/*
 * pset_should_accept_spilled_thread()
 *
 * Routine to decide if pset should accept spilled threads.
 * This function must be safe to call (to use as a hint) without holding the pset lock.
 */
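/*
 * Acceptance criteria (descriptive note): true if any recommended CPU in the
 * pset is idle, or if some recommended running CPU is executing a thread of
 * lower priority that is not itself a spilled P-eligible thread.
 */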
bool
pset_should_accept_spilled_thread(processor_set_t pset, int spilled_thread_priority)
{
	if (!pset) {
		return false;
	}

	if ((pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
		return true;
	}

	uint64_t cpu_map = (pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_RUNNING]);

	for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
		processor_t processor = processor_array[cpuid];

		if (processor->current_recommended_pset_type == PSET_AMP_P) {
			/* This processor is already running a spilled thread */
			continue;
		}

		if (processor->current_pri < spilled_thread_priority) {
			return true;
		}
	}

	return false;
}

/*
 * should_spill_to_ecores()
 *
 * Spill policy is implemented here
 */
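/*
 * In short (descriptive note): never spill realtime threads, threads bound to
 * the P-cluster, or anything while the E-cores are not recommended; don't spill
 * while the P-cluster still has idle recommended cores; otherwise spill once
 * the P-cluster load average reaches the spill threshold and the E-cluster can
 * accept a thread of this priority.
 */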
bool
should_spill_to_ecores(processor_set_t nset, thread_t thread)
{
	if (nset->pset_cluster_type == PSET_AMP_E) {
		/* Not relevant if ecores already preferred */
		return false;
	}

	if (!pset_is_recommended(ecore_set)) {
		/* E cores must be recommended */
		return false;
	}

	if (thread->th_bound_cluster_id == pcore_set->pset_id) {
		/* Thread bound to the P-cluster */
		return false;
	}

	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		/* Never spill realtime threads */
		return false;
	}

	if ((nset->recommended_bitmask & nset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
		/* Don't spill while the P-cluster still has idle recommended cores */
		return false;
	}

	if ((sched_get_pset_load_average(nset, 0) >= sched_amp_spill_threshold(nset)) &&  /* There is already a load on P cores */
	    pset_should_accept_spilled_thread(ecore_set, thread->sched_pri)) { /* There are lower priority E cores */
		return true;
	}

	return false;
}

/*
 * sched_amp_check_spill()
 *
 * Routine to check if the thread should be spilled and signal the pset if needed.
 */
void
sched_amp_check_spill(processor_set_t pset, thread_t thread)
{
	/* pset is unlocked */

	/* Bound threads don't call this function */
	assert(thread->bound_processor == PROCESSOR_NULL);

	if (should_spill_to_ecores(pset, thread)) {
		pset_lock(ecore_set);

		pset_signal_spill(ecore_set, thread->sched_pri);
		/* returns with ecore_set unlocked */
	}
}

/*
 * sched_amp_steal_threshold()
 *
 * Routine to calculate the steal threshold
 */
int
sched_amp_steal_threshold(processor_set_t pset, bool spill_pending)
{
	int recommended_processor_count = bit_count(pset->recommended_bitmask & pset->cpu_bitmask);

	return (recommended_processor_count << PSET_LOAD_FRACTIONAL_SHIFT) + (spill_pending ? sched_amp_spill_steal : sched_amp_idle_steal);
}
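
/*
 * Descriptive note: like sched_amp_spill_threshold() above, this is a pset load
 * average in PSET_LOAD_FRACTIONAL_SHIFT fixed point; only the fractional margin
 * differs, depending on whether a spill is already pending.
 */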

/*
 * sched_amp_steal_thread_enabled()
 *
 * Return whether this (E) pset is currently allowed to steal threads:
 * only while a P-cluster exists and has online processors.
 */
bool
sched_amp_steal_thread_enabled(processor_set_t pset)
{
	return (pset->pset_cluster_type == PSET_AMP_E) && (pcore_set != NULL) && (pcore_set->online_processor_count > 0);
}

/*
 * sched_amp_balance()
 *
 * Invoked with pset locked, returns with pset unlocked
 */
void
sched_amp_balance(processor_t cprocessor, processor_set_t cpset)
{
	assert(cprocessor == current_processor());

	pset_unlock(cpset);

	if (!ecore_set || cpset->pset_cluster_type == PSET_AMP_E || !cprocessor->is_recommended) {
		return;
	}

	/*
	 * cprocessor is an idle, recommended P core processor.
	 * Look for P-eligible threads that have spilled to an E core
	 * and coax them to come back.
	 */
	processor_set_t pset = ecore_set;

	pset_lock(pset);

	processor_t eprocessor;
	uint64_t ast_processor_map = 0;

	sched_ipi_type_t ipi_type[MAX_CPUS] = {SCHED_IPI_NONE};
	uint64_t running_map = pset->cpu_state_map[PROCESSOR_RUNNING];
	for (int cpuid = lsb_first(running_map); cpuid >= 0; cpuid = lsb_next(running_map, cpuid)) {
		eprocessor = processor_array[cpuid];
		if ((eprocessor->current_pri < BASEPRI_RTQUEUES) &&
		    (eprocessor->current_recommended_pset_type == PSET_AMP_P)) {
			ipi_type[eprocessor->cpu_id] = sched_ipi_action(eprocessor, NULL, SCHED_IPI_EVENT_REBALANCE);
			if (ipi_type[eprocessor->cpu_id] != SCHED_IPI_NONE) {
				bit_set(ast_processor_map, eprocessor->cpu_id);
				assert(eprocessor != cprocessor);
			}
		}
	}

	pset_unlock(pset);

	for (int cpuid = lsb_first(ast_processor_map); cpuid >= 0; cpuid = lsb_next(ast_processor_map, cpuid)) {
		processor_t ast_processor = processor_array[cpuid];
		sched_ipi_perform(ast_processor, ipi_type[cpuid]);
	}
}

/*
 * Helper function for sched_amp_thread_group_recommendation_change()
 * Find all the cores in the pset running threads from the thread_group tg
 * and send them a rebalance interrupt.
 */
void
sched_amp_bounce_thread_group_from_ecores(processor_set_t pset, struct thread_group *tg)
{
	if (!pset) {
		return;
	}

	assert(pset->pset_cluster_type == PSET_AMP_E);
	uint64_t ast_processor_map = 0;
	sched_ipi_type_t ipi_type[MAX_CPUS] = {SCHED_IPI_NONE};

	spl_t s = splsched();
	pset_lock(pset);

	uint64_t running_map = pset->cpu_state_map[PROCESSOR_RUNNING];
	for (int cpuid = lsb_first(running_map); cpuid >= 0; cpuid = lsb_next(running_map, cpuid)) {
		processor_t eprocessor = processor_array[cpuid];
		if (eprocessor->current_thread_group == tg) {
			ipi_type[eprocessor->cpu_id] = sched_ipi_action(eprocessor, NULL, SCHED_IPI_EVENT_REBALANCE);
			if (ipi_type[eprocessor->cpu_id] != SCHED_IPI_NONE) {
				bit_set(ast_processor_map, eprocessor->cpu_id);
			} else if (eprocessor == current_processor()) {
				ast_on(AST_PREEMPT);
				bit_set(pset->pending_AST_PREEMPT_cpu_mask, eprocessor->cpu_id);
			}
		}
	}

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_RECOMMENDATION_CHANGE) | DBG_FUNC_NONE, tg, ast_processor_map, 0, 0);

	pset_unlock(pset);

	for (int cpuid = lsb_first(ast_processor_map); cpuid >= 0; cpuid = lsb_next(ast_processor_map, cpuid)) {
		processor_t ast_processor = processor_array[cpuid];
		sched_ipi_perform(ast_processor, ipi_type[cpuid]);
	}

	splx(s);
}

/*
 * sched_amp_ipi_policy()
 *
 * Select the type of IPI used to notify the destination processor, with
 * AMP-specific overrides for spill and P-core preemption events.
 */
sched_ipi_type_t
sched_amp_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
{
	processor_set_t pset = dst->processor_set;
	assert(dst != current_processor());

	boolean_t deferred_ipi_supported = false;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	deferred_ipi_supported = true;
#endif /* CONFIG_SCHED_DEFERRED_AST */

	switch (event) {
	case SCHED_IPI_EVENT_SPILL:
		/* For Spill event, use deferred IPIs if sched_amp_spill_deferred_ipi set */
		if (deferred_ipi_supported && sched_amp_spill_deferred_ipi) {
			return sched_ipi_deferred_policy(pset, dst, thread, event);
		}
		break;
	case SCHED_IPI_EVENT_PREEMPT:
		/* For preemption, the default policy is to use deferred IPIs
		 * for Non-RT P-core preemption. Override that behavior if
		 * sched_amp_pcores_preempt_immediate_ipi is set
		 */
		if (thread && thread->sched_pri < BASEPRI_RTQUEUES) {
			if (sched_amp_pcores_preempt_immediate_ipi && (pset == pcore_set)) {
				return dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
			}
		}
		break;
	default:
		break;
	}
	/* Default back to the global policy for all other scenarios */
	return sched_ipi_policy(dst, thread, dst_idle, event);
}

/*
 * sched_amp_qos_max_parallelism()
 *
 * Return the maximum useful parallelism (CPU or cluster count) for the given
 * QoS class, taking the AMP perfctl policies into account.
 */
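/*
 * Illustrative example (assumed configuration): on a system with 4 E-cores and
 * 4 P-cores, QOS_PARALLELISM_REALTIME yields 4 (P-cores only), THREAD_QOS_UTILITY
 * under the default perfctl policy yields 4 (E-cores only), and the default case
 * yields 8 (all cores). With QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE the counts
 * become cluster counts (1, 1 and 2 respectively).
 */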
uint32_t
sched_amp_qos_max_parallelism(int qos, uint64_t options)
{
	uint32_t ecount = ecore_set ? ecore_set->cpu_set_count : 0;
	uint32_t pcount = pcore_set ? pcore_set->cpu_set_count : 0;

	/*
	 * The AMP scheduler does not support more than 1 of each type of cluster
	 * but the P-cluster is optional (e.g. watchOS)
	 */
	uint32_t ecluster_count = ecount ? 1 : 0;
	uint32_t pcluster_count = pcount ? 1 : 0;

	if (options & QOS_PARALLELISM_REALTIME) {
		/* For realtime threads on AMP, limit the width to just the
		 * P-cores since we do not spill/rebalance RT threads.
		 */
		return (options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) ? pcluster_count : pcount;
	}

	/*
	 * The default AMP scheduler policy is to run utility and bg
	 * threads on E-Cores only.  Run-time policy adjustment unlocks
	 * the ability of utility and bg threads to be scheduled based on
	 * run-time conditions.
	 */
	switch (qos) {
	case THREAD_QOS_UTILITY:
		if (os_atomic_load(&sched_perfctl_policy_util, relaxed) == SCHED_PERFCTL_POLICY_DEFAULT) {
			return (options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) ? ecluster_count : ecount;
		} else {
			return (options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) ? (ecluster_count + pcluster_count) : (ecount + pcount);
		}
	case THREAD_QOS_BACKGROUND:
	case THREAD_QOS_MAINTENANCE:
		if (os_atomic_load(&sched_perfctl_policy_bg, relaxed) == SCHED_PERFCTL_POLICY_DEFAULT) {
			return (options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) ? ecluster_count : ecount;
		} else {
			return (options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) ? (ecluster_count + pcluster_count) : (ecount + pcount);
		}
	default:
		return (options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) ? (ecluster_count + pcluster_count) : (ecount + pcount);
	}
}

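/*
 * Descriptive note: pick the pset node matching the thread's recommended
 * cluster type (P or E), falling back to pset_node0 if that node is absent or
 * has no psets.
 */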
pset_node_t
sched_amp_choose_node(thread_t thread)
{
	pset_node_t node = (recommended_pset_type(thread) == PSET_AMP_P) ? pcore_node : ecore_node;
	return ((node != NULL) && (node->pset_map != 0)) ? node : &pset_node0;
}

#endif /* __AMP__ */