xref: /xnu-12377.1.9/tests/sched/sched_test_harness/shadow_headers/sched_prim.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	sched_prim.c
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1986
62  *
63  *	Scheduling primitives
64  *
65  */
66 
67 /*
68  * This version of osfmk/kern/sched_prim.c contains the limited subset of dependencies strictly
69  * required by the Clutch runqueue test harness in tests/sched/sched_test_harness/sched_clutch_harness.c.
70  * The dependencies have been copied here in order to isolate maintenance of the Clutch test
71  * harness from the rest of xnu.
72  */
73 #if !SCHED_TEST_HARNESS
74 #error File only for use with the Clutch runqueue test harness
75 #endif
76 
77 #include <kern/sched_prim.h>
78 #include <kern/processor.h>
79 #include <kern/sched_rt.h>
80 
81 boolean_t
priority_is_urgent(int priority)82 priority_is_urgent(int priority)
83 {
84 	if (priority <= BASEPRI_FOREGROUND) {
85 		return FALSE;
86 	}
87 	if (priority < MINPRI_KERNEL) {
88 		return TRUE;
89 	}
90 	if (priority >= BASEPRI_PREEMPT) {
91 		return TRUE;
92 	}
93 	return FALSE;
94 }
95 
96 /*
97  *	run_queue_init:
98  *
99  *	Initialize a run queue before first use.
100  */
101 void
run_queue_init(run_queue_t rq)102 run_queue_init(
103 	run_queue_t             rq)
104 {
105 	rq->highq = NOPRI;
106 	for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
107 		rq->bitmap[i] = 0;
108 	}
109 	rq->urgency = rq->count = 0;
110 	for (int i = 0; i < NRQS; i++) {
111 		circle_queue_init(&rq->queues[i]);
112 	}
113 }
114 /*
115  *	run_queue_dequeue:
116  *
117  *	Perform a dequeue operation on a run queue,
118  *	and return the resulting thread.
119  *
120  *	The run queue must be locked (see thread_run_queue_remove()
121  *	for more info), and not empty.
122  */
123 thread_t
run_queue_dequeue(run_queue_t rq,sched_options_t options)124 run_queue_dequeue(
125 	run_queue_t     rq,
126 	sched_options_t options)
127 {
128 	thread_t        thread;
129 	circle_queue_t  queue = &rq->queues[rq->highq];
130 
131 	if (options & SCHED_HEADQ) {
132 		thread = cqe_dequeue_head(queue, struct thread, runq_links);
133 	} else {
134 		thread = cqe_dequeue_tail(queue, struct thread, runq_links);
135 	}
136 
137 	assert(thread != THREAD_NULL);
138 
139 	thread_clear_runq(thread);
140 	rq->count--;
141 	if (SCHED(priority_is_urgent)(rq->highq)) {
142 		rq->urgency--; assert(rq->urgency >= 0);
143 	}
144 	if (circle_queue_empty(queue)) {
145 		bitmap_clear(rq->bitmap, rq->highq);
146 		rq->highq = bitmap_first(rq->bitmap, NRQS);
147 	}
148 
149 	return thread;
150 }
151 
152 /*
153  *	run_queue_enqueue:
154  *
155  *	Perform a enqueue operation on a run queue.
156  *
157  *	The run queue must be locked (see thread_run_queue_remove()
158  *	for more info).
159  */
160 boolean_t
run_queue_enqueue(run_queue_t rq,thread_t thread,sched_options_t options)161 run_queue_enqueue(
162 	run_queue_t      rq,
163 	thread_t         thread,
164 	sched_options_t  options)
165 {
166 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
167 	boolean_t       result = FALSE;
168 
169 	if (circle_queue_empty(queue)) {
170 		circle_enqueue_tail(queue, &thread->runq_links);
171 
172 		rq_bitmap_set(rq->bitmap, thread->sched_pri);
173 		if (thread->sched_pri > rq->highq) {
174 			rq->highq = thread->sched_pri;
175 			result = TRUE;
176 		}
177 	} else {
178 		if (options & SCHED_TAILQ) {
179 			circle_enqueue_tail(queue, &thread->runq_links);
180 		} else {
181 			circle_enqueue_head(queue, &thread->runq_links);
182 		}
183 	}
184 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
185 		rq->urgency++;
186 	}
187 	rq->count++;
188 
189 	return result;
190 }
191 
192 /*
193  *	run_queue_remove:
194  *
195  *	Remove a specific thread from a runqueue.
196  *
197  *	The run queue must be locked.
198  */
199 void
run_queue_remove(run_queue_t rq,thread_t thread)200 run_queue_remove(
201 	run_queue_t    rq,
202 	thread_t       thread)
203 {
204 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
205 
206 	thread_assert_runq_nonnull(thread);
207 
208 	circle_dequeue(queue, &thread->runq_links);
209 	rq->count--;
210 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
211 		rq->urgency--; assert(rq->urgency >= 0);
212 	}
213 
214 	if (circle_queue_empty(queue)) {
215 		/* update run queue status */
216 		bitmap_clear(rq->bitmap, thread->sched_pri);
217 		rq->highq = bitmap_first(rq->bitmap, NRQS);
218 	}
219 
220 	thread_clear_runq(thread);
221 }
222 
223 /*
224  *      run_queue_peek
225  *
226  *      Peek at the runq and return the highest
227  *      priority thread from the runq.
228  *
229  *	The run queue must be locked.
230  */
231 thread_t
run_queue_peek(run_queue_t rq)232 run_queue_peek(
233 	run_queue_t    rq)
234 {
235 	if (rq->count > 0) {
236 		circle_queue_t queue = &rq->queues[rq->highq];
237 		thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
238 		return thread;
239 	} else {
240 		return THREAD_NULL;
241 	}
242 }
243 
/* Runnable-thread counts per scheduler bucket, visible to the harness. */
uint32_t          sched_run_buckets[TH_BUCKET_MAX];
/* Scheduler tick counter; advanced explicitly by the harness, not a timer. */
_Atomic uint32_t  sched_tick = 0;
/* Per-priority load shift table consumed by timeshare priority decay. */
int8_t            sched_load_shifts[NRQS];

#define         DEFAULT_PREEMPTION_RATE         100             /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
#define         DEFAULT_BG_PREEMPTION_RATE      400             /* (1/s) */
static int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

/* Quantum lengths in absolute-time units (set in sched_timeshare_timebase_init). */
uint32_t        std_quantum;
uint32_t        min_std_quantum;
static uint32_t        bg_quantum;

/* Quantum lengths in microseconds (derived in sched_timeshare_init). */
uint32_t        std_quantum_us;
static uint32_t        bg_quantum_us;

uint32_t default_timeshare_constraint;
/* Shift used to convert CPU usage into timeshare priority decay. */
uint32_t sched_fixed_shift;
262 
263 void
sched_timeshare_init(void)264 sched_timeshare_init(void)
265 {
266 	/*
267 	 * Calculate the timeslicing quantum
268 	 * in us.
269 	 */
270 	if (default_preemption_rate < 1) {
271 		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
272 	}
273 	std_quantum_us = (1000 * 1000) / default_preemption_rate;
274 
275 	if (default_bg_preemption_rate < 1) {
276 		default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
277 	}
278 	bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
279 }
280 
281 void
sched_timeshare_timebase_init(void)282 sched_timeshare_timebase_init(void)
283 {
284 	uint64_t        abstime;
285 	uint32_t        shift;
286 
287 	/* standard timeslicing quantum */
288 	clock_interval_to_absolutetime_interval(
289 		std_quantum_us, NSEC_PER_USEC, &abstime);
290 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
291 	std_quantum = (uint32_t)abstime;
292 
293 	/* smallest remaining quantum (250 us) */
294 	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
295 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
296 	min_std_quantum = (uint32_t)abstime;
297 
298 	/* quantum for background tasks */
299 	clock_interval_to_absolutetime_interval(
300 		bg_quantum_us, NSEC_PER_USEC, &abstime);
301 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
302 	bg_quantum = (uint32_t)abstime;
303 
304 	/* scheduler tick interval */
305 	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
306 	    NSEC_PER_USEC, &abstime);
307 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
308 
309 	/*
310 	 * Compute conversion factor from usage to
311 	 * timesharing priorities with 5/8 ** n aging.
312 	 */
313 	abstime = (abstime * 5) / 3;
314 	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
315 		abstime >>= 1;
316 	}
317 	sched_fixed_shift = shift;
318 
319 	default_timeshare_constraint = std_quantum;
320 }
321 
322 /* Returns the scheduling type for the pset */
323 cluster_type_t
pset_type_for_id(uint32_t cluster_id)324 pset_type_for_id(uint32_t cluster_id)
325 {
326 	return pset_array[cluster_id]->pset_type;
327 }
328 
/*
 *	pset_cluster_type_to_cluster_type:
 *
 *	Translate a pset cluster type into the corresponding generic
 *	cluster type.  The AMP cases exist only on asymmetric platforms.
 *	panic() does not return, so no return statement is needed after
 *	the switch.
 */
cluster_type_t
pset_cluster_type_to_cluster_type(pset_cluster_type_t pset_cluster_type)
{
	switch (pset_cluster_type) {
#if __AMP__
	case PSET_AMP_E:
		return CLUSTER_TYPE_E;
	case PSET_AMP_P:
		return CLUSTER_TYPE_P;
#endif /* __AMP__ */
	case PSET_SMP:
		return CLUSTER_TYPE_SMP;
	default:
		panic("Unexpected pset cluster type %d", pset_cluster_type);
	}
}
345 
#if CONFIG_SCHED_EDGE

/*
 *	sched_pset_cluster_shared_rsrc_load:
 *
 *	Report the shared-resource load on a cluster.  Derecommended
 *	clusters report UINT64_MAX so that nothing ever migrates to them.
 */
uint64_t
sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type)
{
	if (pset_is_recommended(pset)) {
		return os_atomic_load(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], relaxed);
	}
	/* Prevent migrations to derecommended clusters. */
	return UINT64_MAX;
}

#endif /* CONFIG_SCHED_EDGE */
359 
360 /* Mocked version */
361 processor_t
choose_processor(processor_set_t starting_pset,processor_t processor,thread_t thread,__unused sched_options_t * options)362 choose_processor(
363 	processor_set_t         starting_pset,
364 	processor_t             processor,
365 	thread_t                thread,
366 	__unused sched_options_t *options)
367 {
368 	(void)processor;
369 
370 	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
371 		return sched_rt_choose_processor(starting_pset, processor, thread);
372 	}
373 
374 	if (thread->bound_processor != NULL) {
375 		return thread->bound_processor;
376 	}
377 	/* Choose the first-indexed processor in the pset */
378 	return processor_array[starting_pset->cpu_set_low];
379 }
380 
381 int
pset_available_cpu_count(processor_set_t pset)382 pset_available_cpu_count(processor_set_t pset)
383 {
384 	return bit_count(pset_available_cpumap(pset));
385 }
386 
387 bool
pset_is_recommended(processor_set_t pset)388 pset_is_recommended(processor_set_t pset)
389 {
390 	if (!pset) {
391 		return false;
392 	}
393 	return pset_available_cpu_count(pset) > 0;
394 }
395 
396 bool
pset_type_is_recommended(processor_set_t pset)397 pset_type_is_recommended(processor_set_t pset)
398 {
399 	if (!pset) {
400 		return false;
401 	}
402 	pset_map_t recommended_psets = os_atomic_load(&pset->node->pset_recommended_map, relaxed);
403 	return bit_count(recommended_psets) > 0;
404 }
405 
/*
 *	sched_update_pset_load_average:
 *
 *	No-op stub: pset load averages are not modeled by the harness.
 */
void
sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
{
	(void)pset;
	(void)curtime;
}
412 
/*
 *	thread_setrun:
 *
 *	Unsupported stub: the harness drives runqueues directly, so
 *	reaching this function indicates a test bug.
 */
void
thread_setrun(thread_t thread, sched_options_t options)
{
	(void)thread;
	(void)options;
	assertf(false, "unimplemented");
}
420 
/*
 *	sched_steal_thread_enabled:
 *
 *	Thread-stealing is only worthwhile when the node contains more
 *	than one pset to steal from.
 */
bool
sched_steal_thread_enabled(processor_set_t pset)
{
	return bit_count(pset->node->pset_map) > 1;
}
426 
427 int sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS;
428