xref: /xnu-11417.121.6/tests/sched/sched_test_harness/shadow_headers/sched_prim.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	sched_prim.c
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1986
62  *
63  *	Scheduling primitives
64  *
65  */
66 
67 /*
68  * This version of osfmk/kern/sched_prim.c contains the limited subset of dependencies strictly
69  * required by the Clutch runqueue test harness in tests/sched/sched_test_harness/sched_clutch_harness.c.
70  * The dependencies have been copied here in order to isolate maintenance of the Clutch test
71  * harness from the rest of xnu.
72  */
73 #if !SCHED_TEST_HARNESS
74 #error File only for use with the Clutch runqueue test harness
75 #endif
76 
77 #include <kern/sched_prim.h>
78 #include <kern/processor.h>
79 
80 boolean_t
priority_is_urgent(int priority)81 priority_is_urgent(int priority)
82 {
83 	if (priority <= BASEPRI_FOREGROUND) {
84 		return FALSE;
85 	}
86 	if (priority < MINPRI_KERNEL) {
87 		return TRUE;
88 	}
89 	if (priority >= BASEPRI_PREEMPT) {
90 		return TRUE;
91 	}
92 	return FALSE;
93 }
94 
95 /*
96  *	run_queue_init:
97  *
98  *	Initialize a run queue before first use.
99  */
100 void
run_queue_init(run_queue_t rq)101 run_queue_init(
102 	run_queue_t             rq)
103 {
104 	rq->highq = NOPRI;
105 	for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
106 		rq->bitmap[i] = 0;
107 	}
108 	rq->urgency = rq->count = 0;
109 	for (int i = 0; i < NRQS; i++) {
110 		circle_queue_init(&rq->queues[i]);
111 	}
112 }
113 /*
114  *	run_queue_dequeue:
115  *
116  *	Perform a dequeue operation on a run queue,
117  *	and return the resulting thread.
118  *
119  *	The run queue must be locked (see thread_run_queue_remove()
120  *	for more info), and not empty.
121  */
122 thread_t
run_queue_dequeue(run_queue_t rq,sched_options_t options)123 run_queue_dequeue(
124 	run_queue_t     rq,
125 	sched_options_t options)
126 {
127 	thread_t        thread;
128 	circle_queue_t  queue = &rq->queues[rq->highq];
129 
130 	if (options & SCHED_HEADQ) {
131 		thread = cqe_dequeue_head(queue, struct thread, runq_links);
132 	} else {
133 		thread = cqe_dequeue_tail(queue, struct thread, runq_links);
134 	}
135 
136 	assert(thread != THREAD_NULL);
137 
138 	thread_clear_runq(thread);
139 	rq->count--;
140 	if (SCHED(priority_is_urgent)(rq->highq)) {
141 		rq->urgency--; assert(rq->urgency >= 0);
142 	}
143 	if (circle_queue_empty(queue)) {
144 		bitmap_clear(rq->bitmap, rq->highq);
145 		rq->highq = bitmap_first(rq->bitmap, NRQS);
146 	}
147 
148 	return thread;
149 }
150 
151 /*
152  *	run_queue_enqueue:
153  *
154  *	Perform a enqueue operation on a run queue.
155  *
156  *	The run queue must be locked (see thread_run_queue_remove()
157  *	for more info).
158  */
159 boolean_t
run_queue_enqueue(run_queue_t rq,thread_t thread,sched_options_t options)160 run_queue_enqueue(
161 	run_queue_t      rq,
162 	thread_t         thread,
163 	sched_options_t  options)
164 {
165 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
166 	boolean_t       result = FALSE;
167 
168 	if (circle_queue_empty(queue)) {
169 		circle_enqueue_tail(queue, &thread->runq_links);
170 
171 		rq_bitmap_set(rq->bitmap, thread->sched_pri);
172 		if (thread->sched_pri > rq->highq) {
173 			rq->highq = thread->sched_pri;
174 			result = TRUE;
175 		}
176 	} else {
177 		if (options & SCHED_TAILQ) {
178 			circle_enqueue_tail(queue, &thread->runq_links);
179 		} else {
180 			circle_enqueue_head(queue, &thread->runq_links);
181 		}
182 	}
183 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
184 		rq->urgency++;
185 	}
186 	rq->count++;
187 
188 	return result;
189 }
190 
191 /*
192  *	run_queue_remove:
193  *
194  *	Remove a specific thread from a runqueue.
195  *
196  *	The run queue must be locked.
197  */
198 void
run_queue_remove(run_queue_t rq,thread_t thread)199 run_queue_remove(
200 	run_queue_t    rq,
201 	thread_t       thread)
202 {
203 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
204 
205 	thread_assert_runq_nonnull(thread);
206 
207 	circle_dequeue(queue, &thread->runq_links);
208 	rq->count--;
209 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
210 		rq->urgency--; assert(rq->urgency >= 0);
211 	}
212 
213 	if (circle_queue_empty(queue)) {
214 		/* update run queue status */
215 		bitmap_clear(rq->bitmap, thread->sched_pri);
216 		rq->highq = bitmap_first(rq->bitmap, NRQS);
217 	}
218 
219 	thread_clear_runq(thread);
220 }
221 
222 /*
223  *      run_queue_peek
224  *
225  *      Peek at the runq and return the highest
226  *      priority thread from the runq.
227  *
228  *	The run queue must be locked.
229  */
230 thread_t
run_queue_peek(run_queue_t rq)231 run_queue_peek(
232 	run_queue_t    rq)
233 {
234 	if (rq->count > 0) {
235 		circle_queue_t queue = &rq->queues[rq->highq];
236 		thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
237 		return thread;
238 	} else {
239 		return THREAD_NULL;
240 	}
241 }
242 
/*
 * Scheduler globals required by the Clutch test harness; these mirror the
 * definitions in osfmk/kern/sched_prim.c. Only the quantum-related values
 * are computed here (by sched_timeshare_init() / sched_timeshare_timebase_init()
 * below); the remaining arrays are referenced by harness code — presumably
 * as bookkeeping state; confirm against the harness callers.
 */
uint32_t       sched_run_buckets[TH_BUCKET_MAX];
unsigned                sched_tick = 0;
int8_t          sched_load_shifts[NRQS];

/* Default timeshare preemption rate, in preemptions per second. */
#define         DEFAULT_PREEMPTION_RATE         100             /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
/* Default preemption rate for background threads, in preemptions per second. */
#define         DEFAULT_BG_PREEMPTION_RATE      400             /* (1/s) */
static int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

/* Quantum lengths in absolute-time units; set by sched_timeshare_timebase_init(). */
uint32_t        std_quantum;
uint32_t        min_std_quantum;        /* smallest remaining quantum (250 us) */
static uint32_t        bg_quantum;

/* Quantum lengths in microseconds; set by sched_timeshare_init(). */
uint32_t        std_quantum_us;
static uint32_t        bg_quantum_us;

uint32_t default_timeshare_constraint;  /* set to std_quantum at timebase init */
uint32_t sched_fixed_shift;             /* usage->priority shift, computed at timebase init */
261 
262 void
sched_timeshare_init(void)263 sched_timeshare_init(void)
264 {
265 	/*
266 	 * Calculate the timeslicing quantum
267 	 * in us.
268 	 */
269 	if (default_preemption_rate < 1) {
270 		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
271 	}
272 	std_quantum_us = (1000 * 1000) / default_preemption_rate;
273 
274 	if (default_bg_preemption_rate < 1) {
275 		default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
276 	}
277 	bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
278 }
279 
/*
 * sched_timeshare_timebase_init:
 *
 * Convert the microsecond quantum values computed by sched_timeshare_init()
 * into absolute-time units, and derive sched_fixed_shift, the fixed-point
 * shift used when converting CPU usage into timeshare priority decay.
 */
void
sched_timeshare_timebase_init(void)
{
	uint64_t        abstime;
	uint32_t        shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		std_quantum_us, NSEC_PER_USEC, &abstime);
	/* Each quantum must fit in 32 bits of absolute time and be non-zero. */
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = (uint32_t)abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = (uint32_t)abstime;

	/* quantum for background tasks */
	clock_interval_to_absolutetime_interval(
		bg_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	bg_quantum = (uint32_t)abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
	    NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 *
	 * NOTE(review): the multiplier below is 5/3 while the comment above
	 * says 5/8; this matches the upstream osfmk/kern/sched_prim.c code —
	 * confirm intent there before "fixing" either one.
	 */
	abstime = (abstime * 5) / 3;
	/* Find the power-of-two shift that brings abstime down to the priority range. */
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
		abstime >>= 1;
	}
	sched_fixed_shift = shift;

	default_timeshare_constraint = std_quantum;
}
320 
321 /* Returns the scheduling type for the pset */
322 cluster_type_t
pset_type_for_id(uint32_t cluster_id)323 pset_type_for_id(uint32_t cluster_id)
324 {
325 	return pset_array[cluster_id]->pset_type;
326 }
327 
#if CONFIG_SCHED_EDGE

/*
 * Returns the shared-resource load for the given pset, or UINT64_MAX for a
 * derecommended pset so that migrations toward it are never chosen.
 */
uint64_t
sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type)
{
	if (!pset_is_recommended(pset)) {
		/* Prevent migrations to derecommended clusters */
		return UINT64_MAX;
	}
	return os_atomic_load(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], relaxed);
}

#endif /* CONFIG_SCHED_EDGE */
341 
342 /* Mocked version */
343 processor_t
choose_processor(processor_set_t starting_pset,processor_t processor,thread_t thread)344 choose_processor(
345 	processor_set_t         starting_pset,
346 	processor_t             processor,
347 	thread_t                thread)
348 {
349 	(void)processor;
350 	if (thread->bound_processor != NULL) {
351 		return thread->bound_processor;
352 	}
353 	/* Choose the first-indexed processor in the pset */
354 	return processor_array[starting_pset->cpu_set_low];
355 }
356 
357 static cpumap_t
pset_available_cpumap(processor_set_t pset)358 pset_available_cpumap(processor_set_t pset)
359 {
360 	return pset->cpu_available_map & pset->recommended_bitmask;
361 }
362 
363 int
pset_available_cpu_count(processor_set_t pset)364 pset_available_cpu_count(processor_set_t pset)
365 {
366 	return bit_count(pset_available_cpumap(pset));
367 }
368 
369 bool
pset_is_recommended(processor_set_t pset)370 pset_is_recommended(processor_set_t pset)
371 {
372 	if (!pset) {
373 		return false;
374 	}
375 	return pset_available_cpu_count(pset) > 0;
376 }
377 
378 bool
pset_type_is_recommended(processor_set_t pset)379 pset_type_is_recommended(processor_set_t pset)
380 {
381 	if (!pset) {
382 		return false;
383 	}
384 	pset_map_t recommended_psets = os_atomic_load(&pset->node->pset_recommended_map, relaxed);
385 	return bit_count(recommended_psets) > 0;
386 }
387 
388 void
sched_update_pset_load_average(processor_set_t pset,uint64_t curtime)389 sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
390 {
391 	(void)pset;
392 	(void)curtime;
393 }
394 
395 int
rt_runq_count(processor_set_t pset)396 rt_runq_count(processor_set_t pset)
397 {
398 	(void)pset;
399 	return 0;
400 }
401