1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 /*
68 * This version of osfmk/kern/sched_prim.c contains the limited subset of dependencies strictly
69 * required by the Clutch runqueue test harness in tests/sched/sched_test_harness/sched_clutch_harness.c.
70 * The dependencies have been copied here in order to isolate maintenance of the Clutch test
71 * harness from the rest of xnu.
72 */
73 #if !SCHED_TEST_HARNESS
74 #error File only for use with the Clutch runqueue test harness
75 #endif
76
77 #include <kern/sched_prim.h>
78 #include <kern/processor.h>
79 #include <kern/sched_rt.h>
80
81 boolean_t
priority_is_urgent(int priority)82 priority_is_urgent(int priority)
83 {
84 if (priority <= BASEPRI_FOREGROUND) {
85 return FALSE;
86 }
87 if (priority < MINPRI_KERNEL) {
88 return TRUE;
89 }
90 if (priority >= BASEPRI_PREEMPT) {
91 return TRUE;
92 }
93 return FALSE;
94 }
95
96 /*
97 * run_queue_init:
98 *
99 * Initialize a run queue before first use.
100 */
101 void
run_queue_init(run_queue_t rq)102 run_queue_init(
103 run_queue_t rq)
104 {
105 rq->highq = NOPRI;
106 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
107 rq->bitmap[i] = 0;
108 }
109 rq->urgency = rq->count = 0;
110 for (int i = 0; i < NRQS; i++) {
111 circle_queue_init(&rq->queues[i]);
112 }
113 }
114 /*
115 * run_queue_dequeue:
116 *
117 * Perform a dequeue operation on a run queue,
118 * and return the resulting thread.
119 *
120 * The run queue must be locked (see thread_run_queue_remove()
121 * for more info), and not empty.
122 */
123 thread_t
run_queue_dequeue(run_queue_t rq,sched_options_t options)124 run_queue_dequeue(
125 run_queue_t rq,
126 sched_options_t options)
127 {
128 thread_t thread;
129 circle_queue_t queue = &rq->queues[rq->highq];
130
131 if (options & SCHED_HEADQ) {
132 thread = cqe_dequeue_head(queue, struct thread, runq_links);
133 } else {
134 thread = cqe_dequeue_tail(queue, struct thread, runq_links);
135 }
136
137 assert(thread != THREAD_NULL);
138
139 thread_clear_runq(thread);
140 rq->count--;
141 if (SCHED(priority_is_urgent)(rq->highq)) {
142 rq->urgency--; assert(rq->urgency >= 0);
143 }
144 if (circle_queue_empty(queue)) {
145 bitmap_clear(rq->bitmap, rq->highq);
146 rq->highq = bitmap_first(rq->bitmap, NRQS);
147 }
148
149 return thread;
150 }
151
152 /*
153 * run_queue_enqueue:
154 *
155 * Perform a enqueue operation on a run queue.
156 *
157 * The run queue must be locked (see thread_run_queue_remove()
158 * for more info).
159 */
160 boolean_t
run_queue_enqueue(run_queue_t rq,thread_t thread,sched_options_t options)161 run_queue_enqueue(
162 run_queue_t rq,
163 thread_t thread,
164 sched_options_t options)
165 {
166 circle_queue_t queue = &rq->queues[thread->sched_pri];
167 boolean_t result = FALSE;
168
169 if (circle_queue_empty(queue)) {
170 circle_enqueue_tail(queue, &thread->runq_links);
171
172 rq_bitmap_set(rq->bitmap, thread->sched_pri);
173 if (thread->sched_pri > rq->highq) {
174 rq->highq = thread->sched_pri;
175 result = TRUE;
176 }
177 } else {
178 if (options & SCHED_TAILQ) {
179 circle_enqueue_tail(queue, &thread->runq_links);
180 } else {
181 circle_enqueue_head(queue, &thread->runq_links);
182 }
183 }
184 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
185 rq->urgency++;
186 }
187 rq->count++;
188
189 return result;
190 }
191
192 /*
193 * run_queue_remove:
194 *
195 * Remove a specific thread from a runqueue.
196 *
197 * The run queue must be locked.
198 */
199 void
run_queue_remove(run_queue_t rq,thread_t thread)200 run_queue_remove(
201 run_queue_t rq,
202 thread_t thread)
203 {
204 circle_queue_t queue = &rq->queues[thread->sched_pri];
205
206 thread_assert_runq_nonnull(thread);
207
208 circle_dequeue(queue, &thread->runq_links);
209 rq->count--;
210 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
211 rq->urgency--; assert(rq->urgency >= 0);
212 }
213
214 if (circle_queue_empty(queue)) {
215 /* update run queue status */
216 bitmap_clear(rq->bitmap, thread->sched_pri);
217 rq->highq = bitmap_first(rq->bitmap, NRQS);
218 }
219
220 thread_clear_runq(thread);
221 }
222
223 /*
224 * run_queue_peek
225 *
226 * Peek at the runq and return the highest
227 * priority thread from the runq.
228 *
229 * The run queue must be locked.
230 */
231 thread_t
run_queue_peek(run_queue_t rq)232 run_queue_peek(
233 run_queue_t rq)
234 {
235 if (rq->count > 0) {
236 circle_queue_t queue = &rq->queues[rq->highq];
237 thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
238 return thread;
239 } else {
240 return THREAD_NULL;
241 }
242 }
243
/* Count of runnable threads, indexed by scheduler bucket. */
uint32_t sched_run_buckets[TH_BUCKET_MAX];
/* Scheduler tick counter. */
_Atomic uint32_t sched_tick = 0;
/* Per-priority load shift factors. */
int8_t sched_load_shifts[NRQS];

/* Preemption rates (timeslices per second) for standard and background threads. */
#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
#define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
static int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

/* Quantum lengths in absolute-time units (set by sched_timeshare_timebase_init). */
uint32_t std_quantum;          /* standard timeslicing quantum */
uint32_t min_std_quantum;      /* smallest remaining quantum (250 us) */
static uint32_t bg_quantum;    /* quantum for background tasks */

/* Quantum lengths in microseconds (set by sched_timeshare_init). */
uint32_t std_quantum_us;
static uint32_t bg_quantum_us;

/* Default timeshare deadline constraint (one standard quantum). */
uint32_t default_timeshare_constraint;
/* Shift converting CPU usage to timesharing priority aging. */
uint32_t sched_fixed_shift;
262
263 void
sched_timeshare_init(void)264 sched_timeshare_init(void)
265 {
266 /*
267 * Calculate the timeslicing quantum
268 * in us.
269 */
270 if (default_preemption_rate < 1) {
271 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
272 }
273 std_quantum_us = (1000 * 1000) / default_preemption_rate;
274
275 if (default_bg_preemption_rate < 1) {
276 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
277 }
278 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
279 }
280
/*
 * sched_timeshare_timebase_init:
 *
 * Convert the microsecond quantum lengths computed by
 * sched_timeshare_init() into absolute-time units, and derive
 * sched_fixed_shift, the conversion factor from CPU usage to
 * timesharing priority aging.
 */
void
sched_timeshare_timebase_init(void)
{
	uint64_t abstime;
	uint32_t shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		std_quantum_us, NSEC_PER_USEC, &abstime);
	/* each quantum must be non-zero and fit in 32 bits */
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = (uint32_t)abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = (uint32_t)abstime;

	/* quantum for background tasks */
	clock_interval_to_absolutetime_interval(
		bg_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	bg_quantum = (uint32_t)abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
	    NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	/* shift until the scaled tick interval fits below BASEPRI_DEFAULT */
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
		abstime >>= 1;
	}
	sched_fixed_shift = shift;

	/* default constraint is one standard quantum */
	default_timeshare_constraint = std_quantum;
}
321
322 /* Mocked version */
323 processor_t
choose_processor(processor_set_t starting_pset,processor_t processor,thread_t thread,__unused sched_options_t * options)324 choose_processor(
325 processor_set_t starting_pset,
326 processor_t processor,
327 thread_t thread,
328 __unused sched_options_t *options)
329 {
330 (void)processor;
331
332 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
333 return sched_rt_choose_processor(starting_pset, processor, thread);
334 }
335
336 if (thread->bound_processor != NULL) {
337 return thread->bound_processor;
338 }
339 /* Choose the first-indexed processor in the pset */
340 return processor_array[starting_pset->cpu_set_low];
341 }
342
343 int
pset_available_cpu_count(processor_set_t pset)344 pset_available_cpu_count(processor_set_t pset)
345 {
346 return bit_count(pset_available_cpumap(pset));
347 }
348
349 bool
pset_is_recommended(processor_set_t pset)350 pset_is_recommended(processor_set_t pset)
351 {
352 if (!pset) {
353 return false;
354 }
355 return pset_available_cpu_count(pset) > 0;
356 }
357
358 bool
pset_type_is_recommended(processor_set_t pset)359 pset_type_is_recommended(processor_set_t pset)
360 {
361 if (!pset) {
362 return false;
363 }
364 pset_map_t recommended_psets = os_atomic_load(&pset->node->pset_recommended_map, relaxed);
365 return bit_count(recommended_psets) > 0;
366 }
367
/*
 * thread_setrun:
 *
 * Unimplemented in the harness: always asserts.  The Clutch
 * runqueue tests are not expected to dispatch threads through
 * this path.
 */
void
thread_setrun(thread_t thread, sched_options_t options)
{
	(void)thread;
	(void)options;
	assertf(false, "unimplemented");
}
375
/*
 * pulled_thread_queue_enqueue:
 *
 * Unimplemented in the harness: always asserts.  Thread-pull
 * paths are not exercised by the Clutch runqueue tests.
 */
void
pulled_thread_queue_enqueue(
	__unused struct pulled_thread_queue * threadq,
	__unused thread_t thread)
{
	assertf(false, "unimplemented");
}
383
384 bool
sched_steal_thread_enabled(processor_set_t pset)385 sched_steal_thread_enabled(processor_set_t pset)
386 {
387 return bit_count(pset->node->pset_map) > 1;
388 }
389
/* Number of backup processors for real-time scheduling (see <kern/sched_rt.h>). */
int sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS;
391
/*
 * sched_update_pset_avg_execution_time:
 *
 * No-op in the harness: pset execution-time accounting is not
 * needed by the Clutch runqueue tests.
 */
void
sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
{
}
396
/*
 * sched_update_pset_load_average:
 *
 * No-op in the harness: pset load averaging is not needed by the
 * Clutch runqueue tests.
 */
void
sched_update_pset_load_average(__unused processor_set_t pset, __unused uint64_t curtime)
{
}
401
402 bool
thread_is_eager_preempt(thread_t thread)403 thread_is_eager_preempt(thread_t thread)
404 {
405 return false;
406 }
407
408 perfcontrol_class_t
thread_get_perfcontrol_class(thread_t thread)409 thread_get_perfcontrol_class(thread_t thread)
410 {
411 return PERFCONTROL_CLASS_MAX;
412 }
413
414 thread_urgency_t
thread_get_urgency(thread_t thread,uint64_t * arg1,uint64_t * arg2)415 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
416 {
417 return THREAD_URGENCY_NONE;
418 }
419