/* xref: /xnu-10063.101.15/osfmk/kern/sched_proto.c (revision 94d3b452840153a99b38a3a9659680b2a006908e) */
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

static void
sched_proto_init(void);

static void
sched_proto_timebase_init(void);

static void
sched_proto_processor_init(processor_t processor);

static void
sched_proto_pset_init(processor_set_t pset);

static void
sched_proto_maintenance_continuation(void);

static thread_t
sched_proto_choose_thread(processor_t           processor,
    int                            priority,
    ast_t                          reason);

static thread_t
sched_proto_steal_thread(processor_set_t                pset);

static int
sched_proto_compute_priority(thread_t thread);

static processor_t
sched_proto_choose_processor(   processor_set_t         pset,
    processor_t                     processor,
    thread_t                        thread);


static boolean_t
sched_proto_processor_enqueue(
	processor_t                    processor,
	thread_t                       thread,
	sched_options_t                options);

static void
sched_proto_processor_queue_shutdown(
	processor_t                    processor);

static boolean_t
sched_proto_processor_queue_remove(
	processor_t                 processor,
	thread_t                thread);

static boolean_t
sched_proto_processor_queue_empty(processor_t           processor);

static boolean_t
sched_proto_processor_queue_has_priority(processor_t            processor,
    int                            priority,
    boolean_t              gte);

static boolean_t
sched_proto_priority_is_urgent(int priority);

static ast_t
sched_proto_processor_csw_check(processor_t processor);

static uint32_t
sched_proto_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_proto_can_update_priority(thread_t        thread);

static void
sched_proto_update_priority(thread_t    thread);

static void
sched_proto_lightweight_update_priority(thread_t        thread);

static void
sched_proto_quantum_expire(thread_t     thread);

static int
sched_proto_processor_runq_count(processor_t   processor);

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t   processor);

static int
sched_proto_processor_bound_count(processor_t   processor);

static void
sched_proto_thread_update_scan(sched_update_scan_context_t scan_context);


const struct sched_dispatch_table sched_proto_dispatch = {
	.sched_name                                     = "proto",
	.init                                           = sched_proto_init,
	.timebase_init                                  = sched_proto_timebase_init,
	.processor_init                                 = sched_proto_processor_init,
	.pset_init                                      = sched_proto_pset_init,
	.maintenance_continuation                       = sched_proto_maintenance_continuation,
	.choose_thread                                  = sched_proto_choose_thread,
	.steal_thread_enabled                           = sched_steal_thread_DISABLED,
	.steal_thread                                   = sched_proto_steal_thread,
	.compute_timeshare_priority                     = sched_proto_compute_priority,
	.choose_node                                    = sched_choose_node,
	.choose_processor                               = sched_proto_choose_processor,
	.processor_enqueue                              = sched_proto_processor_enqueue,
	.processor_queue_shutdown                       = sched_proto_processor_queue_shutdown,
	.processor_queue_remove                         = sched_proto_processor_queue_remove,
	.processor_queue_empty                          = sched_proto_processor_queue_empty,
	.priority_is_urgent                             = sched_proto_priority_is_urgent,
	.processor_csw_check                            = sched_proto_processor_csw_check,
	.processor_queue_has_priority                   = sched_proto_processor_queue_has_priority,
	.initial_quantum_size                           = sched_proto_initial_quantum_size,
	.initial_thread_sched_mode                      = sched_proto_initial_thread_sched_mode,
	.can_update_priority                            = sched_proto_can_update_priority,
	.update_priority                                = sched_proto_update_priority,
	.lightweight_update_priority                    = sched_proto_lightweight_update_priority,
	.quantum_expire                                 = sched_proto_quantum_expire,
	.processor_runq_count                           = sched_proto_processor_runq_count,
	.processor_runq_stats_count_sum                 = sched_proto_processor_runq_stats_count_sum,
	.processor_bound_count                          = sched_proto_processor_bound_count,
	.thread_update_scan                             = sched_proto_thread_update_scan,
	.multiple_psets_enabled                         = TRUE,
	.sched_groups_enabled                           = FALSE,
	.avoid_processor_enabled                        = FALSE,
	.thread_avoid_processor                         = NULL,
	.processor_balance                              = sched_SMT_balance,

	.rt_runq                                        = sched_rtlocal_runq,
	.rt_init                                        = sched_rtlocal_init,
	.rt_queue_shutdown                              = sched_rtlocal_queue_shutdown,
	.rt_runq_scan                                   = sched_rtlocal_runq_scan,
	.rt_runq_count_sum                              = sched_rtlocal_runq_count_sum,
	.rt_steal_thread                                = sched_rtlocal_steal_thread,

	.qos_max_parallelism                            = sched_qos_max_parallelism,
	.check_spill                                    = sched_check_spill,
	.ipi_policy                                     = sched_ipi_policy,
	.thread_should_yield                            = sched_thread_should_yield,
	.run_count_incr                                 = sched_run_incr,
	.run_count_decr                                 = sched_run_decr,
	.update_thread_bucket                           = sched_update_thread_bucket,
	.pset_made_schedulable                          = sched_pset_made_schedulable,
	.cpu_init_completed                             = NULL,
	.thread_eligible_for_pset                       = NULL,
};

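/*
 * The prototype scheduler keeps a single run queue shared by every
 * processor, guarded by global_runq_lock.  Threads on that queue have
 * their runq field set to the GLOBAL_RUNQ sentinel rather than to a
 * real processor.  proto_processor names the one processor currently
 * designated to run threads; the maintenance continuation rotates it
 * periodically (see sched_proto_maintenance_continuation).
 */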
static struct run_queue *global_runq;
static struct run_queue global_runq_storage;

#define GLOBAL_RUNQ             ((processor_t)-2)
decl_simple_lock_data(static, global_runq_lock);

extern int      max_unsafe_rt_quanta;
extern int      max_unsafe_fixed_quanta;

static uint32_t proto_quantum_us;
static uint32_t proto_quantum;

static uint32_t runqueue_generation;

static processor_t proto_processor;

static uint64_t                 sched_proto_tick_deadline;
static uint32_t                 sched_proto_tick;

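/*
 * Boot-time initialization: choose the 10 ms timeslicing quantum (kept in
 * microseconds until timebase_init converts it), set up the global run
 * queue and its lock, and start with the master processor as the
 * designated proto_processor.
 */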
static void
sched_proto_init(void)
{
	proto_quantum_us = 10 * 1000;

	printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us);

	simple_lock_init(&global_runq_lock, 0);
	global_runq = &global_runq_storage;
	run_queue_init(global_runq);
	runqueue_generation = 0;

	proto_processor = master_processor;
}

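/*
 * Once the timebase is known, convert the quantum from microseconds to
 * absolute-time units and derive the depression time and the default
 * timeshare computation/constraint from it.
 */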
static void
sched_proto_timebase_init(void)
{
	uint64_t        abstime;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		proto_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	proto_quantum = (uint32_t)abstime;

	thread_depress_time = 1 * proto_quantum;
	default_timeshare_computation = proto_quantum / 2;
	default_timeshare_constraint = proto_quantum;

	sched_set_max_unsafe_rt_quanta(max_unsafe_rt_quanta);
	sched_set_max_unsafe_fixed_quanta(max_unsafe_fixed_quanta);
}

static void
sched_proto_processor_init(processor_t processor __unused)
{
	/* No per-processor state */
}

static void
sched_proto_pset_init(processor_set_t pset __unused)
{
}

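/*
 * Per-second maintenance: every eighth tick, advance proto_processor to
 * the next processor on the list (wrapping back to the master processor),
 * recompute the load averages, and rearm the periodic deadline before
 * blocking again on this continuation.
 */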
static void
sched_proto_maintenance_continuation(void)
{
	uint64_t                        abstime = mach_absolute_time();

	sched_proto_tick++;

	/* Every 8 seconds, switch to another processor */
	if ((sched_proto_tick & 0x7) == 0) {
		processor_t new_processor;

		new_processor = proto_processor->processor_list;
		if (new_processor == PROCESSOR_NULL) {
			proto_processor = master_processor;
		} else {
			proto_processor = new_processor;
		}
	}


	/*
	 *  Compute various averages.
	 */
	compute_averages(1);

	if (sched_proto_tick_deadline == 0) {
		sched_proto_tick_deadline = abstime;
	}

	clock_deadline_for_periodic_event(sched_one_second_interval, abstime,
	    &sched_proto_tick_deadline);

	assert_wait_deadline((event_t)sched_proto_maintenance_continuation, THREAD_UNINT, sched_proto_tick_deadline);
	thread_block((thread_continue_t)sched_proto_maintenance_continuation);
	/*NOTREACHED*/
}

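/*
 * Select the next thread for this processor: scan priority bands from the
 * highest populated level down to the caller-supplied minimum, skipping
 * threads bound to another processor and threads already chosen in the
 * current run-queue generation.
 */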
static thread_t
sched_proto_choose_thread(processor_t           processor,
    int                           priority,
    ast_t                         reason __unused)
{
	run_queue_t             rq = global_runq;
	circle_queue_t          queue;
	int                             pri, count;
	thread_t                thread;


	simple_lock(&global_runq_lock, LCK_GRP_NULL);

	queue = rq->queues + rq->highq;
	pri = rq->highq;
	count = rq->count;

	/*
	 * Since we don't depress priorities, a high priority thread
	 * may get selected over and over again. Put a runqueue
	 * generation number in the thread structure so that we
	 * can ensure that we've cycled through all runnable tasks
	 * before coming back to a high priority thread. This isn't
	 * perfect, especially if the number of runnable threads always
	 * stays high, but is a workable approximation
	 */

	while (count > 0 && pri >= priority) {
		cqe_foreach_element_safe(thread, queue, runq_links) {
			if ((thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) &&
			    runqueue_generation != thread->runqueue_generation) {
				circle_dequeue(queue, &thread->runq_links);

				thread->runq = PROCESSOR_NULL;
				thread->runqueue_generation = runqueue_generation;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (circle_queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				simple_unlock(&global_runq_lock);
				return thread;
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	runqueue_generation++;

	simple_unlock(&global_runq_lock);
	return THREAD_NULL;
}

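/*
 * With a single global run queue there is nothing to steal from another
 * processor set; drop the pset lock and return no thread.
 */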
static thread_t
sched_proto_steal_thread(processor_set_t                pset)
{
	pset_unlock(pset);

	return THREAD_NULL;
}

static int
sched_proto_compute_priority(thread_t thread)
{
	return thread->base_pri;
}

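/*
 * All threads are directed at the currently designated proto_processor,
 * re-taking the matching pset lock if the caller handed us a different
 * processor set.
 */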
static processor_t
sched_proto_choose_processor(   processor_set_t         pset,
    processor_t                    processor,
    thread_t                       thread __unused)
{
	processor = proto_processor;

	/*
	 *	Check that the correct processor set is
	 *	returned locked.
	 */
	if (pset != processor->processor_set) {
		pset_unlock(pset);

		pset = processor->processor_set;
		pset_lock(pset);
	}

	return processor;
}

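/*
 * Enqueue onto the shared global run queue and tag the thread with the
 * GLOBAL_RUNQ sentinel so later lookups know which queue holds it.
 */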
static boolean_t
sched_proto_processor_enqueue(
	processor_t                    processor __unused,
	thread_t                       thread,
	sched_options_t                options)
{
	run_queue_t             rq = global_runq;
	boolean_t               result;

	simple_lock(&global_runq_lock, LCK_GRP_NULL);
	result = run_queue_enqueue(rq, thread, options);
	thread->runq = GLOBAL_RUNQ;
	simple_unlock(&global_runq_lock);

	return result;
}

static void
sched_proto_processor_queue_shutdown(
	processor_t                    processor)
{
	/* With a global runqueue, just stop choosing this processor */
	(void)processor;
}

static boolean_t
sched_proto_processor_queue_remove(
	processor_t                     processor,
	thread_t                thread)
{
	void *                  rqlock;
	run_queue_t             rq;

	rqlock = &global_runq_lock;
	rq = global_runq;

	simple_lock(rqlock, LCK_GRP_NULL);
	if (processor == thread->runq) {
		/*
		 *	Thread is on a run queue and we have a lock on
		 *	that run queue.
		 */
		run_queue_remove(rq, thread);
	} else {
		/*
		 *	The thread left the run queue before we could
		 *      lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	simple_unlock(rqlock);

	return processor != PROCESSOR_NULL;
}

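/*
 * Note: the emptiness check reads the global count without taking the
 * run-queue lock, so it is only a hint; the priority check below does
 * take the lock.
 */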
static boolean_t
sched_proto_processor_queue_empty(processor_t           processor __unused)
{
	boolean_t result;

	result = (global_runq->count == 0);

	return result;
}

static boolean_t
sched_proto_processor_queue_has_priority(processor_t            processor __unused,
    int                            priority,
    boolean_t              gte)
{
	boolean_t result;

	simple_lock(&global_runq_lock, LCK_GRP_NULL);

	if (gte) {
		result = global_runq->highq >= priority;
	} else {
		result = global_runq->highq > priority;
	}

	simple_unlock(&global_runq_lock);

	return result;
}

/*
 * Implement sched_preempt_pri in code: a priority is urgent if it is
 * above the foreground band but below the kernel priorities, or at or
 * above BASEPRI_PREEMPT.  Priorities in [MINPRI_KERNEL, BASEPRI_PREEMPT)
 * are not considered urgent.
 */
static boolean_t
sched_proto_priority_is_urgent(int priority)
{
	if (priority <= BASEPRI_FOREGROUND) {
		return FALSE;
	}

	if (priority < MINPRI_KERNEL) {
		return TRUE;
	}

	if (priority >= BASEPRI_PREEMPT) {
		return TRUE;
	}

	return FALSE;
}

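/*
 * Ask for preemption whenever the global run queue is non-empty (urgent
 * if it holds urgent threads), or when this processor is no longer the
 * designated proto_processor.
 */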
static ast_t
sched_proto_processor_csw_check(processor_t processor)
{
	run_queue_t             runq;
	int                             count, urgency;

	runq = global_runq;
	count = runq->count;
	urgency = runq->urgency;

	if (count > 0) {
		if (urgency > 0) {
			return AST_PREEMPT | AST_URGENT;
		}

		return AST_PREEMPT;
	}

	if (proto_processor != processor) {
		return AST_PREEMPT;
	}

	return AST_NONE;
}

static uint32_t
sched_proto_initial_quantum_size(thread_t thread __unused)
{
	return proto_quantum;
}

static sched_mode_t
sched_proto_initial_thread_sched_mode(task_t parent_task)
{
	if (parent_task == kernel_task) {
		return TH_MODE_FIXED;
	} else {
		return TH_MODE_TIMESHARE;
	}
}

static boolean_t
sched_proto_can_update_priority(thread_t        thread __unused)
{
	return FALSE;
}

static void
sched_proto_update_priority(thread_t    thread __unused)
{
}

static void
sched_proto_lightweight_update_priority(thread_t        thread __unused)
{
}

static void
sched_proto_quantum_expire(thread_t    thread __unused)
{
}

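/*
 * Run-queue statistics are reported only against the master processor,
 * so the shared global queue is not counted once per CPU.
 */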
static int
sched_proto_processor_runq_count(processor_t   processor)
{
	if (master_processor == processor) {
		return global_runq->count;
	} else {
		return 0;
	}
}

static uint64_t
sched_proto_processor_runq_stats_count_sum(processor_t   processor)
{
	if (master_processor == processor) {
		return global_runq->runq_stats.count_sum;
	} else {
		return 0ULL;
	}
}

static int
sched_proto_processor_bound_count(__unused processor_t   processor)
{
	return 0;
}

static void
sched_proto_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
}
603