1 /*
2 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/machine.h>
31 #include <mach/policy.h>
32 #include <mach/sync_policy.h>
33 #include <mach/thread_act.h>
34
35 #include <machine/machine_routines.h>
36 #include <machine/sched_param.h>
37 #include <machine/machine_cpu.h>
38
39 #include <kern/kern_types.h>
40 #include <kern/clock.h>
41 #include <kern/cpu_number.h>
42 #include <kern/cpu_data.h>
43 #include <kern/debug.h>
44 #include <kern/macro_help.h>
45 #include <kern/machine.h>
46 #include <kern/misc_protos.h>
47 #include <kern/processor.h>
48 #include <kern/queue.h>
49 #include <kern/sched.h>
50 #include <kern/sched_prim.h>
51 #include <kern/syscall_subr.h>
52 #include <kern/task.h>
53 #include <kern/thread.h>
54
55 #include <vm/pmap.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_map.h>
58
59 #include <mach/sdt.h>
60
61 #include <sys/kdebug.h>
62
63 #if defined(CONFIG_SCHED_GRRR_CORE)
64
/* Forward declarations for the GRRR core run-queue primitives defined below. */

static void
grrr_priority_mapping_init(void);

/* Insert a thread into a GRRR run queue; returns whether to preempt. */
static boolean_t
grrr_enqueue(
	grrr_run_queue_t rq,
	thread_t thread);

/* Pick (and dequeue) the next thread to run from a GRRR run queue. */
static thread_t
grrr_select(
	grrr_run_queue_t rq);

/* Remove a specific thread from a GRRR run queue. */
static void
grrr_remove(
	grrr_run_queue_t rq,
	thread_t thread);


/* (Re-)insert a group into the run queue's weight-sorted group list. */
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

/* Decay per-group accumulated work so the counters never overflow. */
static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

/* Global tick; run queues lazily rescale their work counters when they
 * notice it has advanced (see grrr_intergroup_schedule()). */
uint32_t grrr_rescale_tick;
100
101 #endif /* defined(CONFIG_SCHED_GRRR_CORE) */
102
103 #if defined(CONFIG_SCHED_GRRR)
104
/*
 * Forward declarations for the entry points wired into the
 * sched_grrr_dispatch table below.  These implement the scheduler
 * interface defined by struct sched_dispatch_table.
 */

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor( processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
	processor_t processor,
	thread_t thread,
	sched_options_t options);

static void
sched_grrr_processor_queue_shutdown(
	processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
	processor_t processor,
	thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);
191
/*
 * Dispatch table for the GRRR (Group Ratio Round-Robin) scheduler.
 * Entries not implemented by GRRR fall through to shared defaults
 * (sched_* helpers) or are NULL/disabled.
 */
const struct sched_dispatch_table sched_grrr_dispatch = {
	.sched_name = "grrr",
	.init = sched_grrr_init,
	.timebase_init = sched_grrr_timebase_init,
	.processor_init = sched_grrr_processor_init,
	.pset_init = sched_grrr_pset_init,
	.maintenance_continuation = sched_grrr_maintenance_continuation,
	.choose_thread = sched_grrr_choose_thread,
	/* GRRR does not steal threads between processors. */
	.steal_thread_enabled = sched_steal_thread_DISABLED,
	.steal_thread = sched_grrr_steal_thread,
	.compute_timeshare_priority = sched_grrr_compute_priority,
	.choose_node = sched_choose_node,
	.choose_processor = sched_grrr_choose_processor,
	.processor_enqueue = sched_grrr_processor_enqueue,
	.processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
	.processor_queue_remove = sched_grrr_processor_queue_remove,
	.processor_queue_empty = sched_grrr_processor_queue_empty,
	.priority_is_urgent = sched_grrr_priority_is_urgent,
	.processor_csw_check = sched_grrr_processor_csw_check,
	.processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
	.initial_quantum_size = sched_grrr_initial_quantum_size,
	.initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
	.can_update_priority = sched_grrr_can_update_priority,
	.update_priority = sched_grrr_update_priority,
	.lightweight_update_priority = sched_grrr_lightweight_update_priority,
	.quantum_expire = sched_default_quantum_expire,
	.processor_runq_count = sched_grrr_processor_runq_count,
	.processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
	.processor_bound_count = sched_grrr_processor_bound_count,
	.thread_update_scan = sched_grrr_thread_update_scan,
	.multiple_psets_enabled = TRUE,
	.sched_groups_enabled = FALSE,
	.avoid_processor_enabled = FALSE,
	.thread_avoid_processor = NULL,
	.processor_balance = sched_SMT_balance,

	/* Real-time scheduling is delegated to the shared per-pset RT runq. */
	.rt_runq = sched_rtlocal_runq,
	.rt_init = sched_rtlocal_init,
	.rt_queue_shutdown = sched_rtlocal_queue_shutdown,
	.rt_runq_scan = sched_rtlocal_runq_scan,
	.rt_runq_count_sum = sched_rtlocal_runq_count_sum,
	.rt_steal_thread = sched_rtlocal_steal_thread,

	.qos_max_parallelism = sched_qos_max_parallelism,
	.check_spill = sched_check_spill,
	.ipi_policy = sched_ipi_policy,
	.thread_should_yield = sched_thread_should_yield,
	.run_count_incr = sched_run_incr,
	.run_count_decr = sched_run_decr,
	.update_thread_bucket = sched_update_thread_bucket,
	.pset_made_schedulable = sched_pset_made_schedulable,
	.cpu_init_completed = NULL,
	.thread_eligible_for_pset = NULL,
};
246
/* Tunables defined elsewhere, consumed by sched_grrr_timebase_init(). */
extern int max_unsafe_rt_quanta;
extern int max_unsafe_failsafe_quanta;

/* Standard timeslicing quantum, in microseconds and in absolute time units. */
static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

/* Next deadline for the periodic maintenance continuation. */
static uint64_t sched_grrr_tick_deadline;
254
255 static void
sched_grrr_init(void)256 sched_grrr_init(void)
257 {
258 if (default_preemption_rate < 1) {
259 default_preemption_rate = 100;
260 }
261 grrr_quantum_us = (1000 * 1000) / default_preemption_rate;
262
263 printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);
264
265 grrr_priority_mapping_init();
266 }
267
268 static void
sched_grrr_timebase_init(void)269 sched_grrr_timebase_init(void)
270 {
271 uint64_t abstime;
272
273 /* standard timeslicing quantum */
274 clock_interval_to_absolutetime_interval(
275 grrr_quantum_us, NSEC_PER_USEC, &abstime);
276 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
277 grrr_quantum = (uint32_t)abstime;
278
279 thread_depress_time = 1 * grrr_quantum;
280 default_timeshare_computation = grrr_quantum / 2;
281 default_timeshare_constraint = grrr_quantum;
282
283 sched_set_max_unsafe_rt_quanta(max_unsafe_rt_quanta);
284 sched_set_max_unsafe_fixed_quanta(max_unsafe_rt_quanta);
285 }
286
/* Per-processor initialization: set up this processor's GRRR run queue. */
static void
sched_grrr_processor_init(processor_t processor)
{
	grrr_runqueue_init(&processor->grrr_runq);
}
292
/* GRRR keeps no per-pset state; nothing to initialize. */
static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}
297
/*
 * sched_grrr_maintenance_continuation:
 *
 * Periodic scheduler maintenance.  Advances the global rescale tick,
 * recomputes load averages, then blocks until the next periodic
 * deadline with itself as the continuation.  Never returns.
 */
static void
sched_grrr_maintenance_continuation(void)
{
	uint64_t abstime = mach_absolute_time();

	/* Run queues lazily decay their per-group work counters when they
	 * observe that this tick has advanced. */
	grrr_rescale_tick++;

	/*
	 * Compute various averages.
	 */
	compute_averages(1);

	/* First invocation: anchor the periodic deadline at "now". */
	if (sched_grrr_tick_deadline == 0) {
		sched_grrr_tick_deadline = abstime;
	}

	clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
	    &sched_grrr_tick_deadline);

	/* Sleep until the deadline, restarting this function on wakeup. */
	assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
	thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
	/*NOTREACHED*/
}
321
322 static thread_t
sched_grrr_choose_thread(processor_t processor,int priority __unused,ast_t reason __unused)323 sched_grrr_choose_thread(processor_t processor,
324 int priority __unused,
325 ast_t reason __unused)
326 {
327 grrr_run_queue_t rq = &processor->grrr_runq;
328
329 return grrr_select(rq);
330 }
331
/*
 * GRRR does not support stealing threads from other processors.
 * NOTE(review): the steal_thread interface appears to require the pset
 * lock held on entry and released before return — hence the bare
 * pset_unlock() here; confirm against the dispatch-table contract.
 */
static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
	pset_unlock(pset);

	return THREAD_NULL;
}
339
/*
 * GRRR does not decay timeshare priorities: a thread's scheduled
 * priority is simply its base priority.
 */
static int
sched_grrr_compute_priority(thread_t thread)
{
	return thread->base_pri;
}
345
/* Delegate processor selection to the generic choose_processor() policy. */
static processor_t
sched_grrr_choose_processor( processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
	return choose_processor(pset, processor, thread);
}
353
354 static boolean_t
sched_grrr_processor_enqueue(processor_t processor,thread_t thread,sched_options_t options __unused)355 sched_grrr_processor_enqueue(
356 processor_t processor,
357 thread_t thread,
358 sched_options_t options __unused)
359 {
360 grrr_run_queue_t rq = &processor->grrr_runq;
361 boolean_t result;
362
363 result = grrr_enqueue(rq, thread);
364
365 thread->runq = processor;
366
367 return result;
368 }
369
/*
 * sched_grrr_processor_queue_shutdown:
 *
 * Drain a processor's run queue when the processor is shutting down.
 * Threads bound to this processor are re-enqueued locally; unbound
 * threads are collected and redirected through thread_setrun() after
 * the pset lock is dropped.
 * NOTE(review): appears to be called with the pset lock held — it is
 * released here before the thread_setrun() loop; confirm with callers.
 */
static void
sched_grrr_processor_queue_shutdown(
	processor_t processor)
{
	processor_set_t pset = processor->processor_set;
	thread_t thread;
	queue_head_t tqueue, bqueue;

	queue_init(&tqueue);
	queue_init(&bqueue);

	/* Pull every thread off the run queue, partitioning bound vs. unbound. */
	while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
		if (thread->bound_processor == PROCESSOR_NULL) {
			enqueue_tail(&tqueue, (queue_entry_t)thread);
		} else {
			enqueue_tail(&bqueue, (queue_entry_t)thread);
		}
	}

	/* Bound threads have nowhere else to go; put them back on this queue. */
	while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
		sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
	}

	pset_unlock(pset);

	/* Unbound threads: let the scheduler pick them a new home.  Must be
	 * done after dropping the pset lock (thread_setrun takes locks). */
	while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
403
/*
 * sched_grrr_processor_queue_remove:
 *
 * Try to pull a thread off this processor's run queue.  Returns TRUE
 * if the thread was found and removed, FALSE if it had already left
 * the queue (e.g. was dispatched) before the lock was taken.
 */
static boolean_t
sched_grrr_processor_queue_remove(
	processor_t processor,
	thread_t thread)
{
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);

	if (processor == thread->runq) {
		/*
		 * Thread is on a run queue and we have a lock on
		 * that run queue.
		 */
		grrr_run_queue_t rq = &processor->grrr_runq;

		grrr_remove(rq, thread);
	} else {
		/*
		 * The thread left the run queue before we could
		 * lock the run queue.
		 */
		assert(thread->runq == PROCESSOR_NULL);
		processor = PROCESSOR_NULL;
	}

	pset_unlock(pset);

	/* Non-NULL processor here means we performed the removal. */
	return processor != PROCESSOR_NULL;
}
434
435 static boolean_t
sched_grrr_processor_queue_empty(processor_t processor __unused)436 sched_grrr_processor_queue_empty(processor_t processor __unused)
437 {
438 boolean_t result;
439
440 result = (processor->grrr_runq.count == 0);
441
442 return result;
443 }
444
445 static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,int priority,boolean_t gte __unused)446 sched_grrr_processor_queue_has_priority(processor_t processor,
447 int priority,
448 boolean_t gte __unused)
449 {
450 grrr_run_queue_t rq = &processor->grrr_runq;
451 unsigned int i;
452
453 i = grrr_group_mapping[grrr_priority_mapping[priority]];
454 for (; i < NUM_GRRR_GROUPS; i++) {
455 if (rq->groups[i].count > 0) {
456 return TRUE;
457 }
458 }
459
460 return FALSE;
461 }
462
463 /* Implement sched_preempt_pri in code */
464 static boolean_t
sched_grrr_priority_is_urgent(int priority)465 sched_grrr_priority_is_urgent(int priority)
466 {
467 if (priority <= BASEPRI_FOREGROUND) {
468 return FALSE;
469 }
470
471 if (priority < MINPRI_KERNEL) {
472 return TRUE;
473 }
474
475 if (priority >= BASEPRI_PREEMPT) {
476 return TRUE;
477 }
478
479 return FALSE;
480 }
481
482 static ast_t
sched_grrr_processor_csw_check(processor_t processor)483 sched_grrr_processor_csw_check(processor_t processor)
484 {
485 int count;
486
487 count = sched_grrr_processor_runq_count(processor);
488
489 if (count > 0) {
490 return AST_PREEMPT;
491 }
492
493 return AST_NONE;
494 }
495
/* Every thread gets the standard quantum computed at timebase init. */
static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
	return grrr_quantum;
}
501
502 static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)503 sched_grrr_initial_thread_sched_mode(task_t parent_task)
504 {
505 if (parent_task == kernel_task) {
506 return TH_MODE_FIXED;
507 } else {
508 return TH_MODE_TIMESHARE;
509 }
510 }
511
/* GRRR never needs the periodic priority-update path. */
static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
	return FALSE;
}
517
/* No-op: GRRR does not age or recompute thread priorities. */
static void
sched_grrr_update_priority(thread_t thread __unused)
{
	return;
}
523
/* No-op: no lightweight (quantum-expiry) priority adjustment either. */
static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
	return;
}
529
/* Number of threads currently on this processor's GRRR run queue. */
static int
sched_grrr_processor_runq_count(processor_t processor)
{
	return processor->grrr_runq.count;
}
535
/* Accumulated run-queue length statistic for this processor. */
static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
	return processor->grrr_runq.runq_stats.count_sum;
}
541
/* GRRR does not track bound threads separately; always reports zero. */
static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
	return 0;
}
547
/* No-op: GRRR has no stuck-priority threads to scan for. */
static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
	return;
}
553
554 #endif /* defined(CONFIG_SCHED_GRRR) */
555
556 #if defined(CONFIG_SCHED_GRRR_CORE)
557
558 static void
grrr_priority_mapping_init(void)559 grrr_priority_mapping_init(void)
560 {
561 unsigned int i;
562
563 /* Map 0->0 up to 10->20 */
564 for (i = 0; i <= 10; i++) {
565 grrr_priority_mapping[i] = (grrr_proportional_priority_t)(2 * i);
566 }
567
568 /* Map user priorities 11->33 up to 51 -> 153 */
569 for (i = 11; i <= 51; i++) {
570 grrr_priority_mapping[i] = (grrr_proportional_priority_t)(3 * i);
571 }
572
573 /* Map high priorities 52->180 up to 127->255 */
574 for (i = 52; i <= 127; i++) {
575 grrr_priority_mapping[i] = (grrr_proportional_priority_t)(128 + i);
576 }
577
578 for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
579 #if 0
580 unsigned j, k;
581 /* Calculate log(i); */
582 for (j = 0, k = 1; k <= i; j++, k *= 2) {
583 ;
584 }
585 #endif
586
587 /* Groups of 4 */
588 grrr_group_mapping[i] = (grrr_group_index_t)(i >> 2);
589 }
590 }
591
/*
 * grrr_intragroup_schedule:
 *
 * Round-robin selection of the next client thread within a single
 * group.  Advances the group's current_client cursor, wrapping at the
 * end of the client list.  Returns THREAD_NULL if the group is empty.
 */
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
	thread_t thread;

	if (group->count == 0) {
		return THREAD_NULL;
	}

	thread = group->current_client;
	if (thread == THREAD_NULL) {
		thread = (thread_t)(void *)queue_first(&group->clients);
	}

	/* Always rotate to the next client.  (The "deficit" condition is a
	 * placeholder — a deficit-based policy could choose to stay put.) */
	if (1 /* deficit */) {
		group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
		if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
			/* Wrapped past the tail; restart at the head. */
			group->current_client = (thread_t)(void *)queue_first(&group->clients);
		}

		thread = group->current_client;
	}

	return thread;
}
617
/*
 * grrr_intergroup_schedule:
 *
 * Pick the next thread across groups using the Group Ratio Round-Robin
 * policy: groups are visited in weight-sorted order, with each group's
 * accumulated "work" compared against its weight ratio to decide
 * whether to advance to the next group or restart from the heaviest.
 * Returns THREAD_NULL if the run queue is empty.
 */
static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
	thread_t thread;
	grrr_group_t group;

	if (rq->count == 0) {
		return THREAD_NULL;
	}

	group = rq->current_group;

	/* No cursor yet: start with the heaviest-weight group. */
	if (group == GRRR_GROUP_NULL) {
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	}

	thread = grrr_intragroup_schedule(group);

	/* Lazily decay work counters when near overflow or when the global
	 * rescale tick has advanced since our last rescale. */
	if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
		grrr_rescale_work(rq);
	}
	group->work++;

	if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
		/* last group, go back to beginning */
		group = (grrr_group_t)queue_first(&rq->sorted_group_list);
	} else {
		grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
		uint64_t orderleft, orderright;

		/*
		 * The well-ordering condition for intergroup selection is:
		 *
		 * (group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
		 *
		 * Multiply both sides by their denominators to avoid division
		 *
		 */
		orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
		orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
		if (orderleft > orderright) {
			/* This group has consumed its ratio; move on. */
			group = nextgroup;
		} else {
			/* Otherwise restart the scan from the heaviest group. */
			group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		}
	}

	/* Remember where to resume on the next selection. */
	rq->current_group = group;

	return thread;
}
669
670 static void
grrr_runqueue_init(grrr_run_queue_t runq)671 grrr_runqueue_init(grrr_run_queue_t runq)
672 {
673 grrr_group_index_t index;
674
675 runq->count = 0;
676
677 for (index = 0; index < NUM_GRRR_GROUPS; index++) {
678 unsigned int prisearch;
679
680 for (prisearch = 0;
681 prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
682 prisearch++) {
683 if (grrr_group_mapping[prisearch] == index) {
684 runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
685 break;
686 }
687 }
688
689 runq->groups[index].index = index;
690
691 queue_init(&runq->groups[index].clients);
692 runq->groups[index].count = 0;
693 runq->groups[index].weight = 0;
694 runq->groups[index].work = 0;
695 runq->groups[index].current_client = THREAD_NULL;
696 }
697
698 queue_init(&runq->sorted_group_list);
699 runq->weight = 0;
700 runq->current_group = GRRR_GROUP_NULL;
701 }
702
703 static void
grrr_rescale_work(grrr_run_queue_t rq)704 grrr_rescale_work(grrr_run_queue_t rq)
705 {
706 grrr_group_index_t index;
707
708 /* avoid overflow by scaling by 1/8th */
709 for (index = 0; index < NUM_GRRR_GROUPS; index++) {
710 rq->groups[index].work >>= 3;
711 }
712
713 rq->last_rescale_tick = grrr_rescale_tick;
714 }
715
/*
 * grrr_enqueue:
 *
 * Add a thread to a GRRR run queue.  The thread's Mach priority is
 * translated into a proportional-share weight and then into a group;
 * the thread is inserted just before the group's current client so it
 * is picked up on the next full rotation.  The group is repositioned
 * in the weight-sorted group list.  Always returns FALSE (never
 * requests a preemption on its own).
 */
static boolean_t
grrr_enqueue(
	grrr_run_queue_t rq,
	thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	if (group->count == 0) {
		/* Empty group, this is the first client */
		enqueue_tail(&group->clients, (queue_entry_t)thread);
		group->count = 1;
		group->weight = gpriority;
		group->current_client = thread;
	} else {
		/* Insert before the current client */
		if (group->current_client == THREAD_NULL ||
		    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
			/* Current client is at (or is) the head: new head. */
			enqueue_head(&group->clients, (queue_entry_t)thread);
		} else {
			insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
		}
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count++;
		group->weight += gpriority;

		/* Since there was already a client, this is on the per-processor sorted list already */
		remqueue((queue_entry_t)group);
	}

	/* (Re-)insert the group at its weight-sorted position. */
	grrr_sorted_list_insert_group(rq, group);

	rq->count++;
	rq->weight += gpriority;

	return FALSE;
}
758
/*
 * grrr_select:
 *
 * Select the next thread via intergroup scheduling, then dequeue it:
 * remove it from its group, adjust group and run-queue accounting, and
 * re-sort (or retire) the group.  Returns THREAD_NULL when the queue
 * is empty.  Bookkeeping mirrors grrr_remove().
 */
static thread_t
grrr_select(grrr_run_queue_t rq)
{
	thread_t thread;

	thread = grrr_intergroup_schedule(rq);
	if (thread != THREAD_NULL) {
		grrr_proportional_priority_t gpriority;
		grrr_group_index_t gindex;
		grrr_group_t group;

		/* Recompute the thread's group from its current priority. */
		gpriority = grrr_priority_mapping[thread->sched_pri];
		gindex = grrr_group_mapping[gpriority];
		group = &rq->groups[gindex];

		remqueue((queue_entry_t)thread);
		SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
		group->count--;
		group->weight -= gpriority;
		/* Invalidate the rotation cursor if it pointed at this thread. */
		if (group->current_client == thread) {
			group->current_client = THREAD_NULL;
		}

		/* Pull the group off the sorted list ... */
		remqueue((queue_entry_t)group);
		if (group->count == 0) {
			/* ... and leave it off if now empty. */
			if (rq->current_group == group) {
				rq->current_group = GRRR_GROUP_NULL;
			}
		} else {
			/* Need to re-insert in sorted location */
			grrr_sorted_list_insert_group(rq, group);
		}

		rq->count--;
		rq->weight -= gpriority;

		/* The thread is no longer on any run queue. */
		thread->runq = PROCESSOR_NULL;
	}

	return thread;
}
800
/*
 * grrr_remove:
 *
 * Remove a specific thread from a GRRR run queue: detach it from its
 * group, adjust group and run-queue accounting, and re-sort (or
 * retire) the group.  Bookkeeping mirrors the dequeue path in
 * grrr_select().
 */
static void
grrr_remove(
	grrr_run_queue_t rq,
	thread_t thread)
{
	grrr_proportional_priority_t gpriority;
	grrr_group_index_t gindex;
	grrr_group_t group;

	/* Locate the thread's group via its current priority. */
	gpriority = grrr_priority_mapping[thread->sched_pri];
	gindex = grrr_group_mapping[gpriority];
	group = &rq->groups[gindex];

	remqueue((queue_entry_t)thread);
	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
	group->count--;
	group->weight -= gpriority;
	/* Invalidate the rotation cursor if it pointed at this thread. */
	if (group->current_client == thread) {
		group->current_client = THREAD_NULL;
	}

	/* Pull the group off the sorted list ... */
	remqueue((queue_entry_t)group);
	if (group->count == 0) {
		/* ... and leave it off if now empty. */
		if (rq->current_group == group) {
			rq->current_group = GRRR_GROUP_NULL;
		}
	} else {
		/* Need to re-insert in sorted location */
		grrr_sorted_list_insert_group(rq, group);
	}

	rq->count--;
	rq->weight -= gpriority;

	/* The thread is no longer on any run queue. */
	thread->runq = PROCESSOR_NULL;
}
837
/*
 * grrr_sorted_list_insert_group:
 *
 * Insert a group into the run queue's group list, kept sorted by
 * descending weight (ties broken by ascending group index).  The group
 * must not currently be on the list.
 */
static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
	/* Simple insertion sort */
	if (queue_empty(&rq->sorted_group_list)) {
		enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
	} else {
		grrr_group_t search_group;

		/* Start searching from the head (heaviest weight) for the first
		 * element less than us, so we can insert before it
		 */
		search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
		while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			if (search_group->weight < group->weight) {
				/* we should be before this */
				/* Back up one so insque() below places us ahead of it. */
				search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
				break;
			}
			if (search_group->weight == group->weight) {
				/* Use group index as a tie breaker */
				if (search_group->index < group->index) {
					search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
					break;
				}
			}

			/* otherwise, our weight is too small, keep going */
			search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
		}

		/* Ran off the end (or backed up to the list head sentinel):
		 * append; otherwise insert right after search_group. */
		if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
			enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
		} else {
			insque((queue_entry_t)group, (queue_entry_t)search_group);
		}
	}
}
877
878 #endif /* defined(CONFIG_SCHED_GRRR_CORE) */
879