/*
 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>
#if defined(CONFIG_SCHED_GRRR_CORE)

static void
grrr_priority_mapping_init(void);

static boolean_t
grrr_enqueue(
        grrr_run_queue_t rq,
        thread_t thread);

static thread_t
grrr_select(
        grrr_run_queue_t rq);

static void
grrr_remove(
        grrr_run_queue_t rq,
        thread_t thread);


static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group);

static void
grrr_rescale_work(grrr_run_queue_t rq);

static void
grrr_runqueue_init(grrr_run_queue_t runq);

/* Map Mach priorities to ones suitable for proportional sharing */
static grrr_proportional_priority_t grrr_priority_mapping[NRQS];

/* Map each proportional priority to its group */
static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES];

uint32_t grrr_rescale_tick;

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */

#if defined(CONFIG_SCHED_GRRR)

static void
sched_grrr_init(void);

static void
sched_grrr_timebase_init(void);

static void
sched_grrr_processor_init(processor_t processor);

static void
sched_grrr_pset_init(processor_set_t pset);

static void
sched_grrr_maintenance_continuation(void);

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority,
    ast_t reason);

static thread_t
sched_grrr_steal_thread(processor_set_t pset);

static int
sched_grrr_compute_priority(thread_t thread);

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread);

static boolean_t
sched_grrr_processor_enqueue(
        processor_t processor,
        thread_t thread,
        sched_options_t options);

static void
sched_grrr_processor_queue_shutdown(
        processor_t processor);

static boolean_t
sched_grrr_processor_queue_remove(
        processor_t processor,
        thread_t thread);

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor);

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte);

static boolean_t
sched_grrr_priority_is_urgent(int priority);

static ast_t
sched_grrr_processor_csw_check(processor_t processor);

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_grrr_can_update_priority(thread_t thread);

static void
sched_grrr_update_priority(thread_t thread);

static void
sched_grrr_lightweight_update_priority(thread_t thread);

static int
sched_grrr_processor_runq_count(processor_t processor);

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor);

static int
sched_grrr_processor_bound_count(processor_t processor);

static void
sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context);

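/*
 * GRRR callbacks registered with the scheduler.  Entries that are not
 * GRRR-specific (realtime queues, IPI policy, run-count bookkeeping, etc.)
 * reuse the stock sched_* implementations.
 */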
const struct sched_dispatch_table sched_grrr_dispatch = {
        .sched_name = "grrr",
        .init = sched_grrr_init,
        .timebase_init = sched_grrr_timebase_init,
        .processor_init = sched_grrr_processor_init,
        .pset_init = sched_grrr_pset_init,
        .maintenance_continuation = sched_grrr_maintenance_continuation,
        .choose_thread = sched_grrr_choose_thread,
        .steal_thread_enabled = sched_steal_thread_DISABLED,
        .steal_thread = sched_grrr_steal_thread,
        .compute_timeshare_priority = sched_grrr_compute_priority,
        .choose_node = sched_choose_node,
        .choose_processor = sched_grrr_choose_processor,
        .processor_enqueue = sched_grrr_processor_enqueue,
        .processor_queue_shutdown = sched_grrr_processor_queue_shutdown,
        .processor_queue_remove = sched_grrr_processor_queue_remove,
        .processor_queue_empty = sched_grrr_processor_queue_empty,
        .priority_is_urgent = sched_grrr_priority_is_urgent,
        .processor_csw_check = sched_grrr_processor_csw_check,
        .processor_queue_has_priority = sched_grrr_processor_queue_has_priority,
        .initial_quantum_size = sched_grrr_initial_quantum_size,
        .initial_thread_sched_mode = sched_grrr_initial_thread_sched_mode,
        .can_update_priority = sched_grrr_can_update_priority,
        .update_priority = sched_grrr_update_priority,
        .lightweight_update_priority = sched_grrr_lightweight_update_priority,
        .quantum_expire = sched_default_quantum_expire,
        .processor_runq_count = sched_grrr_processor_runq_count,
        .processor_runq_stats_count_sum = sched_grrr_processor_runq_stats_count_sum,
        .processor_bound_count = sched_grrr_processor_bound_count,
        .thread_update_scan = sched_grrr_thread_update_scan,
        .multiple_psets_enabled = TRUE,
        .sched_groups_enabled = FALSE,
        .avoid_processor_enabled = FALSE,
        .thread_avoid_processor = NULL,
        .processor_balance = sched_SMT_balance,

        .rt_runq = sched_rtlocal_runq,
        .rt_init = sched_rtlocal_init,
        .rt_queue_shutdown = sched_rtlocal_queue_shutdown,
        .rt_runq_scan = sched_rtlocal_runq_scan,
        .rt_runq_count_sum = sched_rtlocal_runq_count_sum,
        .rt_steal_thread = sched_rtlocal_steal_thread,

        .qos_max_parallelism = sched_qos_max_parallelism,
        .check_spill = sched_check_spill,
        .ipi_policy = sched_ipi_policy,
        .thread_should_yield = sched_thread_should_yield,
        .run_count_incr = sched_run_incr,
        .run_count_decr = sched_run_decr,
        .update_thread_bucket = sched_update_thread_bucket,
        .pset_made_schedulable = sched_pset_made_schedulable,
        .cpu_init_completed = NULL,
        .thread_eligible_for_pset = NULL,
};

extern int max_unsafe_quanta;

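/* Standard GRRR quantum, in microseconds and in absolute-time units */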
static uint32_t grrr_quantum_us;
static uint32_t grrr_quantum;

static uint64_t sched_grrr_tick_deadline;

static void
sched_grrr_init(void)
{
        if (default_preemption_rate < 1) {
                default_preemption_rate = 100;
        }
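        /*
         * One quantum per preemption interval; the 100-per-second fallback
         * above works out to a 10,000 us quantum.
         */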
        grrr_quantum_us = (1000 * 1000) / default_preemption_rate;

        printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us);

        grrr_priority_mapping_init();
}

static void
sched_grrr_timebase_init(void)
{
        uint64_t abstime;

        /* standard timeslicing quantum */
        clock_interval_to_absolutetime_interval(
                grrr_quantum_us, NSEC_PER_USEC, &abstime);
        assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
        grrr_quantum = (uint32_t)abstime;

        thread_depress_time = 1 * grrr_quantum;
        default_timeshare_computation = grrr_quantum / 2;
        default_timeshare_constraint = grrr_quantum;

        max_unsafe_computation = max_unsafe_quanta * grrr_quantum;
        sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum;
}

static void
sched_grrr_processor_init(processor_t processor)
{
        grrr_runqueue_init(&processor->grrr_runq);
}

static void
sched_grrr_pset_init(processor_set_t pset __unused)
{
}

static void
sched_grrr_maintenance_continuation(void)
{
        uint64_t abstime = mach_absolute_time();

        grrr_rescale_tick++;

        /*
         * Compute various averages.
         */
        compute_averages(1);

        if (sched_grrr_tick_deadline == 0) {
                sched_grrr_tick_deadline = abstime;
        }

        clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime,
            &sched_grrr_tick_deadline);

        assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline);
        thread_block((thread_continue_t)sched_grrr_maintenance_continuation);
        /*NOTREACHED*/
}

static thread_t
sched_grrr_choose_thread(processor_t processor,
    int priority __unused,
    ast_t reason __unused)
{
        grrr_run_queue_t rq = &processor->grrr_runq;

        return grrr_select(rq);
}

static thread_t
sched_grrr_steal_thread(processor_set_t pset)
{
        pset_unlock(pset);

        return THREAD_NULL;
}

static int
sched_grrr_compute_priority(thread_t thread)
{
        return thread->base_pri;
}

static processor_t
sched_grrr_choose_processor(processor_set_t pset,
    processor_t processor,
    thread_t thread)
{
        return choose_processor(pset, processor, thread);
}

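/*
 * Place the thread on this processor's GRRR run queue and record the
 * association in thread->runq so it can later be found and removed.
 */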
static boolean_t
sched_grrr_processor_enqueue(
        processor_t processor,
        thread_t thread,
        sched_options_t options __unused)
{
        grrr_run_queue_t rq = &processor->grrr_runq;
        boolean_t result;

        result = grrr_enqueue(rq, thread);

        thread->runq = processor;

        return result;
}

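/*
 * Drain the processor's run queue: bound threads are requeued on this
 * processor, unbound threads are handed back to thread_setrun() once the
 * pset lock has been dropped.
 */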
static void
sched_grrr_processor_queue_shutdown(
        processor_t processor)
{
        processor_set_t pset = processor->processor_set;
        thread_t thread;
        queue_head_t tqueue, bqueue;

        queue_init(&tqueue);
        queue_init(&bqueue);

        while ((thread = sched_grrr_choose_thread(processor, IDLEPRI, AST_NONE)) != THREAD_NULL) {
                if (thread->bound_processor == PROCESSOR_NULL) {
                        enqueue_tail(&tqueue, (queue_entry_t)thread);
                } else {
                        enqueue_tail(&bqueue, (queue_entry_t)thread);
                }
        }

        while ((thread = (thread_t)(void *)dequeue_head(&bqueue)) != THREAD_NULL) {
                sched_grrr_processor_enqueue(processor, thread, SCHED_TAILQ);
        }

        pset_unlock(pset);

        while ((thread = (thread_t)(void *)dequeue_head(&tqueue)) != THREAD_NULL) {
                thread_lock(thread);

                thread_setrun(thread, SCHED_TAILQ);

                thread_unlock(thread);
        }
}

static boolean_t
sched_grrr_processor_queue_remove(
        processor_t processor,
        thread_t thread)
{
        processor_set_t pset = processor->processor_set;

        pset_lock(pset);

        if (processor == thread->runq) {
                /*
                 * Thread is on a run queue and we have a lock on
                 * that run queue.
                 */
                grrr_run_queue_t rq = &processor->grrr_runq;

                grrr_remove(rq, thread);
        } else {
                /*
                 * The thread left the run queue before we could
                 * lock the run queue.
                 */
                assert(thread->runq == PROCESSOR_NULL);
                processor = PROCESSOR_NULL;
        }

        pset_unlock(pset);

        return processor != PROCESSOR_NULL;
}

static boolean_t
sched_grrr_processor_queue_empty(processor_t processor __unused)
{
        boolean_t result;

        result = (processor->grrr_runq.count == 0);

        return result;
}

static boolean_t
sched_grrr_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte __unused)
{
        grrr_run_queue_t rq = &processor->grrr_runq;
        unsigned int i;

        i = grrr_group_mapping[grrr_priority_mapping[priority]];
        for (; i < NUM_GRRR_GROUPS; i++) {
                if (rq->groups[i].count > 0) {
                        return TRUE;
                }
        }

        return FALSE;
}

/* Implement sched_preempt_pri in code */
static boolean_t
sched_grrr_priority_is_urgent(int priority)
{
        if (priority <= BASEPRI_FOREGROUND) {
                return FALSE;
        }

        if (priority < MINPRI_KERNEL) {
                return TRUE;
        }

        if (priority >= BASEPRI_PREEMPT) {
                return TRUE;
        }

        return FALSE;
}

static ast_t
sched_grrr_processor_csw_check(processor_t processor)
{
        int count;

        count = sched_grrr_processor_runq_count(processor);

        if (count > 0) {
                return AST_PREEMPT;
        }

        return AST_NONE;
}

static uint32_t
sched_grrr_initial_quantum_size(thread_t thread __unused)
{
        return grrr_quantum;
}

static sched_mode_t
sched_grrr_initial_thread_sched_mode(task_t parent_task)
{
        if (parent_task == kernel_task) {
                return TH_MODE_FIXED;
        } else {
                return TH_MODE_TIMESHARE;
        }
}

static boolean_t
sched_grrr_can_update_priority(thread_t thread __unused)
{
        return FALSE;
}

static void
sched_grrr_update_priority(thread_t thread __unused)
{
        return;
}

static void
sched_grrr_lightweight_update_priority(thread_t thread __unused)
{
        return;
}

static int
sched_grrr_processor_runq_count(processor_t processor)
{
        return processor->grrr_runq.count;
}

static uint64_t
sched_grrr_processor_runq_stats_count_sum(processor_t processor)
{
        return processor->grrr_runq.runq_stats.count_sum;
}

static int
sched_grrr_processor_bound_count(__unused processor_t processor)
{
        return 0;
}

static void
sched_grrr_thread_update_scan(__unused sched_update_scan_context_t scan_context)
{
        return;
}

#endif /* defined(CONFIG_SCHED_GRRR) */

#if defined(CONFIG_SCHED_GRRR_CORE)

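/*
 * Build the piecewise-linear map from Mach priorities (0..127) to
 * proportional-share priorities (0..255), and assign each proportional
 * priority to a group of four.
 */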
static void
grrr_priority_mapping_init(void)
{
        unsigned int i;

        /* Map 0->0 up to 10->20 */
        for (i = 0; i <= 10; i++) {
                grrr_priority_mapping[i] = (grrr_proportional_priority_t)(2 * i);
        }

        /* Map user priorities 11->33 up to 51->153 */
        for (i = 11; i <= 51; i++) {
                grrr_priority_mapping[i] = (grrr_proportional_priority_t)(3 * i);
        }

        /* Map high priorities 52->180 up to 127->255 */
        for (i = 52; i <= 127; i++) {
                grrr_priority_mapping[i] = (grrr_proportional_priority_t)(128 + i);
        }

        for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) {
#if 0
                unsigned j, k;
                /* Calculate log(i); */
                for (j = 0, k = 1; k <= i; j++, k *= 2) {
                        ;
                }
#endif

                /* Groups of 4 */
                grrr_group_mapping[i] = (grrr_group_index_t)(i >> 2);
        }
}

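/*
 * Round-robin within a single group: advance current_client to the next
 * thread in the group's client list, wrapping at the end of the list.
 */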
static thread_t
grrr_intragroup_schedule(grrr_group_t group)
{
        thread_t thread;

        if (group->count == 0) {
                return THREAD_NULL;
        }

        thread = group->current_client;
        if (thread == THREAD_NULL) {
                thread = (thread_t)(void *)queue_first(&group->clients);
        }

        if (1 /* deficit */) {
                group->current_client = (thread_t)(void *)queue_next((queue_entry_t)thread);
                if (queue_end(&group->clients, (queue_entry_t)group->current_client)) {
                        group->current_client = (thread_t)(void *)queue_first(&group->clients);
                }

                thread = group->current_client;
        }

        return thread;
}

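/*
 * Weighted round-robin across groups: charge one unit of work to the
 * selected group, then decide whether selection should advance to the next
 * group in the weight-sorted list or wrap back to the heaviest group.
 */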
static thread_t
grrr_intergroup_schedule(grrr_run_queue_t rq)
{
        thread_t thread;
        grrr_group_t group;

        if (rq->count == 0) {
                return THREAD_NULL;
        }

        group = rq->current_group;

        if (group == GRRR_GROUP_NULL) {
                group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        }

        thread = grrr_intragroup_schedule(group);

        if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) {
                grrr_rescale_work(rq);
        }
        group->work++;

        if (queue_end(&rq->sorted_group_list, queue_next((queue_entry_t)group))) {
                /* last group, go back to beginning */
                group = (grrr_group_t)queue_first(&rq->sorted_group_list);
        } else {
                grrr_group_t nextgroup = (grrr_group_t)queue_next((queue_entry_t)group);
                uint64_t orderleft, orderright;

                /*
                 * The well-ordering condition for intergroup selection is:
                 *
                 * (group->work+1) / (nextgroup->work+1) > (group->weight) / (nextgroup->weight)
                 *
                 * Multiply both sides by their denominators to avoid division
                 *
                 */
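                /*
                 * For example, with weights 10 and 5 and work counts 20 and 9,
                 * (20 + 1) * 5 = 105 > (9 + 1) * 10 = 100, so selection advances
                 * to the lighter group; otherwise it wraps back to the heaviest
                 * group at the head of the sorted list.
                 */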
                orderleft = (group->work + 1) * ((uint64_t)nextgroup->weight);
                orderright = (nextgroup->work + 1) * ((uint64_t)group->weight);
                if (orderleft > orderright) {
                        group = nextgroup;
                } else {
                        group = (grrr_group_t)queue_first(&rq->sorted_group_list);
                }
        }

        rq->current_group = group;

        return thread;
}

static void
grrr_runqueue_init(grrr_run_queue_t runq)
{
        grrr_group_index_t index;

        runq->count = 0;

        for (index = 0; index < NUM_GRRR_GROUPS; index++) {
                unsigned int prisearch;

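                /* Record the lowest proportional priority that maps into this group */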
                for (prisearch = 0;
                    prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES;
                    prisearch++) {
                        if (grrr_group_mapping[prisearch] == index) {
                                runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch;
                                break;
                        }
                }

                runq->groups[index].index = index;

                queue_init(&runq->groups[index].clients);
                runq->groups[index].count = 0;
                runq->groups[index].weight = 0;
                runq->groups[index].work = 0;
                runq->groups[index].current_client = THREAD_NULL;
        }

        queue_init(&runq->sorted_group_list);
        runq->weight = 0;
        runq->current_group = GRRR_GROUP_NULL;
}

static void
grrr_rescale_work(grrr_run_queue_t rq)
{
        grrr_group_index_t index;

        /* avoid overflow by scaling by 1/8th */
        for (index = 0; index < NUM_GRRR_GROUPS; index++) {
                rq->groups[index].work >>= 3;
        }

        rq->last_rescale_tick = grrr_rescale_tick;
}

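/*
 * Insert a thread into its group's client list, then reposition the group
 * in the run queue's weight-sorted group list to reflect its new weight.
 */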
static boolean_t
grrr_enqueue(
        grrr_run_queue_t rq,
        thread_t thread)
{
        grrr_proportional_priority_t gpriority;
        grrr_group_index_t gindex;
        grrr_group_t group;

        gpriority = grrr_priority_mapping[thread->sched_pri];
        gindex = grrr_group_mapping[gpriority];
        group = &rq->groups[gindex];

        if (group->count == 0) {
                /* Empty group, this is the first client */
                enqueue_tail(&group->clients, (queue_entry_t)thread);
                group->count = 1;
                group->weight = gpriority;
                group->current_client = thread;
        } else {
                /* Insert before the current client */
                if (group->current_client == THREAD_NULL ||
                    queue_first(&group->clients) == (queue_entry_t)group->current_client) {
                        enqueue_head(&group->clients, (queue_entry_t)thread);
                } else {
                        insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client));
                }
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                group->count++;
                group->weight += gpriority;

                /* Since there was already a client, this is on the per-processor sorted list already */
                remqueue((queue_entry_t)group);
        }

        grrr_sorted_list_insert_group(rq, group);

        rq->count++;
        rq->weight += gpriority;

        return FALSE;
}

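/*
 * Choose the next thread via intergroup then intragroup selection, dequeue
 * it, and update the group and run-queue accounting.
 */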
static thread_t
grrr_select(grrr_run_queue_t rq)
{
        thread_t thread;

        thread = grrr_intergroup_schedule(rq);
        if (thread != THREAD_NULL) {
                grrr_proportional_priority_t gpriority;
                grrr_group_index_t gindex;
                grrr_group_t group;

                gpriority = grrr_priority_mapping[thread->sched_pri];
                gindex = grrr_group_mapping[gpriority];
                group = &rq->groups[gindex];

                remqueue((queue_entry_t)thread);
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                group->count--;
                group->weight -= gpriority;
                if (group->current_client == thread) {
                        group->current_client = THREAD_NULL;
                }

                remqueue((queue_entry_t)group);
                if (group->count == 0) {
                        if (rq->current_group == group) {
                                rq->current_group = GRRR_GROUP_NULL;
                        }
                } else {
                        /* Need to re-insert in sorted location */
                        grrr_sorted_list_insert_group(rq, group);
                }

                rq->count--;
                rq->weight -= gpriority;

                thread->runq = PROCESSOR_NULL;
        }

        return thread;
}

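/*
 * Remove an arbitrary thread from the run queue, mirroring the dequeue
 * bookkeeping done in grrr_select().
 */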
static void
grrr_remove(
        grrr_run_queue_t rq,
        thread_t thread)
{
        grrr_proportional_priority_t gpriority;
        grrr_group_index_t gindex;
        grrr_group_t group;

        gpriority = grrr_priority_mapping[thread->sched_pri];
        gindex = grrr_group_mapping[gpriority];
        group = &rq->groups[gindex];

        remqueue((queue_entry_t)thread);
        SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
        group->count--;
        group->weight -= gpriority;
        if (group->current_client == thread) {
                group->current_client = THREAD_NULL;
        }

        remqueue((queue_entry_t)group);
        if (group->count == 0) {
                if (rq->current_group == group) {
                        rq->current_group = GRRR_GROUP_NULL;
                }
        } else {
                /* Need to re-insert in sorted location */
                grrr_sorted_list_insert_group(rq, group);
        }

        rq->count--;
        rq->weight -= gpriority;

        thread->runq = PROCESSOR_NULL;
}

static void
grrr_sorted_list_insert_group(grrr_run_queue_t rq,
    grrr_group_t group)
{
        /* Simple insertion sort */
        if (queue_empty(&rq->sorted_group_list)) {
                enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
        } else {
                grrr_group_t search_group;

                /*
                 * Start searching from the head (heaviest weight) for the first
                 * element less than us, so we can insert before it.
                 */
                search_group = (grrr_group_t)queue_first(&rq->sorted_group_list);
                while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
                        if (search_group->weight < group->weight) {
                                /* we should be before this */
                                search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                                break;
                        }
                        if (search_group->weight == group->weight) {
                                /* Use group index as a tie breaker */
                                if (search_group->index < group->index) {
                                        search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group);
                                        break;
                                }
                        }

                        /* otherwise, our weight is too small, keep going */
                        search_group = (grrr_group_t)queue_next((queue_entry_t)search_group);
                }

                if (queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) {
                        enqueue_tail(&rq->sorted_group_list, (queue_entry_t)group);
                } else {
                        insque((queue_entry_t)group, (queue_entry_t)search_group);
                }
        }
}

#endif /* defined(CONFIG_SCHED_GRRR_CORE) */