Searched refs:SCHED (Results 1 – 15 of 15) sorted by relevance
| /xnu-11417.121.6/osfmk/kern/ |
| H A D | syscall_subr.c | 110 result = SCHED(thread_should_yield)(myprocessor, current_thread()); in swtch_continue() 127 if (!SCHED(thread_should_yield)(myprocessor, current_thread())) { in swtch() 147 result = SCHED(thread_should_yield)(myprocessor, current_thread()); in swtch_pri_continue() 164 if (!SCHED(thread_should_yield)(myprocessor, current_thread())) { in swtch_pri() 306 bool should_yield = SCHED(thread_should_yield)(current_processor(), current_thread()); in thread_switch() 607 if (!SCHED(thread_should_yield)(myprocessor, self)) { in thread_yield_internal()
|
| H A D | sched_prim.c | 149 return os_atomic_load(&SCHED(rt_runq)(pset)->count, relaxed); in rt_runq_count() 155 return os_atomic_load_wide(&SCHED(rt_runq)(pset)->earliest_deadline, relaxed); in rt_runq_earliest_deadline() 162 rt_queue_t rt_run_queue = SCHED(rt_runq)(pset); in rt_runq_priority() 246 return os_atomic_load(&SCHED(rt_runq)(pset)->constraint, relaxed) <= rt_constraint_threshold; in rt_runq_is_low_latency() 568 kprintf("Scheduler: Default of %s\n", SCHED(sched_name)); in sched_init() 585 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string)); in sched_init() 591 SCHED(init)(); in sched_init() 592 SCHED(rt_init)(&pset0); in sched_init() 595 SCHED(pset_init)(&pset0); in sched_init() 596 SCHED(processor_init)(master_processor); in sched_init() [all …]
|
| H A D | priority.c | 200 if (SCHED(can_update_priority)(thread)) { in thread_quantum_expire() 201 SCHED(update_priority)(thread); in thread_quantum_expire() 203 SCHED(lightweight_update_priority)(thread); in thread_quantum_expire() 207 SCHED(quantum_expire)(thread); in thread_quantum_expire() 326 SCHED(update_thread_bucket)(thread); in sched_set_thread_base_priority() 391 priority = (int16_t)SCHED(compute_timeshare_priority)(thread); in thread_recompute_sched_pri() 966 SCHED(update_thread_bucket)(thread); in sched_set_thread_mode()
|
| H A D | sched_prim.h | 866 #define SCHED(f) (sched_edge_dispatch.f) macro 869 #define SCHED(f) (sched_amp_dispatch.f) macro 876 #define SCHED(f) (sched_clutch_dispatch.f) macro 879 #define SCHED(f) (sched_dualq_dispatch.f) macro
|
| H A D | processor.c | 310 SCHED(processor_init)(processor); in processor_init() 573 if (SCHED(multiple_psets_enabled) == FALSE) { in pset_create() 696 SCHED(pset_init)(pset); in pset_init() 697 SCHED(rt_init)(pset); in pset_init()
|
| H A D | host.c | 258 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL); in host_info() 1051 out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor); in get_sched_statistics() 1066 out->ps_runq_count_sum = SCHED(rt_runq_count_sum)(); in get_sched_statistics()
|
| H A D | thread.c | 1514 new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task); in thread_create_internal() 1987 if (SCHED(can_update_priority)(thread)) { in retrieve_thread_basic_info() 1988 SCHED(update_priority)(thread); in retrieve_thread_basic_info() 2168 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL); in thread_info_internal() 3705 return (uint64_t) (SCHED(initial_quantum_size)(thread) / 2); in thread_workq_quantum_size()
|
| H A D | startup.c | 1022 SCHED(run_count_incr)(thread); in load_context()
|
| H A D | thread_group.c | 1330 SCHED(thread_group_recommendation_change)(tg, new_recommendation); in sched_perfcontrol_thread_group_recommend()
|
| H A D | sched_clutch.c | 1239 if (SCHED(priority_is_urgent)(thread->sched_pri)) { in sched_clutch_root_urgency_inc() 1259 if (SCHED(priority_is_urgent)(thread->sched_pri)) { in sched_clutch_root_urgency_dec()
|
| H A D | thread_policy.c | 1266 sched_mode_t newmode = SCHED(initial_thread_sched_mode)(get_threadtask(thread)); in thread_policy_reset()
|
| H A D | task.c | 5658 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL); in task_info()
|
| /xnu-11417.121.6/tests/sched/sched_test_harness/shadow_headers/ |
| H A D | sched_prim.c | 140 if (SCHED(priority_is_urgent)(rq->highq)) { in run_queue_dequeue() 183 if (SCHED(priority_is_urgent)(thread->sched_pri)) { in run_queue_enqueue() 209 if (SCHED(priority_is_urgent)(thread->sched_pri)) { in run_queue_remove()
|
| /xnu-11417.121.6/tests/sched/sched_test_harness/ |
| H A D | sched_edge_harness.c | 65 return SCHED(ipi_policy)(dst, thread, (dst->active_thread == NULL), event); in sched_ipi_action()
|
| /xnu-11417.121.6/doc/scheduler/ |
| H A D | sched_clutch_edge.md | 248 …`SCHED(steal_thread)` scheduler callout is invoked when the processor does not find any thread for … 262 …return THREAD_NULL for the steal callout and perform rebalancing as part of `SCHED(processor_balance)`… 269 If `SCHED(steal_thread)` did not return a thread for the processor, it indicates that the processor…
|