// xref: /xnu-11417.101.15/tests/sched/sched_test_harness/shadow_headers/misc_needed_deps.c
// Copyright (c) 2024 Apple Inc.  All rights reserved.

#include <mach/mach_time.h>

/*
 * Kernel-style MIN/MAX fallbacks for userspace builds.
 * NOTE: classic ternary macros — each argument may be evaluated twice,
 * so do not pass expressions with side effects.
 */
#ifndef MIN
#define MIN(a, b) (((a)<(b))?(a):(b))
#endif /* MIN */
#ifndef MAX
#define MAX(a, b) (((a)>(b))?(a):(b))
#endif  /* MAX */
11 
/* Overrides necessary for userspace code */
/* panic() becomes print-and-abort so test failures are loud and fatal. */
#define panic(...) ({ printf("Panicking:\n"); printf(__VA_ARGS__); abort(); })
/* Kernel tracing is a no-op in the harness. */
#define KDBG(...) (void)0
/* kalloc_type(type, count, flags) -> zeroed heap memory; flags ignored. */
#define kalloc_type(x, y, z) calloc((size_t)y, sizeof(x))
/* kfree_type(type, count, ptr) -> plain free of the pointer argument. */
#define kfree_type(x, y, z) free(z)
/* No boot-args in userspace; pretend the lookup always fails. */
#define PE_parse_boot_argn(x, y, z) FALSE
18 
/* Mock locks: single-threaded harness, so all locking collapses to no-ops. */
typedef void *lck_ticket_t;
#define decl_lck_mtx_data(class, name)     class int name
#define decl_simple_lock_data(class, name) class int name
#define pset_lock(x) (void)x
#define pset_unlock(x) (void)x
#define pset_assert_locked(x) (void)x
#define thread_lock(x) (void)x
#define thread_unlock(x) (void)x
28 
29 /* Processor-related */
30 #define PERCPU_DECL(type_t, name) type_t name
31 #include <kern/processor.h>
32 processor_t processor_array[MAX_SCHED_CPUS];
33 processor_set_t pset_array[MAX_PSETS];
34 
35 /* Expected global(s) */
36 static task_t kernel_task = NULL;
37 
/* Time conversion to mock the implementation in osfmk/arm/rtclock.c */
static mach_timebase_info_data_t timebase_info;

/*
 * Convert an interval of (interval * scale_factor) nanoseconds into
 * mach absolute-time units, stored through result.
 */
void
clock_interval_to_absolutetime_interval(uint32_t   interval,
    uint32_t   scale_factor,
    uint64_t * result)
{
	/* Re-query the timebase each call; cheap and keeps the mock stateless. */
	mach_timebase_info(&timebase_info);
	uint64_t nanosecs = (uint64_t) interval * scale_factor;
	/* abstime = ns * denom / numer (inverse of ns = abstime * numer / denom) */
	*result = nanosecs * timebase_info.denom / timebase_info.numer;
}
49 
50 /*
51  * thread struct from osfmk/kern/thread.h containing only fields needed by
52  * the Clutch runqueue logic, followed by needed functions from osfmk/kern/thread.c
53  * for operating on the __runq field
54  */
55 struct thread {
56 	int id;
57 	sched_mode_t sched_mode;
58 	int16_t                 sched_pri;              /* scheduled (current) priority */
59 	int16_t                 base_pri;               /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
60 	queue_chain_t                   runq_links;             /* run queue links */
61 	struct { processor_t    runq; } __runq; /* internally managed run queue assignment, see above comment */
62 	sched_bucket_t          th_sched_bucket;
63 	processor_t             bound_processor;        /* bound to a processor? */
64 	processor_t             last_processor;         /* processor last dispatched on */
65 	ast_t                   reason;         /* why we blocked */
66 	int                     state;
67 #define TH_WAIT                 0x01            /* queued for waiting */
68 #define TH_RUN                  0x04            /* running or on runq */
69 #define TH_IDLE                 0x80            /* idling processor */
70 #define TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
71 #define TH_SFLAG_POLLDEPRESS            0x0080          /* polled depress yield */
72 #define TH_SFLAG_DEPRESSED_MASK         (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
73 #define TH_SFLAG_BOUND_SOFT             0x20000         /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
74 	uint64_t                thread_id;             /* system wide unique thread-id */
75 	struct {
76 		uint64_t user_time;
77 		uint64_t system_time;
78 	} mock_recount_time;
79 	uint64_t sched_time_save;
80 	natural_t               sched_usage;            /* timesharing cpu usage [sched] */
81 	natural_t               pri_shift;              /* usage -> priority from pset */
82 	natural_t               cpu_usage;              /* instrumented cpu usage [%cpu] */
83 	natural_t               cpu_delta;              /* accumulated cpu_usage delta */
84 	struct thread_group     *thread_group;
85 	struct priority_queue_entry_stable      th_clutch_runq_link;
86 	struct priority_queue_entry_sched       th_clutch_pri_link;
87 	queue_chain_t                           th_clutch_timeshare_link;
88 	uint32_t                sched_flags;            /* current flag bits */
89 #define THREAD_BOUND_CLUSTER_NONE       (UINT32_MAX)
90 	uint32_t                 th_bound_cluster_id;
91 #if CONFIG_SCHED_EDGE
92 	bool            th_bound_cluster_enqueued;
93 	bool            th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
94 	bool            th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
95 	bool            th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
96 #endif /* CONFIG_SCHED_EDGE */
97 };
98 
99 void
thread_assert_runq_null(__assert_only thread_t thread)100 thread_assert_runq_null(__assert_only thread_t thread)
101 {
102 	assert(thread->__runq.runq == PROCESSOR_NULL);
103 }
104 
105 void
thread_assert_runq_nonnull(thread_t thread)106 thread_assert_runq_nonnull(thread_t thread)
107 {
108 	assert(thread->__runq.runq != PROCESSOR_NULL);
109 }
110 
111 void
thread_clear_runq(thread_t thread)112 thread_clear_runq(thread_t thread)
113 {
114 	thread_assert_runq_nonnull(thread);
115 	thread->__runq.runq = PROCESSOR_NULL;
116 }
117 
118 void
thread_set_runq_locked(thread_t thread,processor_t new_runq)119 thread_set_runq_locked(thread_t thread, processor_t new_runq)
120 {
121 	thread_assert_runq_null(thread);
122 	thread->__runq.runq = new_runq;
123 }
124 
125 processor_t
thread_get_runq_locked(thread_t thread)126 thread_get_runq_locked(thread_t thread)
127 {
128 	return thread->__runq.runq;
129 }
130 
131 uint64_t
thread_tid(thread_t thread)132 thread_tid(
133 	thread_t        thread)
134 {
135 	return thread != THREAD_NULL? thread->thread_id: 0;
136 }
137 
/* Satisfy recount dependency needed by osfmk/kern/sched.h */
/* Total thread CPU time = mocked user time + mocked system time. */
#define recount_thread_time_mach(thread) (thread->mock_recount_time.user_time + thread->mock_recount_time.system_time)
140 
141 /*
142  * thread_group struct from osfmk/kern/thread_group.c containing only fields
143  * needed by the Clutch runqueue logic, followed by needed functions from
144  * osfmk/kern/thread_group.c
145  */
146 struct thread_group {
147 	uint64_t                tg_id;
148 	struct sched_clutch     tg_sched_clutch;
149 };
150 
151 sched_clutch_t
sched_clutch_for_thread(thread_t thread)152 sched_clutch_for_thread(thread_t thread)
153 {
154 	assert(thread->thread_group != NULL);
155 	return &(thread->thread_group->tg_sched_clutch);
156 }
157 
158 sched_clutch_t
sched_clutch_for_thread_group(struct thread_group * thread_group)159 sched_clutch_for_thread_group(struct thread_group *thread_group)
160 {
161 	return &(thread_group->tg_sched_clutch);
162 }
163 
164 uint64_t
thread_group_get_id(struct thread_group * tg)165 thread_group_get_id(struct thread_group *tg)
166 {
167 	return tg->tg_id;
168 }
169 
#if CONFIG_SCHED_EDGE

/*
 * A thread is a heavy user of a shared resource type if flagged either by
 * the scheduler (heavy_user) or by performance control (heavy_perf_control).
 */
bool
thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
{
	return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
}

#endif /* CONFIG_SCHED_EDGE */
179