1 // Copyright (c) 2021 Apple Inc. All rights reserved.
2 //
3 // @APPLE_OSREFERENCE_LICENSE_HEADER_START@
4 //
5 // This file contains Original Code and/or Modifications of Original Code
6 // as defined in and that are subject to the Apple Public Source License
7 // Version 2.0 (the 'License'). You may not use this file except in
8 // compliance with the License. The rights granted to you under the License
9 // may not be used to create, or enable the creation or redistribution of,
10 // unlawful or unlicensed copies of an Apple operating system, or to
11 // circumvent, violate, or enable the circumvention or violation of, any
12 // terms of an Apple operating system software license agreement.
13 //
14 // Please obtain a copy of the License at
15 // http://www.opensource.apple.com/apsl/ and read it before using this file.
16 //
17 // The Original Code and all software distributed under the License are
18 // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 // Please see the License for the specific language governing rights and
23 // limitations under the License.
24 //
25 // @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26
27 #include <kern/assert.h>
28 #include <kern/kalloc.h>
29 #include <pexpert/pexpert.h>
30 #include <sys/kdebug.h>
31 #include <sys/_types/_size_t.h>
32 #if MONOTONIC
33 #include <kern/monotonic.h>
34 #endif // MONOTONIC
35 #include <kern/percpu.h>
36 #include <kern/processor.h>
37 #include <kern/recount.h>
38 #include <kern/startup.h>
39 #include <kern/task.h>
40 #include <kern/thread.h>
41 #include <mach/mach_time.h>
42 #include <mach/mach_types.h>
43 #include <machine/config.h>
44 #include <machine/machine_routines.h>
45 #include <os/atomic_private.h>
46 #include <stdbool.h>
47 #include <stdint.h>
48
49 // Recount's machine-independent implementation and interfaces for the kernel
50 // at-large.
51
52 #define PRECISE_USER_KERNEL_PMCS PRECISE_USER_KERNEL_TIME
53
54 // On non-release kernels, allow precise PMC (instructions, cycles) updates to
55 // be disabled for performance characterization.
56 #if PRECISE_USER_KERNEL_PMCS && (DEVELOPMENT || DEBUG)
57 #define PRECISE_USER_KERNEL_PMC_TUNABLE 1
58
59 TUNABLE(bool, no_precise_pmcs, "-no-precise-pmcs", false);
60 #endif // PRECISE_USER_KERNEL_PMCS
61
62 #if !PRECISE_USER_KERNEL_TIME
63 #define PRECISE_TIME_FATAL_FUNC OS_NORETURN
64 #define PRECISE_TIME_ONLY_FUNC OS_UNUSED
65 #else // !PRECISE_USER_KERNEL_TIME
66 #define PRECISE_TIME_FATAL_FUNC
67 #define PRECISE_TIME_ONLY_FUNC
68 #endif // PRECISE_USER_KERNEL_TIME
69
70 #if !PRECISE_USER_KERNEL_PMCS
71 #define PRECISE_PMCS_ONLY_FUNC OS_UNUSED
72 #else // !PRECISE_USER_KERNEL_PMCS
73 #define PRECISE_PMCS_ONLY_FUNC
74 #endif // PRECISE_USER_KERNEL_PMCS
75
76 #if HAS_CPU_DPE_COUNTER
77 // Only certain platforms have DPE counters.
78 #define RECOUNT_ENERGY CONFIG_PERVASIVE_ENERGY
79 #else // HAS_CPU_DPE_COUNTER
80 #define RECOUNT_ENERGY 0
81 #endif // !HAS_CPU_DPE_COUNTER
82
83 // Topography helpers.
84 size_t recount_topo_count(recount_topo_t topo);
85 static bool recount_topo_matches_cpu_kind(recount_topo_t topo,
86 recount_cpu_kind_t kind, size_t idx);
87 static size_t recount_topo_index(recount_topo_t topo, processor_t processor);
88 static size_t recount_convert_topo_index(recount_topo_t from, recount_topo_t to,
89 size_t i);
90
91 // Prevent counter updates before the system is ready.
92 __security_const_late bool recount_started = false;
93
94 // Lookup table that matches CPU numbers (indices) to their track index.
95 __security_const_late uint8_t _topo_cpu_kinds[MAX_CPUS] = { 0 };
96
97 __startup_func
98 static void
99 recount_startup(void)
100 {
101 #if __AMP__
102 unsigned int cpu_count = ml_get_cpu_count();
103 const ml_topology_info_t *topo_info = ml_get_topology_info();
104 for (unsigned int i = 0; i < cpu_count; i++) {
105 cluster_type_t type = topo_info->cpus[i].cluster_type;
106 uint8_t cluster_i = (type == CLUSTER_TYPE_P) ? RCT_CPU_PERFORMANCE :
107 RCT_CPU_EFFICIENCY;
108 _topo_cpu_kinds[i] = cluster_i;
109 }
110 #endif // __AMP__
111
112 recount_started = true;
113 }
114
115 STARTUP(PERCPU, STARTUP_RANK_LAST, recount_startup);
116
117 #pragma mark - tracks
118
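// Each plan pairs an accounting object with the topography its counters are
// kept at: threads, terminated tasks, and coalitions keep per-CPU-kind
// tracks, live tasks keep per-CPU tracks, and each processor only needs a
// single system-wide track.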
119 RECOUNT_PLAN_DEFINE(recount_thread_plan, RCT_TOPO_CPU_KIND);
120 RECOUNT_PLAN_DEFINE(recount_task_plan, RCT_TOPO_CPU);
121 RECOUNT_PLAN_DEFINE(recount_task_terminated_plan, RCT_TOPO_CPU_KIND);
122 RECOUNT_PLAN_DEFINE(recount_coalition_plan, RCT_TOPO_CPU_KIND);
123 RECOUNT_PLAN_DEFINE(recount_processor_plan, RCT_TOPO_SYSTEM);
124
125 OS_ALWAYS_INLINE
126 static inline uint64_t
127 recount_timestamp_speculative(void)
128 {
129 #if __arm__ || __arm64__
130 return ml_get_speculative_timebase();
131 #else // __arm__ || __arm64__
132 return mach_absolute_time();
133 #endif // !__arm__ && !__arm64__
134 }
135
136 OS_ALWAYS_INLINE
137 void
138 recount_snapshot_speculative(struct recount_snap *snap)
139 {
140 snap->rsn_time_mach = recount_timestamp_speculative();
141 #if CONFIG_PERVASIVE_CPI
142 mt_cur_cpu_cycles_instrs_speculative(&snap->rsn_cycles, &snap->rsn_insns);
143 #endif // CONFIG_PERVASIVE_CPI
144 }
145
146 void
147 recount_snapshot(struct recount_snap *snap)
148 {
149 #if __arm__ || __arm64__
150 __builtin_arm_isb(ISB_SY);
151 #endif // __arm__ || __arm64__
152 recount_snapshot_speculative(snap);
153 }
154
155 static struct recount_snap *
156 recount_get_snap(processor_t processor)
157 {
158 return &processor->pr_recount.rpr_snap;
159 }
160
161 // A simple sequence lock implementation.
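//
// Writers bump the generation counter to an odd value before modifying the
// protected counters and bump it back to even when done; readers snapshot the
// generation, copy the data, and retry if the generation was odd or changed
// across the copy.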
162
163 static void
164 _seqlock_shared_lock_slowpath(const uint32_t *lck, uint32_t gen)
165 {
166 disable_preemption();
167 do {
168 gen = hw_wait_while_equals32((uint32_t *)(uintptr_t)lck, gen);
169 } while (__improbable((gen & 1) != 0));
170 os_atomic_thread_fence(acquire);
171 enable_preemption();
172 }
173
174 static uintptr_t
175 _seqlock_shared_lock(const uint32_t *lck)
176 {
177 uint32_t gen = os_atomic_load(lck, acquire);
178 if (__improbable((gen & 1) != 0)) {
179 _seqlock_shared_lock_slowpath(lck, gen);
180 }
181 return gen;
182 }
183
184 static bool
185 _seqlock_shared_try_unlock(const uint32_t *lck, uintptr_t on_enter)
186 {
187 return os_atomic_load(lck, acquire) == on_enter;
188 }
189
190 static void
191 _seqlock_excl_lock_relaxed(uint32_t *lck)
192 {
193 __assert_only uintptr_t new = os_atomic_inc(lck, relaxed);
194 assert3u((new & 1), ==, 1);
195 }
196
197 static void
198 _seqlock_excl_commit(void)
199 {
200 os_atomic_thread_fence(release);
201 }
202
203 static void
204 _seqlock_excl_unlock_relaxed(uint32_t *lck)
205 {
206 __assert_only uint32_t new = os_atomic_inc(lck, relaxed);
207 assert3u((new & 1), ==, 0);
208 }
209
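// Select the track for the current processor under the plan's topography and
// take its exclusive sequence lock before the usage counters are mutated.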
210 static struct recount_track *
211 recount_update_start(struct recount_track *tracks, recount_topo_t topo,
212 processor_t processor)
213 {
214 struct recount_track *track = &tracks[recount_topo_index(topo, processor)];
215 _seqlock_excl_lock_relaxed(&track->rt_sync);
216 return track;
217 }
218
219 #if RECOUNT_ENERGY
220
221 static struct recount_track *
222 recount_update_single_start(struct recount_track *tracks, recount_topo_t topo,
223 processor_t processor)
224 {
225 return &tracks[recount_topo_index(topo, processor)];
226 }
227
228 #endif // RECOUNT_ENERGY
229
230 static void
231 recount_update_commit(void)
232 {
233 _seqlock_excl_commit();
234 }
235
236 static void
237 recount_update_end(struct recount_track *track)
238 {
239 _seqlock_excl_unlock_relaxed(&track->rt_sync);
240 }
241
242 static const struct recount_usage *
243 recount_read_start(const struct recount_track *track, uintptr_t *on_enter)
244 {
245 const struct recount_usage *stats = &track->rt_usage;
246 *on_enter = _seqlock_shared_lock(&track->rt_sync);
247 return stats;
248 }
249
250 static bool
251 recount_try_read_end(const struct recount_track *track, uintptr_t on_enter)
252 {
253 return _seqlock_shared_try_unlock(&track->rt_sync, on_enter);
254 }
255
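// Copy a consistent snapshot of a track's usage, retrying the copy if a
// writer updated the track concurrently.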
256 static void
257 recount_read_track(struct recount_usage *stats,
258 const struct recount_track *track)
259 {
260 uintptr_t on_enter = 0;
261 do {
262 const struct recount_usage *vol_stats =
263 recount_read_start(track, &on_enter);
264 *stats = *vol_stats;
265 } while (!recount_try_read_end(track, on_enter));
266 }
267
268 static void
269 recount_usage_add(struct recount_usage *sum, const struct recount_usage *to_add)
270 {
271 sum->ru_user_time_mach += to_add->ru_user_time_mach;
272 sum->ru_system_time_mach += to_add->ru_system_time_mach;
273 #if CONFIG_PERVASIVE_CPI
274 sum->ru_cycles += to_add->ru_cycles;
275 sum->ru_instructions += to_add->ru_instructions;
276 #endif // CONFIG_PERVASIVE_CPI
277 #if CONFIG_PERVASIVE_ENERGY
278 sum->ru_energy_nj += to_add->ru_energy_nj;
279 #endif // CONFIG_PERVASIVE_ENERGY
280 }
281
282 OS_ALWAYS_INLINE
283 static inline void
284 recount_usage_add_snap(struct recount_usage *usage, uint64_t *add_time,
285 struct recount_snap *snap)
286 {
287 *add_time += snap->rsn_time_mach;
288 #if CONFIG_PERVASIVE_CPI
289 usage->ru_cycles += snap->rsn_cycles;
290 usage->ru_instructions += snap->rsn_insns;
291 #else // CONFIG_PERVASIVE_CPI
292 #pragma unused(usage)
293 #endif // !CONFIG_PERVASIVE_CPI
294 }
295
296 static void
297 recount_rollup(recount_plan_t plan, const struct recount_track *tracks,
298 recount_topo_t to_topo, struct recount_usage *stats)
299 {
300 recount_topo_t from_topo = plan->rpl_topo;
301 size_t topo_count = recount_topo_count(from_topo);
302 struct recount_usage tmp = { 0 };
303 for (size_t i = 0; i < topo_count; i++) {
304 recount_read_track(&tmp, &tracks[i]);
305 size_t to_i = recount_convert_topo_index(from_topo, to_topo, i);
306 recount_usage_add(&stats[to_i], &tmp);
307 }
308 }
309
310 // This function must only be run when the track counters cannot be updated concurrently, such as when reading the current thread's own tracks.
311 static void
312 recount_rollup_unsafe(recount_plan_t plan, struct recount_track *tracks,
313 recount_topo_t to_topo, struct recount_usage *stats)
314 {
315 recount_topo_t from_topo = plan->rpl_topo;
316 size_t topo_count = recount_topo_count(from_topo);
317 for (size_t i = 0; i < topo_count; i++) {
318 size_t to_i = recount_convert_topo_index(from_topo, to_topo, i);
319 recount_usage_add(&stats[to_i], &tracks[i].rt_usage);
320 }
321 }
322
323 void
324 recount_sum(recount_plan_t plan, const struct recount_track *tracks,
325 struct recount_usage *sum)
326 {
327 recount_rollup(plan, tracks, RCT_TOPO_SYSTEM, sum);
328 }
329
330 void
331 recount_sum_unsafe(recount_plan_t plan, const struct recount_track *tracks,
332 struct recount_usage *sum)
333 {
334 recount_topo_t topo = plan->rpl_topo;
335 size_t topo_count = recount_topo_count(topo);
336 for (size_t i = 0; i < topo_count; i++) {
337 recount_usage_add(sum, &tracks[i].rt_usage);
338 }
339 }
340
341 void
342 recount_sum_and_isolate_cpu_kind(recount_plan_t plan,
343 struct recount_track *tracks, recount_cpu_kind_t kind,
344 struct recount_usage *sum, struct recount_usage *only_kind)
345 {
346 size_t topo_count = recount_topo_count(plan->rpl_topo);
347 struct recount_usage tmp = { 0 };
348 for (size_t i = 0; i < topo_count; i++) {
349 recount_read_track(&tmp, &tracks[i]);
350 recount_usage_add(sum, &tmp);
351 if (recount_topo_matches_cpu_kind(plan->rpl_topo, kind, i)) {
352 recount_usage_add(only_kind, &tmp);
353 }
354 }
355 }
356
357 static void
358 recount_sum_usage(recount_plan_t plan, const struct recount_usage *usages,
359 struct recount_usage *sum)
360 {
361 const size_t topo_count = recount_topo_count(plan->rpl_topo);
362 for (size_t i = 0; i < topo_count; i++) {
363 recount_usage_add(sum, &usages[i]);
364 }
365 }
366
367 void
368 recount_sum_usage_and_isolate_cpu_kind(recount_plan_t plan,
369 struct recount_usage *usage, recount_cpu_kind_t kind,
370 struct recount_usage *sum, struct recount_usage *only_kind)
371 {
372 const size_t topo_count = recount_topo_count(plan->rpl_topo);
373 for (size_t i = 0; i < topo_count; i++) {
374 recount_usage_add(sum, &usage[i]);
375 if (only_kind && recount_topo_matches_cpu_kind(plan->rpl_topo, kind, i)) {
376 recount_usage_add(only_kind, &usage[i]);
377 }
378 }
379 }
380
381 void
382 recount_sum_perf_levels(recount_plan_t plan, struct recount_track *tracks,
383 struct recount_usage *sums)
384 {
385 recount_rollup(plan, tracks, RCT_TOPO_CPU_KIND, sums);
386 }
387
388 // Plan-specific helpers.
389
390 void
391 recount_coalition_rollup_task(struct recount_coalition *co,
392 struct recount_task *tk)
393 {
394 recount_rollup(&recount_task_plan, tk->rtk_lifetime,
395 recount_coalition_plan.rpl_topo, co->rco_exited);
396 }
397
398 void
399 recount_task_rollup_thread(struct recount_task *tk,
400 const struct recount_thread *th)
401 {
402 recount_rollup(&recount_thread_plan, th->rth_lifetime,
403 recount_task_terminated_plan.rpl_topo, tk->rtk_terminated);
404 }
405
406 #pragma mark - scheduler
407
408 // `result = lhs - rhs` for snapshots.
409 OS_ALWAYS_INLINE
410 static void
411 recount_snap_diff(struct recount_snap *result,
412 const struct recount_snap *lhs, const struct recount_snap *rhs)
413 {
414 assert3u(lhs->rsn_time_mach, >=, rhs->rsn_time_mach);
415 result->rsn_time_mach = lhs->rsn_time_mach - rhs->rsn_time_mach;
416 #if CONFIG_PERVASIVE_CPI
417 assert3u(lhs->rsn_insns, >=, rhs->rsn_insns);
418 assert3u(lhs->rsn_cycles, >=, rhs->rsn_cycles);
419 result->rsn_cycles = lhs->rsn_cycles - rhs->rsn_cycles;
420 result->rsn_insns = lhs->rsn_insns - rhs->rsn_insns;
421 #endif // CONFIG_PERVASIVE_CPI
422 }
423
424 void
425 recount_update_snap(struct recount_snap *cur)
426 {
427 struct recount_snap *this_snap = recount_get_snap(current_processor());
428 this_snap->rsn_time_mach = cur->rsn_time_mach;
429 #if CONFIG_PERVASIVE_CPI
430 this_snap->rsn_cycles = cur->rsn_cycles;
431 this_snap->rsn_insns = cur->rsn_insns;
432 #endif // CONFIG_PERVASIVE_CPI
433 }
434
435 static void
436 _fix_time_precision(struct recount_usage *usage)
437 {
438 #if PRECISE_USER_KERNEL_TIME
439 #pragma unused(usage)
440 #else // PRECISE_USER_KERNEL_TIME
441 // Attribute all time to user, as the system is only acting "on behalf
442 // of" user processes -- a bit sketchy.
443 usage->ru_user_time_mach += usage->ru_system_time_mach;
444 usage->ru_system_time_mach = 0;
445 #endif // !PRECISE_USER_KERNEL_TIME
446 }
447
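// Sum the current thread's lifetime usage and fold in the time and counters
// accumulated on this processor since its last snapshot; interrupts must be
// disabled.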
448 void
449 recount_current_thread_usage(struct recount_usage *usage)
450 {
451 assert(ml_get_interrupts_enabled() == FALSE);
452 thread_t thread = current_thread();
453 struct recount_snap snap = { 0 };
454 recount_snapshot(&snap);
455 recount_sum_unsafe(&recount_thread_plan, thread->th_recount.rth_lifetime,
456 usage);
457 struct recount_snap *last = recount_get_snap(current_processor());
458 struct recount_snap diff = { 0 };
459 recount_snap_diff(&diff, &snap, last);
460 recount_usage_add_snap(usage, &usage->ru_system_time_mach, &diff);
461 _fix_time_precision(usage);
462 }
463
464 void
465 recount_current_thread_usage_perf_only(struct recount_usage *usage,
466 struct recount_usage *usage_perf_only)
467 {
468 struct recount_usage usage_perf_levels[RCT_CPU_KIND_COUNT] = { 0 };
469 recount_current_thread_perf_level_usage(usage_perf_levels);
470 recount_sum_usage(&recount_thread_plan, usage_perf_levels, usage);
471 *usage_perf_only = usage_perf_levels[RCT_CPU_PERFORMANCE];
472 _fix_time_precision(usage);
473 _fix_time_precision(usage_perf_only);
474 }
475
476 void
477 recount_thread_perf_level_usage(struct thread *thread,
478 struct recount_usage *usage_levels)
479 {
480 recount_rollup(&recount_thread_plan, thread->th_recount.rth_lifetime,
481 RCT_TOPO_CPU_KIND, usage_levels);
482 size_t topo_count = recount_topo_count(RCT_TOPO_CPU_KIND);
483 for (size_t i = 0; i < topo_count; i++) {
484 _fix_time_precision(&usage_levels[i]);
485 }
486 }
487
488 void
489 recount_current_thread_perf_level_usage(struct recount_usage *usage_levels)
490 {
491 assert(ml_get_interrupts_enabled() == FALSE);
492 processor_t processor = current_processor();
493 thread_t thread = current_thread();
494 struct recount_snap snap = { 0 };
495 recount_snapshot(&snap);
496 recount_rollup_unsafe(&recount_thread_plan, thread->th_recount.rth_lifetime,
497 RCT_TOPO_CPU_KIND, usage_levels);
498 struct recount_snap *last = recount_get_snap(processor);
499 struct recount_snap diff = { 0 };
500 recount_snap_diff(&diff, &snap, last);
501 size_t cur_i = recount_topo_index(RCT_TOPO_CPU_KIND, processor);
502 struct recount_usage *cur_usage = &usage_levels[cur_i];
503 recount_usage_add_snap(cur_usage, &cur_usage->ru_system_time_mach, &diff);
504 size_t topo_count = recount_topo_count(RCT_TOPO_CPU_KIND);
505 for (size_t i = 0; i < topo_count; i++) {
506 _fix_time_precision(&usage_levels[i]);
507 }
508 }
509
510 uint64_t
511 recount_current_thread_energy_nj(void)
512 {
513 #if RECOUNT_ENERGY
514 assert(ml_get_interrupts_enabled() == FALSE);
515 thread_t thread = current_thread();
516 size_t topo_count = recount_topo_count(recount_thread_plan.rpl_topo);
517 uint64_t energy_nj = 0;
518 for (size_t i = 0; i < topo_count; i++) {
519 energy_nj += thread->th_recount.rth_lifetime[i].rt_usage.ru_energy_nj;
520 }
521 return energy_nj;
522 #else // RECOUNT_ENERGY
523 return 0;
524 #endif // !RECOUNT_ENERGY
525 }
526
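// Fold a usage structure into user/system Mach times; when precise
// user/kernel time is disabled, all time is attributed to user.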
527 static void
528 _times_add_usage(struct recount_times_mach *times, struct recount_usage *usage)
529 {
530 times->rtm_user += usage->ru_user_time_mach;
531 #if PRECISE_USER_KERNEL_TIME
532 times->rtm_system += usage->ru_system_time_mach;
533 #else // PRECISE_USER_KERNEL_TIME
534 times->rtm_user += usage->ru_system_time_mach;
535 #endif // !PRECISE_USER_KERNEL_TIME
536 }
537
538 struct recount_times_mach
539 recount_thread_times(struct thread *thread)
540 {
541 size_t topo_count = recount_topo_count(recount_thread_plan.rpl_topo);
542 struct recount_times_mach times = { 0 };
543 for (size_t i = 0; i < topo_count; i++) {
544 _times_add_usage(&times, &thread->th_recount.rth_lifetime[i].rt_usage);
545 }
546 return times;
547 }
548
549 uint64_t
550 recount_thread_time_mach(struct thread *thread)
551 {
552 struct recount_times_mach times = recount_thread_times(thread);
553 return times.rtm_user + times.rtm_system;
554 }
555
556 static uint64_t
557 _time_since_last_snapshot(void)
558 {
559 struct recount_snap *last = recount_get_snap(current_processor());
560 uint64_t cur_time = mach_absolute_time();
561 return cur_time - last->rsn_time_mach;
562 }
563
564 uint64_t
565 recount_current_thread_time_mach(void)
566 {
567 assert(ml_get_interrupts_enabled() == FALSE);
568 uint64_t previous_time = recount_thread_time_mach(current_thread());
569 return previous_time + _time_since_last_snapshot();
570 }
571
572 struct recount_times_mach
573 recount_current_thread_times(void)
574 {
575 assert(ml_get_interrupts_enabled() == FALSE);
576 struct recount_times_mach times = recount_thread_times(
577 current_thread());
578 #if PRECISE_USER_KERNEL_TIME
579 times.rtm_user += _time_since_last_snapshot();
580 #else // PRECISE_USER_KERNEL_TIME
581 times.rtm_system += _time_since_last_snapshot();
582 #endif // !PRECISE_USER_KERNEL_TIME
583 return times;
584 }
585
586 void
587 recount_thread_usage(thread_t thread, struct recount_usage *usage)
588 {
589 recount_sum(&recount_thread_plan, thread->th_recount.rth_lifetime, usage);
590 _fix_time_precision(usage);
591 }
592
593 void
594 recount_current_task_usage(struct recount_usage *usage)
595 {
596 task_t task = current_task();
597 struct recount_track *tracks = task->tk_recount.rtk_lifetime;
598 recount_sum(&recount_task_plan, tracks, usage);
599 _fix_time_precision(usage);
600 }
601
602 void
603 recount_current_task_usage_perf_only(struct recount_usage *usage,
604 struct recount_usage *usage_perf_only)
605 {
606 task_t task = current_task();
607 struct recount_track *tracks = task->tk_recount.rtk_lifetime;
608 recount_sum_and_isolate_cpu_kind(&recount_task_plan,
609 tracks, RCT_CPU_PERFORMANCE, usage, usage_perf_only);
610 _fix_time_precision(usage);
611 _fix_time_precision(usage_perf_only);
612 }
613
614 void
615 recount_task_times_perf_only(struct task *task,
616 struct recount_times_mach *sum, struct recount_times_mach *sum_perf_only)
617 {
618 const recount_topo_t topo = recount_task_plan.rpl_topo;
619 const size_t topo_count = recount_topo_count(topo);
620 struct recount_track *tracks = task->tk_recount.rtk_lifetime;
621 for (size_t i = 0; i < topo_count; i++) {
622 struct recount_usage *usage = &tracks[i].rt_usage;
623 _times_add_usage(sum, usage);
624 if (recount_topo_matches_cpu_kind(topo, RCT_CPU_PERFORMANCE, i)) {
625 _times_add_usage(sum_perf_only, usage);
626 }
627 }
628 }
629
630 void
631 recount_task_terminated_usage(task_t task, struct recount_usage *usage)
632 {
633 recount_sum_usage(&recount_task_terminated_plan,
634 task->tk_recount.rtk_terminated, usage);
635 _fix_time_precision(usage);
636 }
637
638 struct recount_times_mach
639 recount_task_terminated_times(struct task *task)
640 {
641 size_t topo_count = recount_topo_count(recount_task_terminated_plan.rpl_topo);
642 struct recount_times_mach times = { 0 };
643 for (size_t i = 0; i < topo_count; i++) {
644 _times_add_usage(&times, &task->tk_recount.rtk_terminated[i]);
645 }
646 return times;
647 }
648
649 void
650 recount_task_terminated_usage_perf_only(task_t task,
651 struct recount_usage *usage, struct recount_usage *perf_only)
652 {
653 recount_sum_usage_and_isolate_cpu_kind(&recount_task_terminated_plan,
654 task->tk_recount.rtk_terminated, RCT_CPU_PERFORMANCE, usage, perf_only);
655 _fix_time_precision(usage);
656 _fix_time_precision(perf_only);
657 }
658
659 void
660 recount_task_usage_perf_only(task_t task, struct recount_usage *sum,
661 struct recount_usage *sum_perf_only)
662 {
663 recount_sum_and_isolate_cpu_kind(&recount_task_plan,
664 task->tk_recount.rtk_lifetime, RCT_CPU_PERFORMANCE, sum, sum_perf_only);
665 _fix_time_precision(sum);
666 _fix_time_precision(sum_perf_only);
667 }
668
669 void
670 recount_task_usage(task_t task, struct recount_usage *usage)
671 {
672 recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, usage);
673 _fix_time_precision(usage);
674 }
675
676 struct recount_times_mach
677 recount_task_times(struct task *task)
678 {
679 size_t topo_count = recount_topo_count(recount_task_plan.rpl_topo);
680 struct recount_times_mach times = { 0 };
681 for (size_t i = 0; i < topo_count; i++) {
682 _times_add_usage(&times, &task->tk_recount.rtk_lifetime[i].rt_usage);
683 }
684 return times;
685 }
686
687 uint64_t
688 recount_task_energy_nj(struct task *task)
689 {
690 #if RECOUNT_ENERGY
691 size_t topo_count = recount_topo_count(recount_task_plan.rpl_topo);
692 uint64_t energy = 0;
693 for (size_t i = 0; i < topo_count; i++) {
694 energy += task->tk_recount.rtk_lifetime[i].rt_usage.ru_energy_nj;
695 }
696 return energy;
697 #else // RECOUNT_ENERGY
698 #pragma unused(task)
699 return 0;
700 #endif // !RECOUNT_ENERGY
701 }
702
703 void
704 recount_coalition_usage_perf_only(struct recount_coalition *coal,
705 struct recount_usage *sum, struct recount_usage *sum_perf_only)
706 {
707 recount_sum_usage_and_isolate_cpu_kind(&recount_coalition_plan,
708 coal->rco_exited, RCT_CPU_PERFORMANCE, sum, sum_perf_only);
709 _fix_time_precision(sum);
710 _fix_time_precision(sum_perf_only);
711 }
712
713 OS_ALWAYS_INLINE
714 static void
715 recount_absorb_snap(struct recount_snap *to_add, thread_t thread, task_t task,
716 processor_t processor, bool from_user)
717 {
718 // Idle threads do not attribute their usage back to the task or processor,
719 // as the time is not spent "running."
720 //
721 // The processor-level metrics include idle time, instead, as the idle time
722 // needs to be read as up-to-date from `recount_processor_usage`.
723
724 bool was_idle = (thread->options & TH_OPT_IDLE_THREAD) != 0;
725
726 struct recount_track *th_track = recount_update_start(
727 thread->th_recount.rth_lifetime, recount_thread_plan.rpl_topo,
728 processor);
729 struct recount_track *tk_track = was_idle ? NULL : recount_update_start(
730 task->tk_recount.rtk_lifetime, recount_task_plan.rpl_topo,
731 processor);
732 struct recount_track *pr_track = was_idle ? NULL : recount_update_start(
733 &processor->pr_recount.rpr_active, recount_processor_plan.rpl_topo,
734 processor);
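// Order the odd generation values ahead of the counter updates below, so
// concurrent readers notice the update in progress and retry.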
735 recount_update_commit();
736
737 uint64_t *th_time = NULL, *tk_time = NULL, *pr_time = NULL;
738 if (from_user) {
739 th_time = &th_track->rt_usage.ru_user_time_mach;
740 tk_time = &tk_track->rt_usage.ru_user_time_mach;
741 pr_time = &pr_track->rt_usage.ru_user_time_mach;
742 } else {
743 th_time = &th_track->rt_usage.ru_system_time_mach;
744 tk_time = &tk_track->rt_usage.ru_system_time_mach;
745 pr_time = &pr_track->rt_usage.ru_system_time_mach;
746 }
747
748 recount_usage_add_snap(&th_track->rt_usage, th_time, to_add);
749 if (!was_idle) {
750 recount_usage_add_snap(&tk_track->rt_usage, tk_time, to_add);
751 recount_usage_add_snap(&pr_track->rt_usage, pr_time, to_add);
752 }
753
754 recount_update_commit();
755 recount_update_end(th_track);
756 if (!was_idle) {
757 recount_update_end(tk_track);
758 recount_update_end(pr_track);
759 }
760 }
761
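// Attribute the time and counters accumulated since this processor's last
// snapshot to the thread (and, unless it is the idle thread, its task and the
// processor), then advance the per-processor snapshot to `cur`.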
762 void
763 recount_switch_thread(struct recount_snap *cur, struct thread *off_thread,
764 struct task *off_task)
765 {
766 assert(ml_get_interrupts_enabled() == FALSE);
767
768 if (__improbable(!recount_started)) {
769 return;
770 }
771
772 processor_t processor = current_processor();
773
774 struct recount_snap *last = recount_get_snap(processor);
775 struct recount_snap diff = { 0 };
776 recount_snap_diff(&diff, cur, last);
777 recount_absorb_snap(&diff, off_thread, off_task, processor, false);
778 recount_update_snap(cur);
779 }
780
781 void
782 recount_add_energy(struct thread *off_thread, struct task *off_task,
783 uint64_t energy_nj)
784 {
785 #if RECOUNT_ENERGY
786 assert(ml_get_interrupts_enabled() == FALSE);
787 if (__improbable(!recount_started)) {
788 return;
789 }
790
791 bool was_idle = (off_thread->options & TH_OPT_IDLE_THREAD) != 0;
792 processor_t processor = current_processor();
793
794 struct recount_track *th_track = recount_update_single_start(
795 off_thread->th_recount.rth_lifetime, recount_thread_plan.rpl_topo,
796 processor);
797 struct recount_track *tk_track = was_idle ? NULL :
798 recount_update_single_start(off_task->tk_recount.rtk_lifetime,
799 recount_task_plan.rpl_topo, processor);
800 struct recount_track *pr_track = was_idle ? NULL :
801 recount_update_single_start(&processor->pr_recount.rpr_active,
802 recount_processor_plan.rpl_topo, processor);
803
804 th_track->rt_usage.ru_energy_nj += energy_nj;
805
806 if (!was_idle) {
807 tk_track->rt_usage.ru_energy_nj += energy_nj;
808 pr_track->rt_usage.ru_energy_nj += energy_nj;
809 }
810 #else // RECOUNT_ENERGY
811 #pragma unused(off_thread, off_task, energy_nj)
812 #endif // !RECOUNT_ENERGY
813 }
814
815
816 #define MT_KDBG_IC_CPU_CSWITCH \
817 KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_INSTRS_CYCLES, 1)
818
819 void
820 recount_log_switch_thread(const struct recount_snap *snap)
821 {
822 #if CONFIG_PERVASIVE_CPI
823 if (kdebug_debugid_explicitly_enabled(MT_KDBG_IC_CPU_CSWITCH)) {
824 // In Monotonic's event hierarchy for backwards-compatibility.
825 KDBG_RELEASE(MT_KDBG_IC_CPU_CSWITCH, snap->rsn_insns, snap->rsn_cycles);
826 }
827 #else // CONFIG_PERVASIVE_CPI
828 #pragma unused(snap)
829 #endif // CONFIG_PERVASIVE_CPI
830 }
831
832 OS_ALWAYS_INLINE
833 PRECISE_TIME_ONLY_FUNC
834 static void
835 recount_precise_transition_diff(struct recount_snap *diff,
836 struct recount_snap *last, struct recount_snap *cur)
837 {
838 #if PRECISE_USER_KERNEL_PMCS
839 #if PRECISE_USER_KERNEL_PMC_TUNABLE
840 // When the tunable disables precise PMCs, skip the PMC reads that the full
841 // `recount_snapshot_speculative` would perform and capture only the time.
842 if (__improbable(no_precise_pmcs)) {
843 cur->rsn_time_mach = recount_timestamp_speculative();
844 diff->rsn_time_mach = cur->rsn_time_mach - last->rsn_time_mach;
845 } else
846 #endif // PRECISE_USER_KERNEL_PMC_TUNABLE
847 {
848 recount_snapshot_speculative(cur);
849 recount_snap_diff(diff, cur, last);
850 }
851 #else // PRECISE_USER_KERNEL_PMCS
852 cur->rsn_time_mach = recount_timestamp_speculative();
853 diff->rsn_time_mach = cur->rsn_time_mach - last->rsn_time_mach;
854 #endif // !PRECISE_USER_KERNEL_PMCS
855 }
856
857 /// Called when entering or exiting the kernel to maintain the split between system and user counts; extremely performance-sensitive.
858 ///
859 /// Must be called with interrupts disabled.
860 ///
861 /// - Parameter from_user: Whether the kernel is being entered from user space.
862 ///
863 /// - Returns: The value of Mach time that was sampled inside this function.
864 PRECISE_TIME_FATAL_FUNC
865 static uint64_t
866 recount_kernel_transition(bool from_user)
867 {
868 #if PRECISE_USER_KERNEL_TIME
869 // Omit interrupts-disabled assertion for performance reasons.
870 processor_t processor = current_processor();
871 thread_t thread = processor->active_thread;
872 task_t task = get_thread_ro_unchecked(thread)->tro_task;
873
874 struct recount_snap *last = recount_get_snap(processor);
875 struct recount_snap diff = { 0 };
876 struct recount_snap cur = { 0 };
877 recount_precise_transition_diff(&diff, last, &cur);
878 recount_absorb_snap(&diff, thread, task, processor, from_user);
879 recount_update_snap(&cur);
880
881 return cur.rsn_time_mach;
882 #else // PRECISE_USER_KERNEL_TIME
883 #pragma unused(from_user)
884 panic("recount: kernel transition called with precise time off");
885 #endif // !PRECISE_USER_KERNEL_TIME
886 }
887
888 PRECISE_TIME_FATAL_FUNC
889 void
890 recount_leave_user(void)
891 {
892 recount_kernel_transition(true);
893 }
894
895 PRECISE_TIME_FATAL_FUNC
896 void
897 recount_enter_user(void)
898 {
899 recount_kernel_transition(false);
900 }
901
902 #if __x86_64__
903
904 void
905 recount_enter_intel_interrupt(x86_saved_state_t *state)
906 {
907 // The low bits of `%cs` being set indicate the interrupt was delivered while
908 // executing in user space.
909 bool from_user = (is_saved_state64(state) ? state->ss_64.isf.cs :
910 state->ss_32.cs) & 0x03;
911 uint64_t timestamp = recount_kernel_transition(from_user);
912 current_cpu_datap()->cpu_int_event_time = timestamp;
913 }
914
915 void
916 recount_leave_intel_interrupt(void)
917 {
918 // XXX This is not actually entering user space, but it does update the
919 // system timer, which is desirable.
920 recount_enter_user();
921 current_cpu_datap()->cpu_int_event_time = 0;
922 }
923
924 #endif // __x86_64__
925
926 // Set on rpr_state_last_abs_time when the processor is idle.
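// The remaining bits hold the Mach timebase value at which the idle or run
// transition was recorded.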
927 #define RCT_PR_IDLING (0x1ULL << 63)
928
929 void
930 recount_processor_idle(struct recount_processor *pr, struct recount_snap *snap)
931 {
932 __assert_only uint64_t state_time = os_atomic_load_wide(
933 &pr->rpr_state_last_abs_time, relaxed);
934 assert((state_time & RCT_PR_IDLING) == 0);
935 assert((snap->rsn_time_mach & RCT_PR_IDLING) == 0);
936 uint64_t new_state_stamp = RCT_PR_IDLING | snap->rsn_time_mach;
937 os_atomic_store_wide(&pr->rpr_state_last_abs_time, new_state_stamp,
938 relaxed);
939 }
940
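// Strip the idle flag to recover the Mach time stored in the state stamp.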
941 OS_PURE OS_ALWAYS_INLINE
942 static inline uint64_t
943 _state_time(uint64_t state_stamp)
944 {
945 return state_stamp & ~(RCT_PR_IDLING);
946 }
947
948 void
949 recount_processor_init(processor_t processor)
950 {
951 #if __AMP__
952 processor->pr_recount.rpr_cpu_kind_index =
953 processor->processor_set->pset_cluster_type == PSET_AMP_P ? 1 : 0;
954 #else // __AMP__
955 #pragma unused(processor)
956 #endif // !__AMP__
957 }
958
959 void
960 recount_processor_run(struct recount_processor *pr, struct recount_snap *snap)
961 {
962 uint64_t state = os_atomic_load_wide(&pr->rpr_state_last_abs_time, relaxed);
963 assert(state == 0 || (state & RCT_PR_IDLING) == RCT_PR_IDLING);
964 assert((snap->rsn_time_mach & RCT_PR_IDLING) == 0);
965 uint64_t new_state_stamp = snap->rsn_time_mach;
966 pr->rpr_idle_time_mach += snap->rsn_time_mach - _state_time(state);
967 os_atomic_store_wide(&pr->rpr_state_last_abs_time, new_state_stamp,
968 relaxed);
969 }
970
971 void
972 recount_processor_usage(struct recount_processor *pr,
973 struct recount_usage *usage, uint64_t *idle_time_out)
974 {
975 recount_sum(&recount_processor_plan, &pr->rpr_active, usage);
976 _fix_time_precision(usage);
977
978 uint64_t idle_time = pr->rpr_idle_time_mach;
979 uint64_t idle_stamp = os_atomic_load_wide(&pr->rpr_state_last_abs_time,
980 relaxed);
981 bool idle = (idle_stamp & RCT_PR_IDLING) == RCT_PR_IDLING;
982 if (idle) {
983 // Since processors can idle for some time without an update, make sure
984 // the idle time is up-to-date with respect to the caller.
985 idle_time += mach_absolute_time() - _state_time(idle_stamp);
986 }
987 *idle_time_out = idle_time;
988 }
989
990 bool
991 recount_task_thread_perf_level_usage(struct task *task, uint64_t tid,
992 struct recount_usage *usage_levels)
993 {
994 thread_t thread = task_findtid(task, tid);
995 if (thread != THREAD_NULL) {
996 if (thread == current_thread()) {
997 boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
998 recount_current_thread_perf_level_usage(usage_levels);
999 ml_set_interrupts_enabled(interrupt_state);
1000 } else {
1001 recount_thread_perf_level_usage(thread, usage_levels);
1002 }
1003 }
1004 return thread != THREAD_NULL;
1005 }
1006
1007 #pragma mark - utilities
1008
1009 // For rolling up counts, convert an index from one topography to another.
1010 static size_t
1011 recount_convert_topo_index(recount_topo_t from, recount_topo_t to, size_t i)
1012 {
1013 if (from == to) {
1014 return i;
1015 } else if (to == RCT_TOPO_SYSTEM) {
1016 return 0;
1017 } else if (from == RCT_TOPO_CPU) {
1018 assertf(to == RCT_TOPO_CPU_KIND,
1019 "recount: cannot convert from CPU topography to %d", to);
1020 return _topo_cpu_kinds[i];
1021 } else {
1022 panic("recount: unexpected rollup request from %d to %d", from, to);
1023 }
1024 }
1025
1026 // Get the track index of the provided processor and topography.
1027 OS_ALWAYS_INLINE
1028 static size_t
1029 recount_topo_index(recount_topo_t topo, processor_t processor)
1030 {
1031 switch (topo) {
1032 case RCT_TOPO_SYSTEM:
1033 return 0;
1034 case RCT_TOPO_CPU:
1035 return processor->cpu_id;
1036 case RCT_TOPO_CPU_KIND:
1037 #if __AMP__
1038 return processor->pr_recount.rpr_cpu_kind_index;
1039 #else // __AMP__
1040 return 0;
1041 #endif // !__AMP__
1042 default:
1043 panic("recount: invalid topology %u to index", topo);
1044 }
1045 }
1046
1047 // Return the number of tracks needed for a given topography.
1048 size_t
1049 recount_topo_count(recount_topo_t topo)
1050 {
1051 // Allow the compiler to reason about at least the system and CPU kind
1052 // counts.
1053 switch (topo) {
1054 case RCT_TOPO_SYSTEM:
1055 return 1;
1056
1057 case RCT_TOPO_CPU_KIND:
1058 #if __AMP__
1059 return 2;
1060 #else // __AMP__
1061 return 1;
1062 #endif // !__AMP__
1063
1064 case RCT_TOPO_CPU:
1065 #if __arm__ || __arm64__
1066 return ml_get_cpu_count();
1067 #else // __arm__ || __arm64__
1068 return ml_early_cpu_max_number() + 1;
1069 #endif // !__arm__ && !__arm64__
1070
1071 default:
1072 panic("recount: invalid topography %d", topo);
1073 }
1074 }
1075
1076 static bool
1077 recount_topo_matches_cpu_kind(recount_topo_t topo, recount_cpu_kind_t kind,
1078 size_t idx)
1079 {
1080 #if !__AMP__
1081 #pragma unused(kind, idx)
1082 #endif // !__AMP__
1083 switch (topo) {
1084 case RCT_TOPO_SYSTEM:
1085 return true;
1086
1087 case RCT_TOPO_CPU_KIND:
1088 #if __AMP__
1089 return kind == idx;
1090 #else // __AMP__
1091 return false;
1092 #endif // !__AMP__
1093
1094 case RCT_TOPO_CPU: {
1095 #if __AMP__
1096 return _topo_cpu_kinds[idx] == kind;
1097 #else // __AMP__
1098 return false;
1099 #endif // !__AMP__
1100 }
1101
1102 default:
1103 panic("recount: unexpected topography %d", topo);
1104 }
1105 }
1106
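// Allocation and lifecycle management for per-plan track and usage arrays,
// each sized by its plan's topography.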
1107 struct recount_track *
1108 recount_tracks_create(recount_plan_t plan)
1109 {
1110 return kalloc_type_tag(struct recount_track,
1111 recount_topo_count(plan->rpl_topo), Z_WAITOK | Z_ZERO | Z_NOFAIL,
1112 VM_KERN_MEMORY_RECOUNT);
1113 }
1114
1115 static void
1116 recount_tracks_copy(recount_plan_t plan, struct recount_track *dst,
1117 struct recount_track *src)
1118 {
1119 size_t topo_count = recount_topo_count(plan->rpl_topo);
1120 for (size_t i = 0; i < topo_count; i++) {
1121 recount_read_track(&dst[i].rt_usage, &src[i]);
1122 }
1123 }
1124
1125 void
1126 recount_tracks_destroy(recount_plan_t plan, struct recount_track *tracks)
1127 {
1128 kfree_type(struct recount_track, recount_topo_count(plan->rpl_topo),
1129 tracks);
1130 }
1131
1132 void
1133 recount_thread_init(struct recount_thread *th)
1134 {
1135 th->rth_lifetime = recount_tracks_create(&recount_thread_plan);
1136 }
1137
1138 void
1139 recount_thread_copy(struct recount_thread *dst, struct recount_thread *src)
1140 {
1141 recount_tracks_copy(&recount_thread_plan, dst->rth_lifetime,
1142 src->rth_lifetime);
1143 }
1144
1145 void
1146 recount_task_copy(struct recount_task *dst, const struct recount_task *src)
1147 {
1148 recount_tracks_copy(&recount_task_plan, dst->rtk_lifetime,
1149 src->rtk_lifetime);
1150 }
1151
1152 void
1153 recount_thread_deinit(struct recount_thread *th)
1154 {
1155 recount_tracks_destroy(&recount_thread_plan, th->rth_lifetime);
1156 }
1157
1158 void
1159 recount_task_init(struct recount_task *tk)
1160 {
1161 tk->rtk_lifetime = recount_tracks_create(&recount_task_plan);
1162 tk->rtk_terminated = recount_usage_alloc(
1163 recount_task_terminated_plan.rpl_topo);
1164 }
1165
1166 void
1167 recount_task_deinit(struct recount_task *tk)
1168 {
1169 recount_tracks_destroy(&recount_task_plan, tk->rtk_lifetime);
1170 recount_usage_free(recount_task_terminated_plan.rpl_topo,
1171 tk->rtk_terminated);
1172 }
1173
1174 void
1175 recount_coalition_init(struct recount_coalition *co)
1176 {
1177 co->rco_exited = recount_usage_alloc(recount_coalition_plan.rpl_topo);
1178 }
1179
1180 void
1181 recount_coalition_deinit(struct recount_coalition *co)
1182 {
1183 recount_usage_free(recount_coalition_plan.rpl_topo, co->rco_exited);
1184 }
1185
1186 struct recount_usage *
1187 recount_usage_alloc(recount_topo_t topo)
1188 {
1189 return kalloc_type_tag(struct recount_usage, recount_topo_count(topo),
1190 Z_WAITOK | Z_ZERO | Z_NOFAIL, VM_KERN_MEMORY_RECOUNT);
1191 }
1192
1193 void
1194 recount_usage_free(recount_topo_t topo, struct recount_usage *usage)
1195 {
1196 kfree_type(struct recount_usage, recount_topo_count(topo),
1197 usage);
1198 }
1199