1 // Copyright (c) 2021 Apple Inc. All rights reserved.
2 //
3 // @APPLE_OSREFERENCE_LICENSE_HEADER_START@
4 //
5 // This file contains Original Code and/or Modifications of Original Code
6 // as defined in and that are subject to the Apple Public Source License
7 // Version 2.0 (the 'License'). You may not use this file except in
8 // compliance with the License. The rights granted to you under the License
9 // may not be used to create, or enable the creation or redistribution of,
10 // unlawful or unlicensed copies of an Apple operating system, or to
11 // circumvent, violate, or enable the circumvention or violation of, any
12 // terms of an Apple operating system software license agreement.
13 //
14 // Please obtain a copy of the License at
15 // http://www.opensource.apple.com/apsl/ and read it before using this file.
16 //
17 // The Original Code and all software distributed under the License are
18 // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 // Please see the License for the specific language governing rights and
23 // limitations under the License.
24 //
25 // @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26
27 #include <kern/assert.h>
28 #include <kern/kalloc.h>
29 #include <pexpert/pexpert.h>
30 #include <sys/kdebug.h>
31 #include <sys/_types/_size_t.h>
32 #if MONOTONIC
33 #include <kern/monotonic.h>
34 #endif // MONOTONIC
35 #include <kern/percpu.h>
36 #include <kern/processor.h>
37 #include <kern/recount.h>
38 #include <kern/startup.h>
39 #include <kern/task.h>
40 #include <kern/thread.h>
41 #include <mach/mach_time.h>
42 #include <mach/mach_types.h>
43 #include <machine/config.h>
44 #include <machine/machine_routines.h>
45 #include <os/atomic_private.h>
46 #include <stdbool.h>
47 #include <stdint.h>
48
49 // Recount's machine-independent implementation and interfaces for the kernel
50 // at-large.
51
52 #define PRECISE_USER_KERNEL_PMCS PRECISE_USER_KERNEL_TIME
53
54 // On non-release kernels, allow precise PMC (instructions, cycles) updates to
55 // be disabled for performance characterization.
56 #if PRECISE_USER_KERNEL_PMCS && (DEVELOPMENT || DEBUG)
57 #define PRECISE_USER_KERNEL_PMC_TUNABLE 1
58
59 TUNABLE(bool, no_precise_pmcs, "-no-precise-pmcs", false);
#endif // PRECISE_USER_KERNEL_PMCS && (DEVELOPMENT || DEBUG)
61
62 #if !PRECISE_USER_KERNEL_TIME
63 #define PRECISE_TIME_FATAL_FUNC OS_NORETURN
64 #define PRECISE_TIME_ONLY_FUNC OS_UNUSED
65 #else // !PRECISE_USER_KERNEL_TIME
66 #define PRECISE_TIME_FATAL_FUNC
67 #define PRECISE_TIME_ONLY_FUNC
68 #endif // PRECISE_USER_KERNEL_TIME
69
70 #if !PRECISE_USER_KERNEL_PMCS
71 #define PRECISE_PMCS_ONLY_FUNC OS_UNUSED
#else // !PRECISE_USER_KERNEL_PMCS
73 #define PRECISE_PMCS_ONLY_FUNC
74 #endif // PRECISE_USER_KERNEL_PMCS
75
76 #if HAS_CPU_DPE_COUNTER
77 // Only certain platforms have DPE counters.
78 #define RECOUNT_ENERGY CONFIG_PERVASIVE_ENERGY
79 #else // HAS_CPU_DPE_COUNTER
80 #define RECOUNT_ENERGY 0
81 #endif // !HAS_CPU_DPE_COUNTER
82
83 // Topography helpers.
84 size_t recount_topo_count(recount_topo_t topo);
85 static bool recount_topo_matches_cpu_kind(recount_topo_t topo,
86 recount_cpu_kind_t kind, size_t idx);
87 static size_t recount_topo_index(recount_topo_t topo, processor_t processor);
88 static size_t recount_convert_topo_index(recount_topo_t from, recount_topo_t to,
89 size_t i);
90
91 // Prevent counter updates before the system is ready.
92 __security_const_late bool recount_started = false;
93
94 // Lookup table that matches CPU numbers (indices) to their track index.
95 __security_const_late uint8_t _topo_cpu_kinds[MAX_CPUS] = { 0 };
96
__startup_func
static void
recount_startup(void)
{
#if __AMP__
	// On asymmetric platforms, record each CPU's cluster kind so that
	// topography indexing can map a CPU number to its P/E track.
	unsigned int cpu_count = ml_get_cpu_count();
	const ml_topology_info_t *topo_info = ml_get_topology_info();
	for (unsigned int i = 0; i < cpu_count; i++) {
		cluster_type_t type = topo_info->cpus[i].cluster_type;
		uint8_t cluster_i = (type == CLUSTER_TYPE_P) ? RCT_CPU_PERFORMANCE :
		    RCT_CPU_EFFICIENCY;
		_topo_cpu_kinds[i] = cluster_i;
	}
#endif // __AMP__

	// Allow counter updates now that the lookup table is initialized.
	recount_started = true;
}
114
115 STARTUP(PERCPU, STARTUP_RANK_LAST, recount_startup);
116
117 #pragma mark - tracks
118
119 RECOUNT_PLAN_DEFINE(recount_thread_plan, RCT_TOPO_CPU_KIND);
120 RECOUNT_PLAN_DEFINE(recount_task_plan, RCT_TOPO_CPU);
121 RECOUNT_PLAN_DEFINE(recount_task_terminated_plan, RCT_TOPO_CPU_KIND);
122 RECOUNT_PLAN_DEFINE(recount_coalition_plan, RCT_TOPO_CPU_KIND);
123 RECOUNT_PLAN_DEFINE(recount_processor_plan, RCT_TOPO_SYSTEM);
124
// Read the current timestamp without a serializing barrier; callers needing
// ordering against prior instructions use `recount_snapshot` instead.
OS_ALWAYS_INLINE
static inline uint64_t
recount_timestamp_speculative(void)
{
#if __arm__ || __arm64__
	return ml_get_speculative_timebase();
#else // __arm__ || __arm64__
	return mach_absolute_time();
#endif // !__arm__ && !__arm64__
}
135
// Capture a timestamp (and cycles/instructions, when configured) snapshot
// without serializing instruction execution.
void
recount_snapshot_speculative(struct recount_snap *snap)
{
	snap->rsn_time_mach = recount_timestamp_speculative();
#if CONFIG_PERVASIVE_CPI
	mt_cur_cpu_cycles_instrs_speculative(&snap->rsn_cycles, &snap->rsn_insns);
#endif // CONFIG_PERVASIVE_CPI
}
144
// Like `recount_snapshot_speculative`, but with a barrier (on ARM) so the
// counter reads cannot be hoisted before preceding instructions complete.
void
recount_snapshot(struct recount_snap *snap)
{
#if __arm__ || __arm64__
	__builtin_arm_isb(ISB_SY);
#endif // __arm__ || __arm64__
	recount_snapshot_speculative(snap);
}
153
// Each CPU's snapshot taken at its last counter update.
struct recount_snap PERCPU_DATA(_snaps_percpu);

// A simple sequence lock implementation.

// Readers wait here while a writer holds the lock (generation is odd);
// `gen` is the odd generation observed on the fast path.
static void
_seqlock_shared_lock_slowpath(const uint32_t *lck, uint32_t gen)
{
	disable_preemption();
	do {
		// Spin until the generation changes, rechecking for odd (writer
		// still active) values.
		gen = hw_wait_while_equals32((uint32_t *)(uintptr_t)lck, gen);
	} while (__improbable((gen & 1) != 0));
	os_atomic_thread_fence(acquire);
	enable_preemption();
}

// Enter a read-side critical section; the returned generation is passed to
// `_seqlock_shared_try_unlock` to detect racing writers.
static uintptr_t
_seqlock_shared_lock(const uint32_t *lck)
{
	uint32_t gen = os_atomic_load(lck, acquire);
	if (__improbable((gen & 1) != 0)) {
		_seqlock_shared_lock_slowpath(lck, gen);
	}
	return gen;
}

// Leave a read-side critical section; returns false when a writer intervened
// and the read must be retried.
static bool
_seqlock_shared_try_unlock(const uint32_t *lck, uintptr_t on_enter)
{
	return os_atomic_load(lck, acquire) == on_enter;
}
184
185 static void
_seqlock_excl_lock_relaxed(uint32_t * lck)186 _seqlock_excl_lock_relaxed(uint32_t *lck)
187 {
188 __assert_only uintptr_t new = os_atomic_inc(lck, relaxed);
189 assertf((new & 1) == 1, "invalid seqlock in exclusive lock");
190 }
191
// Publish the writer's updates before the generation is bumped back to even.
static void
_seqlock_excl_commit(void)
{
	os_atomic_thread_fence(release);
}

// Leave the write-side critical section, making the generation even again.
static void
_seqlock_excl_unlock_relaxed(uint32_t *lck)
{
	__assert_only uint32_t new = os_atomic_inc(lck, relaxed);
	assertf((new & 1) == 0, "invalid seqlock in exclusive unlock");
}
204
// Begin a write-side update of the track for `processor` under `topo`; pair
// with `recount_update_commit` and `recount_update_end`.
static struct recount_track *
recount_update_start(struct recount_track *tracks, recount_topo_t topo,
    processor_t processor)
{
	struct recount_track *track = &tracks[recount_topo_index(topo, processor)];
	_seqlock_excl_lock_relaxed(&track->rt_sync);
	return track;
}

#if RECOUNT_ENERGY

// Locate the track for `processor` without taking the sequence lock; callers
// perform a single-field update (see `recount_add_energy`).
static struct recount_track *
recount_update_single_start(struct recount_track *tracks, recount_topo_t topo,
    processor_t processor)
{
	return &tracks[recount_topo_index(topo, processor)];
}

#endif // RECOUNT_ENERGY

// Make the pending updates visible to readers.
static void
recount_update_commit(void)
{
	_seqlock_excl_commit();
}

// Finish a write-side update started with `recount_update_start`.
static void
recount_update_end(struct recount_track *track)
{
	_seqlock_excl_unlock_relaxed(&track->rt_sync);
}
236
// Begin reading a track's usage; stores the observed generation in `on_enter`
// for the paired `recount_try_read_end`.
static const struct recount_usage *
recount_read_start(const struct recount_track *track, uintptr_t *on_enter)
{
	const struct recount_usage *stats = &track->rt_usage;
	*on_enter = _seqlock_shared_lock(&track->rt_sync);
	return stats;
}

// Returns true when no writer raced with the read begun at `on_enter`.
static bool
recount_try_read_end(const struct recount_track *track, uintptr_t on_enter)
{
	return _seqlock_shared_try_unlock(&track->rt_sync, on_enter);
}

// Copy a coherent snapshot of `track`'s usage into `stats`, retrying the copy
// until no writer interfered.
static void
recount_read_track(struct recount_usage *stats,
    const struct recount_track *track)
{
	uintptr_t on_enter = 0;
	do {
		const struct recount_usage *vol_stats =
		    recount_read_start(track, &on_enter);
		*stats = *vol_stats;
	} while (!recount_try_read_end(track, on_enter));
}
262
// Accumulate every field of `to_add` into `sum`.
static void
recount_usage_add(struct recount_usage *sum, const struct recount_usage *to_add)
{
	sum->ru_user_time_mach += to_add->ru_user_time_mach;
	sum->ru_system_time_mach += to_add->ru_system_time_mach;
#if CONFIG_PERVASIVE_CPI
	sum->ru_cycles += to_add->ru_cycles;
	sum->ru_instructions += to_add->ru_instructions;
#endif // CONFIG_PERVASIVE_CPI
#if CONFIG_PERVASIVE_ENERGY
	sum->ru_energy_nj += to_add->ru_energy_nj;
#endif // CONFIG_PERVASIVE_ENERGY
}
276
// Add a snapshot's deltas to `usage`, attributing the time delta to the field
// `add_time` points at (the caller picks user vs. system time).
OS_ALWAYS_INLINE
static inline void
recount_usage_add_snap(struct recount_usage *usage, uint64_t *add_time,
    struct recount_snap *snap)
{
	*add_time += snap->rsn_time_mach;
#if CONFIG_PERVASIVE_CPI
	usage->ru_cycles += snap->rsn_cycles;
	usage->ru_instructions += snap->rsn_insns;
#else // CONFIG_PERVASIVE_CPI
#pragma unused(usage)
#endif // !CONFIG_PERVASIVE_CPI
}
290
291 static void
recount_rollup(recount_plan_t plan,const struct recount_track * tracks,recount_topo_t to_topo,struct recount_usage * stats)292 recount_rollup(recount_plan_t plan, const struct recount_track *tracks,
293 recount_topo_t to_topo, struct recount_usage *stats)
294 {
295 recount_topo_t from_topo = plan->rpl_topo;
296 size_t topo_count = recount_topo_count(from_topo);
297 struct recount_usage tmp = { 0 };
298 for (size_t i = 0; i < topo_count; i++) {
299 recount_read_track(&tmp, &tracks[i]);
300 size_t to_i = recount_convert_topo_index(from_topo, to_topo, i);
301 recount_usage_add(&stats[to_i], &tmp);
302 }
303 }
304
305 // This function must be run when counters cannot increment for the track, like from the current thread.
306 static void
recount_rollup_unsafe(recount_plan_t plan,struct recount_track * tracks,recount_topo_t to_topo,struct recount_usage * stats)307 recount_rollup_unsafe(recount_plan_t plan, struct recount_track *tracks,
308 recount_topo_t to_topo, struct recount_usage *stats)
309 {
310 recount_topo_t from_topo = plan->rpl_topo;
311 size_t topo_count = recount_topo_count(from_topo);
312 for (size_t i = 0; i < topo_count; i++) {
313 size_t to_i = recount_convert_topo_index(from_topo, to_topo, i);
314 recount_usage_add(&stats[to_i], &tracks[i].rt_usage);
315 }
316 }
317
// Sum all of a plan's tracks into a single system-wide total.
void
recount_sum(recount_plan_t plan, const struct recount_track *tracks,
    struct recount_usage *sum)
{
	recount_rollup(plan, tracks, RCT_TOPO_SYSTEM, sum);
}

// Like `recount_sum`, but reads the tracks without the sequence lock; only
// safe when the counters cannot concurrently increment (see
// `recount_rollup_unsafe`).
void
recount_sum_unsafe(recount_plan_t plan, const struct recount_track *tracks,
    struct recount_usage *sum)
{
	recount_topo_t topo = plan->rpl_topo;
	size_t topo_count = recount_topo_count(topo);
	for (size_t i = 0; i < topo_count; i++) {
		recount_usage_add(sum, &tracks[i].rt_usage);
	}
}
335
336 void
recount_sum_and_isolate_cpu_kind(recount_plan_t plan,struct recount_track * tracks,recount_cpu_kind_t kind,struct recount_usage * sum,struct recount_usage * only_kind)337 recount_sum_and_isolate_cpu_kind(recount_plan_t plan,
338 struct recount_track *tracks, recount_cpu_kind_t kind,
339 struct recount_usage *sum, struct recount_usage *only_kind)
340 {
341 size_t topo_count = recount_topo_count(plan->rpl_topo);
342 struct recount_usage tmp = { 0 };
343 for (size_t i = 0; i < topo_count; i++) {
344 recount_read_track(&tmp, &tracks[i]);
345 recount_usage_add(sum, &tmp);
346 if (recount_topo_matches_cpu_kind(plan->rpl_topo, kind, i)) {
347 recount_usage_add(only_kind, &tmp);
348 }
349 }
350 }
351
352 static void
recount_sum_usage(recount_plan_t plan,const struct recount_usage * usages,struct recount_usage * sum)353 recount_sum_usage(recount_plan_t plan, const struct recount_usage *usages,
354 struct recount_usage *sum)
355 {
356 const size_t topo_count = recount_topo_count(plan->rpl_topo);
357 for (size_t i = 0; i < topo_count; i++) {
358 recount_usage_add(sum, &usages[i]);
359 }
360 }
361
// Sum per-topography usages into `sum` and, when `only_kind` is non-NULL, also
// accumulate the entries matching `kind` into `only_kind`.
void
recount_sum_usage_and_isolate_cpu_kind(recount_plan_t plan,
    struct recount_usage *usage, recount_cpu_kind_t kind,
    struct recount_usage *sum, struct recount_usage *only_kind)
{
	const size_t topo_count = recount_topo_count(plan->rpl_topo);
	for (size_t i = 0; i < topo_count; i++) {
		recount_usage_add(sum, &usage[i]);
		if (only_kind && recount_topo_matches_cpu_kind(plan->rpl_topo, kind, i)) {
			recount_usage_add(only_kind, &usage[i]);
		}
	}
}
375
// Roll a plan's tracks up into per-CPU-kind (performance level) sums.
void
recount_sum_perf_levels(recount_plan_t plan, struct recount_track *tracks,
    struct recount_usage *sums)
{
	recount_rollup(plan, tracks, RCT_TOPO_CPU_KIND, sums);
}

// Plan-specific helpers.

// Fold an exiting task's lifetime counters into its coalition's totals.
void
recount_coalition_rollup_task(struct recount_coalition *co,
    struct recount_task *tk)
{
	recount_rollup(&recount_task_plan, tk->rtk_lifetime,
	    recount_coalition_plan.rpl_topo, co->rco_exited);
}

// Fold a terminating thread's lifetime counters into its task's
// terminated-thread totals.
void
recount_task_rollup_thread(struct recount_task *tk,
    const struct recount_thread *th)
{
	recount_rollup(&recount_thread_plan, th->rth_lifetime,
	    recount_task_terminated_plan.rpl_topo, tk->rtk_terminated);
}
400
401 #pragma mark - scheduler
402
// `result = lhs - rhs` for snapshots; `lhs` must not predate `rhs` (asserted
// on each counter).
OS_ALWAYS_INLINE
static void
recount_snap_diff(struct recount_snap *result,
    const struct recount_snap *lhs, const struct recount_snap *rhs)
{
	assert3u(lhs->rsn_time_mach, >=, rhs->rsn_time_mach);
	result->rsn_time_mach = lhs->rsn_time_mach - rhs->rsn_time_mach;
#if CONFIG_PERVASIVE_CPI
	assert3u(lhs->rsn_insns, >=, rhs->rsn_insns);
	assert3u(lhs->rsn_cycles, >=, rhs->rsn_cycles);
	result->rsn_cycles = lhs->rsn_cycles - rhs->rsn_cycles;
	result->rsn_insns = lhs->rsn_insns - rhs->rsn_insns;
#endif // CONFIG_PERVASIVE_CPI
}
418
// Store `cur` as this CPU's last-snapshot baseline.
void
recount_update_snap(struct recount_snap *cur)
{
	struct recount_snap *this_snap = PERCPU_GET(_snaps_percpu);
	this_snap->rsn_time_mach = cur->rsn_time_mach;
#if CONFIG_PERVASIVE_CPI
	this_snap->rsn_cycles = cur->rsn_cycles;
	this_snap->rsn_insns = cur->rsn_insns;
#endif // CONFIG_PERVASIVE_CPI
}
429
// Collapse system time into user time when precise user/kernel accounting is
// compiled out.
static void
_fix_time_precision(struct recount_usage *usage)
{
#if PRECISE_USER_KERNEL_TIME
#pragma unused(usage)
#else // PRECISE_USER_KERNEL_TIME
	// Attribute all time to user, as the system is only acting "on behalf
	// of" user processes -- a bit sketchy.
	usage->ru_user_time_mach += usage->ru_system_time_mach;
	usage->ru_system_time_mach = 0;
#endif // !PRECISE_USER_KERNEL_TIME
}
442
// Report the current thread's usage, including counters accumulated since this
// CPU's last snapshot.  Interrupts must be disabled.
void
recount_current_thread_usage(struct recount_usage *usage)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	thread_t thread = current_thread();
	struct recount_snap snap = { 0 };
	recount_snapshot(&snap);
	// Reading without the seqlock is safe here: the thread is the current
	// one, so its counters cannot concurrently increment.
	recount_sum_unsafe(&recount_thread_plan, thread->th_recount.rth_lifetime,
	    usage);
	struct recount_snap *last = PERCPU_GET(_snaps_percpu);
	struct recount_snap diff = { 0 };
	recount_snap_diff(&diff, &snap, last);
	// This code runs in the kernel, so the in-flight delta is system time.
	recount_usage_add_snap(usage, &usage->ru_system_time_mach, &diff);
	_fix_time_precision(usage);
}
458
459 void
recount_current_thread_usage_perf_only(struct recount_usage * usage,struct recount_usage * usage_perf_only)460 recount_current_thread_usage_perf_only(struct recount_usage *usage,
461 struct recount_usage *usage_perf_only)
462 {
463 struct recount_usage usage_perf_levels[RCT_CPU_KIND_COUNT] = { 0 };
464 recount_current_thread_perf_level_usage(usage_perf_levels);
465 recount_sum_usage(&recount_thread_plan, usage_perf_levels, usage);
466 *usage_perf_only = usage_perf_levels[RCT_CPU_PERFORMANCE];
467 _fix_time_precision(usage);
468 _fix_time_precision(usage_perf_only);
469 }
470
471 void
recount_thread_perf_level_usage(struct thread * thread,struct recount_usage * usage_levels)472 recount_thread_perf_level_usage(struct thread *thread,
473 struct recount_usage *usage_levels)
474 {
475 recount_rollup(&recount_thread_plan, thread->th_recount.rth_lifetime,
476 RCT_TOPO_CPU_KIND, usage_levels);
477 size_t topo_count = recount_topo_count(RCT_TOPO_CPU_KIND);
478 for (size_t i = 0; i < topo_count; i++) {
479 _fix_time_precision(&usage_levels[i]);
480 }
481 }
482
// Report the current thread's usage per performance level, including counters
// accumulated since this CPU's last snapshot.  Interrupts must be disabled.
void
recount_current_thread_perf_level_usage(struct recount_usage *usage_levels)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	thread_t thread = current_thread();
	struct recount_snap snap = { 0 };
	recount_snapshot(&snap);
	// The unsafe (lock-free) rollup is allowed: the current thread's
	// counters cannot concurrently increment.
	recount_rollup_unsafe(&recount_thread_plan, thread->th_recount.rth_lifetime,
	    RCT_TOPO_CPU_KIND, usage_levels);
	struct recount_snap *last = PERCPU_GET(_snaps_percpu);
	struct recount_snap diff = { 0 };
	recount_snap_diff(&diff, &snap, last);
	// The in-flight delta belongs to the current CPU's kind, as system time.
	size_t cur_i = recount_topo_index(RCT_TOPO_CPU_KIND, current_processor());
	struct recount_usage *cur_usage = &usage_levels[cur_i];
	recount_usage_add_snap(cur_usage, &cur_usage->ru_system_time_mach, &diff);
	size_t topo_count = recount_topo_count(RCT_TOPO_CPU_KIND);
	for (size_t i = 0; i < topo_count; i++) {
		_fix_time_precision(&usage_levels[i]);
	}
}
503
504 uint64_t
recount_current_thread_energy_nj(void)505 recount_current_thread_energy_nj(void)
506 {
507 #if RECOUNT_ENERGY
508 assert(ml_get_interrupts_enabled() == FALSE);
509 thread_t thread = current_thread();
510 size_t topo_count = recount_topo_count(recount_thread_plan.rpl_topo);
511 uint64_t energy_nj = 0;
512 for (size_t i = 0; i < topo_count; i++) {
513 energy_nj += thread->th_recount.rth_lifetime[i].rt_usage.ru_energy_nj;
514 }
515 return energy_nj;
516 #else // RECOUNT_ENERGY
517 return 0;
518 #endif // !RECOUNT_ENERGY
519 }
520
// Accumulate a usage structure's times into `times`, folding system time into
// user time when precise user/kernel accounting is compiled out.
static void
_times_add_usage(struct recount_times_mach *times, struct recount_usage *usage)
{
	times->rtm_user += usage->ru_user_time_mach;
#if PRECISE_USER_KERNEL_TIME
	times->rtm_system += usage->ru_system_time_mach;
#else // PRECISE_USER_KERNEL_TIME
	times->rtm_user += usage->ru_system_time_mach;
#endif // !PRECISE_USER_KERNEL_TIME
}
531
// Sum `thread`'s lifetime user and system times across all of its tracks.
// NOTE(review): reads rt_usage without the track's sequence lock.
struct recount_times_mach
recount_thread_times(struct thread *thread)
{
	size_t topo_count = recount_topo_count(recount_thread_plan.rpl_topo);
	struct recount_times_mach times = { 0 };
	for (size_t i = 0; i < topo_count; i++) {
		_times_add_usage(&times, &thread->th_recount.rth_lifetime[i].rt_usage);
	}
	return times;
}

// Total (user + system) lifetime time for `thread`, in Mach time units.
uint64_t
recount_thread_time_mach(struct thread *thread)
{
	struct recount_times_mach times = recount_thread_times(thread);
	return times.rtm_user + times.rtm_system;
}
549
// Mach time elapsed on this CPU since its last recorded snapshot.
static uint64_t
_time_since_last_snapshot(void)
{
	struct recount_snap *last = PERCPU_GET(_snaps_percpu);
	uint64_t cur_time = mach_absolute_time();
	return cur_time - last->rsn_time_mach;
}

// Total time for the current thread, including the time accumulated since the
// last snapshot.  Interrupts must be disabled.
uint64_t
recount_current_thread_time_mach(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	uint64_t previous_time = recount_thread_time_mach(current_thread());
	return previous_time + _time_since_last_snapshot();
}
565
566 struct recount_times_mach
recount_current_thread_times(void)567 recount_current_thread_times(void)
568 {
569 assert(ml_get_interrupts_enabled() == FALSE);
570 struct recount_times_mach times = recount_thread_times(
571 current_thread());
572 #if PRECISE_USER_KERNEL_TIME
573 times.rtm_user += _time_since_last_snapshot();
574 #else // PRECISE_USER_KERNEL_TIME
575 times.rtm_system += _time_since_last_snapshot();
576 #endif // !PRECISE_USER_KERNEL_TIME
577 return times;
578 }
579
// Sum `thread`'s lifetime usage across all CPUs into `usage`.
void
recount_thread_usage(thread_t thread, struct recount_usage *usage)
{
	recount_sum(&recount_thread_plan, thread->th_recount.rth_lifetime, usage);
	_fix_time_precision(usage);
}

// Sum the current task's lifetime usage across all CPUs into `usage`.
void
recount_current_task_usage(struct recount_usage *usage)
{
	task_t task = current_task();
	struct recount_track *tracks = task->tk_recount.rtk_lifetime;
	recount_sum(&recount_task_plan, tracks, usage);
	_fix_time_precision(usage);
}
595
596 void
recount_current_task_usage_perf_only(struct recount_usage * usage,struct recount_usage * usage_perf_only)597 recount_current_task_usage_perf_only(struct recount_usage *usage,
598 struct recount_usage *usage_perf_only)
599 {
600 task_t task = current_task();
601 struct recount_track *tracks = task->tk_recount.rtk_lifetime;
602 recount_sum_and_isolate_cpu_kind(&recount_task_plan,
603 tracks, RCT_CPU_PERFORMANCE, usage, usage_perf_only);
604 _fix_time_precision(usage);
605 _fix_time_precision(usage_perf_only);
606 }
607
608 void
recount_task_times_perf_only(struct task * task,struct recount_times_mach * sum,struct recount_times_mach * sum_perf_only)609 recount_task_times_perf_only(struct task *task,
610 struct recount_times_mach *sum, struct recount_times_mach *sum_perf_only)
611 {
612 const recount_topo_t topo = recount_task_plan.rpl_topo;
613 const size_t topo_count = recount_topo_count(topo);
614 struct recount_track *tracks = task->tk_recount.rtk_lifetime;
615 for (size_t i = 0; i < topo_count; i++) {
616 struct recount_usage *usage = &tracks[i].rt_usage;
617 _times_add_usage(sum, usage);
618 if (recount_topo_matches_cpu_kind(topo, RCT_CPU_PERFORMANCE, i)) {
619 _times_add_usage(sum_perf_only, usage);
620 }
621 }
622 }
623
// Usage already rolled up from `task`'s terminated threads.
void
recount_task_terminated_usage(task_t task, struct recount_usage *usage)
{
	recount_sum_usage(&recount_task_terminated_plan,
	    task->tk_recount.rtk_terminated, usage);
	_fix_time_precision(usage);
}

// User/system times already rolled up from `task`'s terminated threads.
struct recount_times_mach
recount_task_terminated_times(struct task *task)
{
	size_t topo_count = recount_topo_count(recount_task_terminated_plan.rpl_topo);
	struct recount_times_mach times = { 0 };
	for (size_t i = 0; i < topo_count; i++) {
		_times_add_usage(&times, &task->tk_recount.rtk_terminated[i]);
	}
	return times;
}
642
// Terminated-thread usage for `task`, with the performance-core portion
// isolated into `perf_only`.
void
recount_task_terminated_usage_perf_only(task_t task,
    struct recount_usage *usage, struct recount_usage *perf_only)
{
	recount_sum_usage_and_isolate_cpu_kind(&recount_task_terminated_plan,
	    task->tk_recount.rtk_terminated, RCT_CPU_PERFORMANCE, usage, perf_only);
	_fix_time_precision(usage);
	_fix_time_precision(perf_only);
}

// Lifetime usage for `task`, with the performance-core portion isolated into
// `sum_perf_only`.
void
recount_task_usage_perf_only(task_t task, struct recount_usage *sum,
    struct recount_usage *sum_perf_only)
{
	recount_sum_and_isolate_cpu_kind(&recount_task_plan,
	    task->tk_recount.rtk_lifetime, RCT_CPU_PERFORMANCE, sum, sum_perf_only);
	_fix_time_precision(sum);
	_fix_time_precision(sum_perf_only);
}
662
// Sum `task`'s lifetime usage across all CPUs into `usage`.
void
recount_task_usage(task_t task, struct recount_usage *usage)
{
	recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, usage);
	_fix_time_precision(usage);
}

// Sum `task`'s lifetime user and system times.
// NOTE(review): reads rt_usage without the track's sequence lock.
struct recount_times_mach
recount_task_times(struct task *task)
{
	size_t topo_count = recount_topo_count(recount_task_plan.rpl_topo);
	struct recount_times_mach times = { 0 };
	for (size_t i = 0; i < topo_count; i++) {
		_times_add_usage(&times, &task->tk_recount.rtk_lifetime[i].rt_usage);
	}
	return times;
}
680
681 uint64_t
recount_task_energy_nj(struct task * task)682 recount_task_energy_nj(struct task *task)
683 {
684 #if RECOUNT_ENERGY
685 size_t topo_count = recount_topo_count(recount_task_plan.rpl_topo);
686 uint64_t energy = 0;
687 for (size_t i = 0; i < topo_count; i++) {
688 energy += task->tk_recount.rtk_lifetime[i].rt_usage.ru_energy_nj;
689 }
690 return energy;
691 #else // RECOUNT_ENERGY
692 #pragma unused(task)
693 return 0;
694 #endif // !RECOUNT_ENERGY
695 }
696
// Usage rolled up from the coalition's exited tasks, with the performance-core
// portion isolated into `sum_perf_only`.
void
recount_coalition_usage_perf_only(struct recount_coalition *coal,
    struct recount_usage *sum, struct recount_usage *sum_perf_only)
{
	recount_sum_usage_and_isolate_cpu_kind(&recount_coalition_plan,
	    coal->rco_exited, RCT_CPU_PERFORMANCE, sum, sum_perf_only);
	_fix_time_precision(sum);
	_fix_time_precision(sum_perf_only);
}
706
707 OS_ALWAYS_INLINE
708 static void
recount_absorb_snap(struct recount_snap * to_add,thread_t thread,task_t task,processor_t processor,bool from_user)709 recount_absorb_snap(struct recount_snap *to_add, thread_t thread, task_t task,
710 processor_t processor, bool from_user)
711 {
712 // Idle threads do not attribute their usage back to the task or processor,
713 // as the time is not spent "running."
714 //
715 // The processor-level metrics include idle time, instead, as the idle time
716 // needs to be read as up-to-date from `recount_processor_usage`.
717
718 bool was_idle = (thread->options & TH_OPT_IDLE_THREAD) != 0;
719
720 struct recount_track *th_track = recount_update_start(
721 thread->th_recount.rth_lifetime, recount_thread_plan.rpl_topo,
722 processor);
723 struct recount_track *tk_track = was_idle ? NULL : recount_update_start(
724 task->tk_recount.rtk_lifetime, recount_task_plan.rpl_topo,
725 processor);
726 struct recount_track *pr_track = was_idle ? NULL : recount_update_start(
727 &processor->pr_recount.rpr_active, recount_processor_plan.rpl_topo,
728 processor);
729 recount_update_commit();
730
731 uint64_t *th_time = NULL, *tk_time = NULL, *pr_time = NULL;
732 if (from_user) {
733 th_time = &th_track->rt_usage.ru_user_time_mach;
734 tk_time = &tk_track->rt_usage.ru_user_time_mach;
735 pr_time = &pr_track->rt_usage.ru_user_time_mach;
736 } else {
737 th_time = &th_track->rt_usage.ru_system_time_mach;
738 tk_time = &tk_track->rt_usage.ru_system_time_mach;
739 pr_time = &pr_track->rt_usage.ru_system_time_mach;
740 }
741
742 recount_usage_add_snap(&th_track->rt_usage, th_time, to_add);
743 if (!was_idle) {
744 recount_usage_add_snap(&tk_track->rt_usage, tk_time, to_add);
745 recount_usage_add_snap(&pr_track->rt_usage, pr_time, to_add);
746 }
747
748 recount_update_commit();
749 recount_update_end(th_track);
750 if (!was_idle) {
751 recount_update_end(tk_track);
752 recount_update_end(pr_track);
753 }
754 }
755
// Charge the counters accumulated since the last snapshot to the thread (and
// task) switching off-CPU, then make `cur` the new per-CPU baseline.
void
recount_switch_thread(struct recount_snap *cur, struct thread *off_thread,
    struct task *off_task)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	if (__improbable(!recount_started)) {
		// Counters are not safe to update until startup completes.
		return;
	}

	processor_t processor = current_processor();

	struct recount_snap *last = PERCPU_GET(_snaps_percpu);
	struct recount_snap diff = { 0 };
	recount_snap_diff(&diff, cur, last);
	// Context switches happen in the kernel, so charge as system time.
	recount_absorb_snap(&diff, off_thread, off_task, processor, false);
	recount_update_snap(cur);
}
774
// Charge `energy_nj` nanojoules to the off-going thread and, unless it was the
// idle thread, to its task and the current processor.
void
recount_add_energy(struct thread *off_thread, struct task *off_task,
    uint64_t energy_nj)
{
#if RECOUNT_ENERGY
	assert(ml_get_interrupts_enabled() == FALSE);
	if (__improbable(!recount_started)) {
		return;
	}

	bool was_idle = (off_thread->options & TH_OPT_IDLE_THREAD) != 0;
	processor_t processor = current_processor();

	// Single-field updates; the tracks' sequence locks are not taken here.
	struct recount_track *th_track = recount_update_single_start(
		off_thread->th_recount.rth_lifetime, recount_thread_plan.rpl_topo,
		processor);
	struct recount_track *tk_track = was_idle ? NULL :
	    recount_update_single_start(off_task->tk_recount.rtk_lifetime,
	    recount_task_plan.rpl_topo, processor);
	struct recount_track *pr_track = was_idle ? NULL :
	    recount_update_single_start(&processor->pr_recount.rpr_active,
	    recount_processor_plan.rpl_topo, processor);

	th_track->rt_usage.ru_energy_nj += energy_nj;

	if (!was_idle) {
		tk_track->rt_usage.ru_energy_nj += energy_nj;
		pr_track->rt_usage.ru_energy_nj += energy_nj;
	}
#else // RECOUNT_ENERGY
#pragma unused(off_thread, off_task, energy_nj)
#endif // !RECOUNT_ENERGY
}
808
809
810 #define MT_KDBG_IC_CPU_CSWITCH \
811 KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_INSTRS_CYCLES, 1)
812
// Emit the instructions/cycles context-switch tracepoint, if it is enabled.
void
recount_log_switch_thread(const struct recount_snap *snap)
{
#if CONFIG_PERVASIVE_CPI
	if (kdebug_debugid_explicitly_enabled(MT_KDBG_IC_CPU_CSWITCH)) {
		// In Monotonic's event hierarchy for backwards-compatibility.
		KDBG_RELEASE(MT_KDBG_IC_CPU_CSWITCH, snap->rsn_insns, snap->rsn_cycles);
	}
#else // CONFIG_PERVASIVE_CPI
#pragma unused(snap)
#endif // CONFIG_PERVASIVE_CPI
}
825
// Sample a fresh snapshot into `cur` and compute `diff = cur - last`, skipping
// the PMC reads when they are disabled (at build time or by boot-arg).
PRECISE_TIME_ONLY_FUNC
static void
recount_precise_transition_diff(struct recount_snap *diff,
    struct recount_snap *last, struct recount_snap *cur)
{
#if PRECISE_USER_KERNEL_PMCS
#if PRECISE_USER_KERNEL_PMC_TUNABLE
	// The full `recount_snapshot_speculative` shouldn't get PMCs with a tunable
	// in this configuration.
	if (__improbable(no_precise_pmcs)) {
		cur->rsn_time_mach = recount_timestamp_speculative();
		diff->rsn_time_mach = cur->rsn_time_mach - last->rsn_time_mach;
	} else
#endif // PRECISE_USER_KERNEL_PMC_TUNABLE
	{
		recount_snapshot_speculative(cur);
		recount_snap_diff(diff, cur, last);
	}
#else // PRECISE_USER_KERNEL_PMCS
	// Time only: sample and subtract just the timestamps.
	cur->rsn_time_mach = recount_timestamp_speculative();
	diff->rsn_time_mach = cur->rsn_time_mach - last->rsn_time_mach;
#endif // !PRECISE_USER_KERNEL_PMCS
}
849
// Must be called with interrupts disabled (no assertion because this is on the
// kernel-user transition boundary, performance sensitive).
//
// Diff against this CPU's last snapshot, absorb the delta into the current
// thread, task, and processor (presumably attributed to user or kernel time
// per `from_user` -- see `recount_absorb_snap`), and store the new snapshot.
// Returns the Mach timestamp of the new snapshot.  Panics if precise
// user/kernel time is compiled out.
PRECISE_TIME_FATAL_FUNC
static uint64_t
recount_kernel_transition(bool from_user)
{
#if PRECISE_USER_KERNEL_TIME
	thread_t thread = current_thread();

	task_t task = get_threadtask(thread);
	processor_t processor = current_processor();

	struct recount_snap *last = PERCPU_GET(_snaps_percpu);
	struct recount_snap diff = { 0 };
	struct recount_snap cur = { 0 };
	recount_precise_transition_diff(&diff, last, &cur);
	recount_absorb_snap(&diff, thread, task, processor, from_user);
	// Make `cur` the new per-CPU baseline for the next transition.
	recount_update_snap(&cur);

	return cur.rsn_time_mach;
#else // PRECISE_USER_KERNEL_TIME
#pragma unused(from_user)
	panic("recount: kernel transition called with precise time off");
#endif // !PRECISE_USER_KERNEL_TIME
}
875
// Called when user space traps into the kernel: the interval since the last
// snapshot was spent in user space.
PRECISE_TIME_FATAL_FUNC
void
recount_leave_user(void)
{
	recount_kernel_transition(true);
}
882
// Called just before returning to user space: the interval since the last
// snapshot was spent in the kernel.
PRECISE_TIME_FATAL_FUNC
void
recount_enter_user(void)
{
	recount_kernel_transition(false);
}
889
890 #if __x86_64__
891
// Account for the time up to an interrupt's delivery on Intel, attributing it
// to user or kernel according to the interrupted context.
void
recount_enter_intel_interrupt(x86_saved_state_t *state)
{
	// The low bits of `%cs` being set indicate the interrupt was delivered
	// while executing in user space (CPL != 0).
	bool from_user = (is_saved_state64(state) ? state->ss_64.isf.cs :
	    state->ss_32.cs) & 0x03;
	// Record the interrupt entry time in the per-CPU data; it is cleared in
	// recount_leave_intel_interrupt().
	uint64_t timestamp = recount_kernel_transition(from_user);
	current_cpu_datap()->cpu_int_event_time = timestamp;
}
902
// Close out interrupt accounting on Intel and clear the per-CPU interrupt
// entry timestamp set by recount_enter_intel_interrupt().
void
recount_leave_intel_interrupt(void)
{
	// XXX This is not actually entering user space, but it does update the
	// system timer, which is desirable.
	recount_enter_user();
	current_cpu_datap()->cpu_int_event_time = 0;
}
911
912 #endif // __x86_64__
913
// Set on rpr_state_last_abs_time when the processor is idle.
#define RCT_PR_IDLING (0x1ULL << 63)

// Mark `pr` as idling as of the timestamp in `snap`.  The processor must
// currently be running (asserted), and the timestamp must not collide with
// the idle flag bit.
void
recount_processor_idle(struct recount_processor *pr, struct recount_snap *snap)
{
	__assert_only uint64_t state_time = os_atomic_load_wide(
		&pr->rpr_state_last_abs_time, relaxed);
	assert((state_time & RCT_PR_IDLING) == 0);
	assert((snap->rsn_time_mach & RCT_PR_IDLING) == 0);
	// Publish the idle flag and idle-start timestamp in a single wide store.
	uint64_t new_state_stamp = RCT_PR_IDLING | snap->rsn_time_mach;
	os_atomic_store_wide(&pr->rpr_state_last_abs_time, new_state_stamp,
	    relaxed);
}
928
// Strip the idle flag from a state stamp, leaving just the timestamp.
OS_PURE OS_ALWAYS_INLINE
static inline uint64_t
_state_time(uint64_t state_stamp)
{
	return state_stamp & ~(RCT_PR_IDLING);
}
935
// One-time setup of a processor's recount state.  On AMP systems, cache the
// processor's CPU-kind index (1 for P-clusters, 0 otherwise) for fast
// RCT_TOPO_CPU_KIND track lookups.
void
recount_processor_init(processor_t processor)
{
#if __AMP__
	processor->pr_recount.rpr_cpu_kind_index =
	    processor->processor_set->pset_cluster_type == PSET_AMP_P ? 1 : 0;
#else // __AMP__
#pragma unused(processor)
#endif // !__AMP__
}
946
// Mark `pr` as running as of the timestamp in `snap`, crediting the interval
// since the idle stamp to the processor's idle time.
void
recount_processor_run(struct recount_processor *pr, struct recount_snap *snap)
{
	uint64_t state = os_atomic_load_wide(&pr->rpr_state_last_abs_time, relaxed);
	// Previous state is either "never transitioned" (0) or idling.
	assert(state == 0 || (state & RCT_PR_IDLING) == RCT_PR_IDLING);
	assert((snap->rsn_time_mach & RCT_PR_IDLING) == 0);
	uint64_t new_state_stamp = snap->rsn_time_mach;
	// NOTE(review): when `state` is 0 (first run), the entire timestamp is
	// credited as idle time -- confirm this is the intended boot-time
	// accounting.
	pr->rpr_idle_time_mach += snap->rsn_time_mach - _state_time(state);
	os_atomic_store_wide(&pr->rpr_state_last_abs_time, new_state_stamp,
	    relaxed);
}
958
// Sum `pr`'s active usage into `usage` and report its accumulated idle time
// (Mach time units) through `idle_time_out`.  If the processor is currently
// idle, the in-progress idle interval is included.
void
recount_processor_usage(struct recount_processor *pr,
    struct recount_usage *usage, uint64_t *idle_time_out)
{
	recount_sum(&recount_processor_plan, &pr->rpr_active, usage);
	_fix_time_precision(usage);

	uint64_t idle_time = pr->rpr_idle_time_mach;
	uint64_t idle_stamp = os_atomic_load_wide(&pr->rpr_state_last_abs_time,
	    relaxed);
	bool idle = (idle_stamp & RCT_PR_IDLING) == RCT_PR_IDLING;
	if (idle) {
		// Since processors can idle for some time without an update, make sure
		// the idle time is up-to-date with respect to the caller.
		idle_time += mach_absolute_time() - _state_time(idle_stamp);
	}
	*idle_time_out = idle_time;
}
977
978 bool
recount_task_thread_perf_level_usage(struct task * task,uint64_t tid,struct recount_usage * usage_levels)979 recount_task_thread_perf_level_usage(struct task *task, uint64_t tid,
980 struct recount_usage *usage_levels)
981 {
982 thread_t thread = task_findtid(task, tid);
983 if (thread != THREAD_NULL) {
984 if (thread == current_thread()) {
985 boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
986 recount_current_thread_perf_level_usage(usage_levels);
987 ml_set_interrupts_enabled(interrupt_state);
988 } else {
989 recount_thread_perf_level_usage(thread, usage_levels);
990 }
991 }
992 return thread != THREAD_NULL;
993 }
994
995 #pragma mark - utilities
996
997 // For rolling up counts, convert an index from one topography to another.
998 static size_t
recount_convert_topo_index(recount_topo_t from,recount_topo_t to,size_t i)999 recount_convert_topo_index(recount_topo_t from, recount_topo_t to, size_t i)
1000 {
1001 if (from == to) {
1002 return i;
1003 } else if (to == RCT_TOPO_SYSTEM) {
1004 return 0;
1005 } else if (from == RCT_TOPO_CPU) {
1006 assertf(to == RCT_TOPO_CPU_KIND,
1007 "recount: cannot convert from CPU topography to %d", to);
1008 return _topo_cpu_kinds[i];
1009 } else {
1010 panic("recount: unexpected rollup request from %d to %d", from, to);
1011 }
1012 }
1013
1014 // Get the track index of the provided processor and topography.
1015 OS_ALWAYS_INLINE
1016 static size_t
recount_topo_index(recount_topo_t topo,processor_t processor)1017 recount_topo_index(recount_topo_t topo, processor_t processor)
1018 {
1019 switch (topo) {
1020 case RCT_TOPO_SYSTEM:
1021 return 0;
1022 case RCT_TOPO_CPU:
1023 return processor->cpu_id;
1024 case RCT_TOPO_CPU_KIND:
1025 #if __AMP__
1026 return processor->pr_recount.rpr_cpu_kind_index;
1027 #else // __AMP__
1028 return 0;
1029 #endif // !__AMP__
1030 default:
1031 panic("recount: invalid topology %u to index", topo);
1032 }
1033 }
1034
// Return the number of tracks needed for a given topography.
size_t
recount_topo_count(recount_topo_t topo)
{
	// Allow the compiler to reason about at least the system and CPU kind
	// counts.
	switch (topo) {
	case RCT_TOPO_SYSTEM:
		// One track for the whole system.
		return 1;

	case RCT_TOPO_CPU_KIND:
		// Two kinds on AMP (P and E clusters), one otherwise.
#if __AMP__
		return 2;
#else // __AMP__
		return 1;
#endif // !__AMP__

	case RCT_TOPO_CPU:
		// One track per CPU; x86 uses the maximum CPU number known early in
		// boot, which may exceed the active count.
#if __arm__ || __arm64__
		return ml_get_cpu_count();
#else // __arm__ || __arm64__
		return ml_early_cpu_max_number() + 1;
#endif // !__arm__ && !__arm64__

	default:
		panic("recount: invalid topography %d", topo);
	}
}
1063
// Whether the track at index `idx` in topography `topo` accumulates counts
// for CPUs of the given `kind`.  On non-AMP systems only the system-wide
// topography matches (there is no per-kind distinction).
static bool
recount_topo_matches_cpu_kind(recount_topo_t topo, recount_cpu_kind_t kind,
    size_t idx)
{
#if !__AMP__
#pragma unused(kind, idx)
#endif // !__AMP__
	switch (topo) {
	case RCT_TOPO_SYSTEM:
		// The system track covers every CPU kind.
		return true;

	case RCT_TOPO_CPU_KIND:
#if __AMP__
		// CPU-kind tracks are indexed directly by kind.
		return kind == idx;
#else // __AMP__
		return false;
#endif // !__AMP__

	case RCT_TOPO_CPU: {
#if __AMP__
		// Map the CPU index to its kind.
		return _topo_cpu_kinds[idx] == kind;
#else // __AMP__
		return false;
#endif // !__AMP__
	}

	default:
		panic("recount: unexpected topography %d", topo);
	}
}
1094
// Allocate a zeroed array of tracks sized for `plan`'s topography.  Cannot
// fail (Z_NOFAIL); may block (Z_WAITOK).  Freed with recount_tracks_destroy().
struct recount_track *
recount_tracks_create(recount_plan_t plan)
{
	return kalloc_type_tag(struct recount_track,
	    recount_topo_count(plan->rpl_topo), Z_WAITOK | Z_ZERO | Z_NOFAIL,
	    VM_KERN_MEMORY_RECOUNT);
}
1102
1103 static void
recount_tracks_copy(recount_plan_t plan,struct recount_track * dst,struct recount_track * src)1104 recount_tracks_copy(recount_plan_t plan, struct recount_track *dst,
1105 struct recount_track *src)
1106 {
1107 size_t topo_count = recount_topo_count(plan->rpl_topo);
1108 for (size_t i = 0; i < topo_count; i++) {
1109 recount_read_track(&dst[i].rt_usage, &src[i]);
1110 }
1111 }
1112
// Free a track array previously allocated by recount_tracks_create() for the
// same `plan`.
void
recount_tracks_destroy(recount_plan_t plan, struct recount_track *tracks)
{
	kfree_type(struct recount_track, recount_topo_count(plan->rpl_topo),
	    tracks);
}
1119
// Allocate a thread's lifetime tracks, per the thread plan's topography.
void
recount_thread_init(struct recount_thread *th)
{
	th->rth_lifetime = recount_tracks_create(&recount_thread_plan);
}
1125
// Copy one thread's lifetime tracks into another's (e.g. at fork-like
// duplication -- confirm with callers).
void
recount_thread_copy(struct recount_thread *dst, struct recount_thread *src)
{
	recount_tracks_copy(&recount_thread_plan, dst->rth_lifetime,
	    src->rth_lifetime);
}
1132
// Copy one task's lifetime tracks into another's.
void
recount_task_copy(struct recount_task *dst, const struct recount_task *src)
{
	recount_tracks_copy(&recount_task_plan, dst->rtk_lifetime,
	    src->rtk_lifetime);
}
1139
// Free the tracks allocated by recount_thread_init().
void
recount_thread_deinit(struct recount_thread *th)
{
	recount_tracks_destroy(&recount_thread_plan, th->rth_lifetime);
}
1145
// Allocate a task's lifetime tracks and the usage storage that accumulates
// its terminated threads' counts.
void
recount_task_init(struct recount_task *tk)
{
	tk->rtk_lifetime = recount_tracks_create(&recount_task_plan);
	tk->rtk_terminated = recount_usage_alloc(
		recount_task_terminated_plan.rpl_topo);
}
1153
// Free the allocations made by recount_task_init().
void
recount_task_deinit(struct recount_task *tk)
{
	recount_tracks_destroy(&recount_task_plan, tk->rtk_lifetime);
	recount_usage_free(recount_task_terminated_plan.rpl_topo,
	    tk->rtk_terminated);
}
1161
// Allocate the usage storage that accumulates a coalition's exited tasks'
// counts.
void
recount_coalition_init(struct recount_coalition *co)
{
	co->rco_exited = recount_usage_alloc(recount_coalition_plan.rpl_topo);
}
1167
// Free the storage allocated by recount_coalition_init().
void
recount_coalition_deinit(struct recount_coalition *co)
{
	recount_usage_free(recount_coalition_plan.rpl_topo, co->rco_exited);
}
1173
// Allocate a zeroed usage array with one element per track in `topo`.
// Cannot fail (Z_NOFAIL); may block (Z_WAITOK).
struct recount_usage *
recount_usage_alloc(recount_topo_t topo)
{
	return kalloc_type_tag(struct recount_usage, recount_topo_count(topo),
	    Z_WAITOK | Z_ZERO | Z_NOFAIL, VM_KERN_MEMORY_RECOUNT);
}
1180
// Free a usage array allocated by recount_usage_alloc() for the same `topo`.
void
recount_usage_free(recount_topo_t topo, struct recount_usage *usage)
{
	kfree_type(struct recount_usage, recount_topo_count(topo),
	    usage);
}
1187