1 /*
2 * Copyright (c) 2011-2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * This file manages the timers used for on-CPU samples and PET.
31 *
32 * Each timer configured by a tool is represented by a kptimer structure.
33 * The timer calls present in each structure are used to schedule CPU-local
34 * timers. As each timer fires, that CPU samples itself and schedules another
35 * timer to fire at the next deadline. The first timer to fire across all CPUs
36 * determines that deadline. This causes the timers to fire at a consistent
37 * cadence.
38 *
39 * Traditional PET uses a timer call to wake up its sampling thread and take
40 * on-CPU samples.
41 *
42 * Synchronization for start and stop is provided by the ktrace subsystem lock.
43 * Global state is stored in a single struct, to ease debugging.
44 */
45
46 #include <mach/mach_types.h>
47 #include <kern/cpu_data.h> /* current_thread() */
48 #include <kern/kalloc.h>
49 #include <kern/timer_queue.h>
50 #include <libkern/section_keywords.h>
51 #include <stdatomic.h>
52 #include <sys/errno.h>
53 #include <sys/vm.h>
54 #include <sys/ktrace.h>
55
56 #include <machine/machine_routines.h>
57 #if defined(__x86_64__)
58 #include <i386/mp.h>
59 #endif /* defined(__x86_64__) */
60
61 #include <kperf/kperf.h>
62 #include <kperf/buffer.h>
63 #include <kperf/context.h>
64 #include <kperf/action.h>
65 #include <kperf/kptimer.h>
66 #include <kperf/pet.h>
67 #include <kperf/sample.h>
68
69 #define KPTIMER_PET_INACTIVE (999)
70 #define KPTIMER_MAX (8)
71
/*
 * Per-timer configuration and the state shared by all CPUs responding to
 * that timer's fires.
 */
struct kptimer {
	/* the action to run when this timer fires; 0 means no action */
	uint32_t kt_actionid;
	/* the timer's period in mach time units; 0 means the timer is inactive */
	uint64_t kt_period_abs;
	/*
	 * The `kt_cur_deadline` field represents when the timer should next fire.
	 * It's used to synchronize between timers firing on each CPU. In the timer
	 * handler, each CPU will take the `kt_lock` and see if the
	 * `kt_cur_deadline` still needs to be updated for the timer fire. If so,
	 * it updates it and logs the timer fire event under the lock.
	 */
	lck_spin_t kt_lock;
	uint64_t kt_cur_deadline;

#if DEVELOPMENT || DEBUG
	/*
	 * To be set by the timer leader as a debugging aid for timeouts, if kperf
	 * happens to be on-CPU when they occur.
	 */
	uint64_t kt_fire_time;
#endif /* DEVELOPMENT || DEBUG */
};
93
/*
 * All global timer state, collected into a single struct to ease debugging
 * (see the file-level comment). Start/stop is serialized by the ktrace
 * subsystem lock.
 */
static struct {
	/* storage for KPTIMER_MAX timers, permanently allocated at setup */
	struct kptimer *g_timers;
	/* per-CPU deadlines, indexed by `(cpuid * KPTIMER_MAX) + timerid` */
	uint64_t *g_cpu_deadlines;
	/* how many of the timers are usable, set by `kptimer_set_count` */
	unsigned int g_ntimers;
	/* which timer drives PET, or KPTIMER_PET_INACTIVE */
	unsigned int g_pet_timerid;

	/* one-time allocation and initialization has been done */
	bool g_setup:1;
	/* traditional (non-lightweight) PET is running on `g_pet_timer` */
	bool g_pet_active:1;
	/* timers have been started and may be armed on CPUs */
	bool g_started:1;

	/* the timer call that drives traditional PET */
	struct timer_call g_pet_timer;
} kptimer = {
	.g_pet_timerid = KPTIMER_PET_INACTIVE,
};
108
/*
 * The minimum periods below, converted into mach time units by
 * `kptimer_init` and read-only thereafter.
 */
SECURITY_READ_ONLY_LATE(static uint64_t) kptimer_minperiods_mtu[KTPL_MAX];

/*
 * Enforce a minimum timer period to prevent interrupt storms.
 */
const uint64_t kptimer_minperiods_ns[KTPL_MAX] = {
#if defined(__x86_64__)
	[KTPL_FG] = 20 * NSEC_PER_USEC, /* The minimum timer period in xnu, period. */
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 1 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 1 * NSEC_PER_MSEC,
#elif defined(__arm64__)
	[KTPL_FG] = 50 * NSEC_PER_USEC,
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 1 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 1 * NSEC_PER_MSEC,
#else
#error unexpected architecture
#endif
};
129
130 static void kptimer_pet_handler(void * __unused param1, void * __unused param2);
131 static void kptimer_stop_cpu(processor_t processor);
132
133 void
kptimer_init(void)134 kptimer_init(void)
135 {
136 for (int i = 0; i < KTPL_MAX; i++) {
137 nanoseconds_to_absolutetime(kptimer_minperiods_ns[i],
138 &kptimer_minperiods_mtu[i]);
139 }
140 }
141
142 static void
kptimer_set_cpu_deadline(int cpuid,int timerid,uint64_t deadline)143 kptimer_set_cpu_deadline(int cpuid, int timerid, uint64_t deadline)
144 {
145 kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid] =
146 deadline;
147 }
148
149 static void
kptimer_setup(void)150 kptimer_setup(void)
151 {
152 if (kptimer.g_setup) {
153 return;
154 }
155 static lck_grp_t kptimer_lock_grp;
156 lck_grp_init(&kptimer_lock_grp, "kptimer", LCK_GRP_ATTR_NULL);
157
158 const size_t timers_size = KPTIMER_MAX * sizeof(struct kptimer);
159 kptimer.g_timers = zalloc_permanent_tag(timers_size,
160 ZALIGN(struct kptimer), VM_KERN_MEMORY_DIAG);
161 for (int i = 0; i < KPTIMER_MAX; i++) {
162 lck_spin_init(&kptimer.g_timers[i].kt_lock, &kptimer_lock_grp,
163 LCK_ATTR_NULL);
164 }
165
166 const size_t deadlines_size = machine_info.logical_cpu_max * KPTIMER_MAX *
167 sizeof(kptimer.g_cpu_deadlines[0]);
168 kptimer.g_cpu_deadlines = zalloc_permanent_tag(deadlines_size,
169 ZALIGN_64, VM_KERN_MEMORY_DIAG);
170 for (int i = 0; i < KPTIMER_MAX; i++) {
171 for (int j = 0; j < machine_info.logical_cpu_max; j++) {
172 kptimer_set_cpu_deadline(j, i, EndOfAllTime);
173 }
174 }
175
176 timer_call_setup(&kptimer.g_pet_timer, kptimer_pet_handler, NULL);
177
178 kptimer.g_setup = true;
179 }
180
181 void
kptimer_reset(void)182 kptimer_reset(void)
183 {
184 kptimer_stop();
185 kptimer_set_pet_timerid(KPTIMER_PET_INACTIVE);
186
187 for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
188 kptimer.g_timers[i].kt_period_abs = 0;
189 kptimer.g_timers[i].kt_actionid = 0;
190 for (int j = 0; j < machine_info.logical_cpu_max; j++) {
191 kptimer_set_cpu_deadline(j, i, EndOfAllTime);
192 }
193 }
194 }
195
196 #pragma mark - deadline management
197
198 static uint64_t
kptimer_get_cpu_deadline(int cpuid,int timerid)199 kptimer_get_cpu_deadline(int cpuid, int timerid)
200 {
201 return kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid];
202 }
203
/*
 * Sample the current CPU's thread with action `actionid`, attributing the
 * sample to timer `timerid`. Uses the per-CPU interrupt sample buffer;
 * user space-related sampling is pended (SAMPLE_FLAG_PEND_USER) rather
 * than done here. `flags` is OR-ed into the sample flags.
 */
static void
kptimer_sample_curcpu(unsigned int actionid, unsigned int timerid,
    uint32_t flags)
{
	struct kperf_sample *intbuf = kperf_intr_sample_buffer();
#if DEVELOPMENT || DEBUG
	/* debugging aid: when this sample was taken */
	intbuf->sample_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */

	/* the HNDLR start event must precede any events the samplers emit */
	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_TIMER,
		.trigger_id = timerid,
	};

	(void)kperf_sample(intbuf, &ctx, actionid,
	    SAMPLE_FLAG_PEND_USER | flags);

	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END);
}
230
/* Take a timer's spin lock, serializing deadline updates across CPUs. */
static void
kptimer_lock(struct kptimer *timer)
{
	lck_spin_lock(&timer->kt_lock);
}

/* Release a timer's spin lock. */
static void
kptimer_unlock(struct kptimer *timer)
{
	lck_spin_unlock(&timer->kt_lock);
}
242
243 /*
244 * If the deadline expired in the past, find the next deadline to program,
245 * locked into the cadence provided by the period.
246 */
247 static inline uint64_t
dead_reckon_deadline(uint64_t now,uint64_t deadline,uint64_t period)248 dead_reckon_deadline(uint64_t now, uint64_t deadline, uint64_t period)
249 {
250 if (deadline < now) {
251 uint64_t time_since = now - deadline;
252 uint64_t extra_time = period - (time_since % period);
253 return now + extra_time;
254 }
255 return deadline;
256 }
257
/*
 * Respond to a fire of `timer` (with ID `timerid`) at time `now`, where
 * `deadline` was this CPU's deadline for the timer. The first CPU to
 * respond to a given fire advances the shared `kt_cur_deadline` under the
 * timer's lock, emits the PERF_TM_FIRE event, and samples with
 * SAMPLE_FLAG_SYSTEM; later CPUs re-synchronize with the shared deadline.
 * Returns this CPU's next deadline for the timer.
 */
static uint64_t
kptimer_fire(struct kptimer *timer, unsigned int timerid,
    uint64_t deadline, int __unused cpuid, uint64_t now)
{
	bool first = false;
	uint64_t next_deadline = deadline + timer->kt_period_abs;

	/*
	 * It's not straightforward to replace this lock with a compare-exchange,
	 * since the PERF_TM_FIRE event must be emitted *before* any subsequent
	 * PERF_TM_HNDLR events, so tools can understand the handlers are responding
	 * to this timer fire.
	 */
	kptimer_lock(timer);
	if (timer->kt_cur_deadline < next_deadline) {
		/*
		 * This CPU won the race for this fire -- advance the shared deadline,
		 * keeping it on the period's cadence even if the fire was late.
		 */
		first = true;
		next_deadline = dead_reckon_deadline(now, next_deadline,
		    timer->kt_period_abs);
		timer->kt_cur_deadline = next_deadline;
		BUF_DATA(PERF_TM_FIRE, timerid, timerid == kptimer.g_pet_timerid,
		    timer->kt_period_abs, timer->kt_actionid);
#if DEVELOPMENT || DEBUG
		/*
		 * Debugging aid to see the last time this timer fired.
		 */
		timer->kt_fire_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */
	} else {
		/*
		 * In case this CPU has missed several timer fires, get it back on track
		 * by synchronizing with the latest timer fire.
		 */
		next_deadline = timer->kt_cur_deadline;
	}
	kptimer_unlock(timer);

	if (!first && !kperf_action_has_non_system(timer->kt_actionid)) {
		/*
		 * The first timer to fire will sample the system, so there's
		 * no need to run other timers if those are the only samplers
		 * for this action.
		 */
		return next_deadline;
	}

	/* only the first responder includes the system-wide samplers */
	kptimer_sample_curcpu(timer->kt_actionid, timerid,
	    first ? SAMPLE_FLAG_SYSTEM : 0);

	return next_deadline;
}
308
309 /*
310 * Determine which of the timers fired.
311 */
/*
 * Determine which of the timers fired on this CPU at `now`, run each of
 * them, and re-arm the CPU's running timer for the earliest next deadline.
 * Tears this CPU's timers down instead if kperf is shutting down; timers
 * firing while sampling is off is a fatal inconsistency.
 */
void
kptimer_expire(processor_t processor, int cpuid, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;

	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	switch (status) {
	case KPERF_SAMPLING_ON:
		break;
	case KPERF_SAMPLING_SHUTDOWN:
		/* a stop is racing with this fire -- quiesce this CPU's timers */
		kptimer_stop_cpu(processor);
		return;
	case KPERF_SAMPLING_OFF:
		panic("kperf: timer fired at %llu, but sampling is disabled", now);
	default:
		panic("kperf: unknown sampling state 0x%x", status);
	}

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0) {
			/* inactive timer */
			continue;
		}

		uint64_t cpudeadline = kptimer_get_cpu_deadline(cpuid, i);
		if (now > cpudeadline) {
			uint64_t deadline = kptimer_fire(timer, i, cpudeadline, cpuid, now);
			if (deadline == 0) {
				/* the timer was deactivated -- stop tracking it on this CPU */
				kptimer_set_cpu_deadline(cpuid, i, EndOfAllTime);
			} else {
				kptimer_set_cpu_deadline(cpuid, i, deadline);
				if (deadline < min_deadline) {
					min_deadline = deadline;
				}
			}
		}
	}
	if (min_deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL,
		    min_deadline, mach_absolute_time());
	}
}
354
355 #pragma mark - start/stop
356
/*
 * Invoke `fn` on every CPU, including the current one. Must be called with
 * the ktrace lock held. On non-Intel, `fn` receives a pointer to the
 * cross-call synchronization counter, which it must pass on to
 * `kptimer_broadcast_ack`.
 */
static void
kptimer_broadcast(void (*fn)(void *))
{
	ktrace_assert_lock_held();

#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_ALL, ASYNC, fn, NULL);
#else /* defined(__x86_64__) */
	_Atomic uint32_t xcsync = 0;
	cpu_broadcast_xcall((uint32_t *)&xcsync, TRUE /* include self */, fn,
	    &xcsync);
#endif /* !defined(__x86_64__) */
}
370
/*
 * Acknowledge a `kptimer_broadcast` cross-call on non-Intel, waking any
 * waiter once the last CPU has checked in. No-op on Intel, where no
 * per-CPU acknowledgment is used.
 */
static void
kptimer_broadcast_ack(void *arg)
{
#if defined(__x86_64__)
#pragma unused(arg)
#else /* defined(__x86_64__) */
	_Atomic uint32_t *xcsync = arg;
	int pending = os_atomic_dec(xcsync, relaxed);
	if (pending == 0) {
		thread_wakeup(xcsync);
	}
#endif /* !defined(__x86_64__) */
}
384
385 static void
kptimer_sample_pet_remote(void * __unused arg)386 kptimer_sample_pet_remote(void * __unused arg)
387 {
388 if (!kperf_is_sampling()) {
389 return;
390 }
391 struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];
392 kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid, 0);
393 }
394
#if !defined(__x86_64__)

#include <arm/cpu_internal.h>

/*
 * Handler for the SIGPkppet CPU signal sent by `kptimer_broadcast_pet` --
 * takes a PET sample of the current CPU.
 */
void kperf_signal_handler(void);
void
kperf_signal_handler(void)
{
	kptimer_sample_pet_remote(NULL);
}

#endif /* !defined(__x86_64__) */
407
408 #include <stdatomic.h>
409 _Atomic uint64_t mycounter = 0;
410
411 static void
kptimer_broadcast_pet(void)412 kptimer_broadcast_pet(void)
413 {
414 atomic_fetch_add(&mycounter, 1);
415 #if defined(__x86_64__)
416 (void)mp_cpus_call(CPUMASK_OTHERS, NOSYNC, kptimer_sample_pet_remote,
417 NULL);
418 #else /* defined(__x86_64__) */
419 int curcpu = cpu_number();
420 for (int i = 0; i < machine_info.logical_cpu_max; i++) {
421 if (i != curcpu) {
422 cpu_signal(cpu_datap(i), SIGPkppet, NULL, NULL);
423 }
424 }
425 #endif /* !defined(__x86_64__) */
426 }
427
/*
 * Timer call handler that drives traditional PET: emit the fire event,
 * sample every CPU, and wake the PET sampling thread. The timer is
 * deliberately not re-armed here -- the PET thread re-arms it via
 * `kptimer_pet_enter` when its sampling pass finishes.
 */
static void
kptimer_pet_handler(void * __unused param1, void * __unused param2)
{
	if (!kptimer.g_pet_active) {
		/* raced with a stop -- nothing to do */
		return;
	}

	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];

	BUF_DATA(PERF_TM_FIRE, kptimer.g_pet_timerid, 1, timer->kt_period_abs,
	    timer->kt_actionid);

	/*
	 * To get the on-CPU samples as close to this timer fire as possible, first
	 * broadcast to them to sample themselves.
	 */
	kptimer_broadcast_pet();

	/*
	 * Wakeup the PET thread afterwards so it's not inadvertently sampled (it's a
	 * high-priority kernel thread). If the scheduler needs to IPI to run it,
	 * that IPI will be handled after the IPIs issued during the broadcast.
	 */
	kppet_wake_thread();

	/*
	 * Finally, sample this CPU, who's stacks and state have been preserved while
	 * running this handler. Make sure to include system measurements.
	 */
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid,
	    SAMPLE_FLAG_SYSTEM);

	BUF_INFO(PERF_TM_FIRE | DBG_FUNC_END);

	/*
	 * The PET thread will re-arm the timer when it's done.
	 */
}
466
467 void
kptimer_pet_enter(uint64_t sampledur_abs)468 kptimer_pet_enter(uint64_t sampledur_abs)
469 {
470 if (!kperf_is_sampling()) {
471 return;
472 }
473
474 uint64_t period_abs = kptimer.g_timers[kptimer.g_pet_timerid].kt_period_abs;
475 uint64_t orig_period_abs = period_abs;
476
477 if (period_abs > sampledur_abs) {
478 period_abs -= sampledur_abs;
479 }
480 period_abs = MAX(kptimer_min_period_abs(true), period_abs);
481 uint64_t deadline_abs = mach_absolute_time() + period_abs;
482
483 BUF_INFO(PERF_PET_SCHED, orig_period_abs, period_abs, sampledur_abs,
484 deadline_abs);
485
486 timer_call_enter(&kptimer.g_pet_timer, deadline_abs, TIMER_CALL_SYS_CRITICAL);
487 }
488
/*
 * Return the earliest upcoming deadline across all active timers, dead
 * reckoned from `now`. As a side effect, stores each timer's reckoned
 * deadline as `processor`'s per-CPU deadline for that timer. Returns
 * UINT64_MAX if no timers are active.
 */
static uint64_t
kptimer_earliest_deadline(processor_t processor, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		uint64_t cur_deadline = timer->kt_cur_deadline;
		if (cur_deadline == 0) {
			/* timer not started */
			continue;
		}
		cur_deadline = dead_reckon_deadline(now, cur_deadline,
		    timer->kt_period_abs);
		kptimer_set_cpu_deadline(processor->cpu_id, i, cur_deadline);
		if (cur_deadline < min_deadline) {
			min_deadline = cur_deadline;
		}
	}
	return min_deadline;
}
508
509 void kptimer_running_setup(processor_t processor, uint64_t now);
510 void
kptimer_running_setup(processor_t processor,uint64_t now)511 kptimer_running_setup(processor_t processor, uint64_t now)
512 {
513 uint64_t deadline = kptimer_earliest_deadline(processor, now);
514 if (deadline < UINT64_MAX) {
515 running_timer_setup(processor, RUNNING_TIMER_KPERF, NULL, deadline,
516 now);
517 }
518 }
519
520 static void
kptimer_start_cpu(processor_t processor)521 kptimer_start_cpu(processor_t processor)
522 {
523 uint64_t now = mach_absolute_time();
524 uint64_t deadline = kptimer_earliest_deadline(processor, now);
525 if (deadline < UINT64_MAX) {
526 running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL, deadline,
527 now);
528 }
529 }
530
/*
 * Broadcast target: arm the running timer on this CPU, then acknowledge
 * the broadcast.
 */
static void
kptimer_start_remote(void *arg)
{
	kptimer_start_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}
537
538 static void
kptimer_stop_cpu(processor_t processor)539 kptimer_stop_cpu(processor_t processor)
540 {
541 for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
542 kptimer_set_cpu_deadline(processor->cpu_id, i, EndOfAllTime);
543 }
544 running_timer_cancel(processor, RUNNING_TIMER_KPERF);
545 }
546
/*
 * Stop the kperf timers on the current processor.
 */
void
kptimer_stop_curcpu(void)
{
	kptimer_stop_cpu(current_processor());
}
552
/*
 * Broadcast target: cancel this CPU's running timer and acknowledge the
 * broadcast. Runs with interrupts disabled.
 */
static void
kptimer_stop_remote(void * __unused arg)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	kptimer_stop_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}
560
561 /*
562 * Called when a CPU is brought online. Handles the cases where the kperf timer may have
563 * been either enabled or disabled while the CPU was offline (preventing the enabling/disabling
564 * IPIs from reaching this CPU).
565 */
566 void
kptimer_curcpu_up(void)567 kptimer_curcpu_up(void)
568 {
569 enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
570 processor_t processor = current_processor();
571
572 assert(ml_get_interrupts_enabled() == FALSE);
573
574 /*
575 * If the CPU was taken offline, THEN kperf was enabled, this CPU would have missed
576 * the enabling IPI, so fix that here. Also, if the CPU was taken offline (after having
577 * enabled kperf), recompute the deadline (since we may have missed a timer update) and
578 * keep the timer enabled.
579 */
580 if (status == KPERF_SAMPLING_ON) {
581 kptimer_start_cpu(processor);
582 } else {
583 /*
584 * Similarly, If the CPU is resuming after having previously armed the kperf timer
585 * before going down, and kperf is currently disabled, disable the kperf running
586 * timer on this CPU.
587 */
588 kptimer_stop_cpu(processor);
589 }
590 }
591
/*
 * Start all configured timers, giving each an initial deadline one period
 * from now, and broadcast to every CPU to arm its running timer. In
 * traditional (non-lightweight) PET mode, the PET timer is instead driven
 * by its own timer call and gets no per-CPU deadline. Called with the
 * ktrace lock held; no-op if already started.
 */
void
kptimer_start(void)
{
	ktrace_assert_lock_held();

	if (kptimer.g_started) {
		return;
	}

	uint64_t now = mach_absolute_time();
	unsigned int ntimers_active = 0;
	kptimer.g_started = true;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0 || timer->kt_actionid == 0) {
			/*
			 * No period or action means the timer is inactive.
			 */
			continue;
		}
		if (i == kptimer.g_pet_timerid) {
			kppet_set_period(timer->kt_period_abs);
			if (!kppet_get_lightweight_pet()) {
				/* traditional PET: drive it with its own timer call */
				kptimer.g_pet_active = true;
				timer_call_enter(&kptimer.g_pet_timer, now + timer->kt_period_abs,
				    TIMER_CALL_SYS_CRITICAL);
				continue;
			}
		}
		timer->kt_cur_deadline = now + timer->kt_period_abs;
		ntimers_active++;
	}
	if (ntimers_active > 0) {
		/* arm the running timer on every CPU */
		kptimer_broadcast(kptimer_start_remote);
	}
}
628
/*
 * Stop all timers: cancel the PET timer call, broadcast to every CPU to
 * cancel its running timer, and clear each timer's shared deadline.
 * Interrupts are disabled for the duration of the teardown. Called with
 * the ktrace lock held; no-op if not started.
 */
void
kptimer_stop(void)
{
	ktrace_assert_lock_held();

	if (!kptimer.g_started) {
		return;
	}

	int intrs_en = ml_set_interrupts_enabled(FALSE);

	if (kptimer.g_pet_active) {
		kptimer.g_pet_active = false;
		timer_call_cancel(&kptimer.g_pet_timer);
	}
	kptimer.g_started = false;
	kptimer_broadcast(kptimer_stop_remote);
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_cur_deadline = 0;
	}

	ml_set_interrupts_enabled(intrs_en);
}
652
653 #pragma mark - accessors
654
655 int
kptimer_get_period(unsigned int timerid,uint64_t * period_abs)656 kptimer_get_period(unsigned int timerid, uint64_t *period_abs)
657 {
658 if (timerid >= kptimer.g_ntimers) {
659 return EINVAL;
660 }
661 *period_abs = kptimer.g_timers[timerid].kt_period_abs;
662 return 0;
663 }
664
665 int
kptimer_set_period(unsigned int timerid,uint64_t period_abs)666 kptimer_set_period(unsigned int timerid, uint64_t period_abs)
667 {
668 if (timerid >= kptimer.g_ntimers) {
669 return EINVAL;
670 }
671 if (kptimer.g_started) {
672 return EBUSY;
673 }
674
675 bool pet = kptimer.g_pet_timerid == timerid;
676 uint64_t min_period = kptimer_min_period_abs(pet);
677 if (period_abs != 0 && period_abs < min_period) {
678 period_abs = min_period;
679 }
680 if (pet && !kppet_get_lightweight_pet()) {
681 kppet_config(kptimer.g_timers[timerid].kt_actionid);
682 }
683
684 kptimer.g_timers[timerid].kt_period_abs = period_abs;
685 return 0;
686 }
687
688 int
kptimer_get_action(unsigned int timerid,unsigned int * actionid)689 kptimer_get_action(unsigned int timerid, unsigned int *actionid)
690 {
691 if (timerid >= kptimer.g_ntimers) {
692 return EINVAL;
693 }
694 *actionid = kptimer.g_timers[timerid].kt_actionid;
695 return 0;
696 }
697
698 int
kptimer_set_action(unsigned int timerid,unsigned int actionid)699 kptimer_set_action(unsigned int timerid, unsigned int actionid)
700 {
701 if (timerid >= kptimer.g_ntimers) {
702 return EINVAL;
703 }
704 if (kptimer.g_started) {
705 return EBUSY;
706 }
707
708 kptimer.g_timers[timerid].kt_actionid = actionid;
709 if (kptimer.g_pet_timerid == timerid && !kppet_get_lightweight_pet()) {
710 kppet_config(actionid);
711 }
712 return 0;
713 }
714
/*
 * Return the number of timers available, as set by `kptimer_set_count`.
 */
unsigned int
kptimer_get_count(void)
{
	return kptimer.g_ntimers;
}
720
/*
 * Set the number of timers available for configuration, up to KPTIMER_MAX.
 * Performs one-time setup of the global timer state on first call.
 * Returns EBUSY while timers are started and EINVAL if `count` is too
 * large.
 */
int
kptimer_set_count(unsigned int count)
{
	kptimer_setup();
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (count > KPTIMER_MAX) {
		return EINVAL;
	}
	kptimer.g_ntimers = count;
	return 0;
}
734
735 uint64_t
kptimer_min_period_abs(bool pet)736 kptimer_min_period_abs(bool pet)
737 {
738 enum kptimer_period_limit limit = 0;
739 if (ktrace_background_active()) {
740 limit = pet ? KTPL_BG_PET : KTPL_BG;
741 } else {
742 limit = pet ? KTPL_FG_PET : KTPL_FG;
743 }
744 return kptimer_minperiods_mtu[limit];
745 }
746
/*
 * Return the ID of the timer driving PET, or KPTIMER_PET_INACTIVE if none.
 */
uint32_t
kptimer_get_pet_timerid(void)
{
	return kptimer.g_pet_timerid;
}
752
753 int
kptimer_set_pet_timerid(uint32_t petid)754 kptimer_set_pet_timerid(uint32_t petid)
755 {
756 if (kptimer.g_started) {
757 return EBUSY;
758 }
759 if (petid >= kptimer.g_ntimers) {
760 kppet_config(0);
761 } else {
762 kppet_config(kptimer.g_timers[petid].kt_actionid);
763 uint64_t period_abs = MAX(kptimer_min_period_abs(true),
764 kptimer.g_timers[petid].kt_period_abs);
765 kptimer.g_timers[petid].kt_period_abs = period_abs;
766 }
767
768 kptimer.g_pet_timerid = petid;
769
770 return 0;
771 }
772