/*
 * Copyright (c) 2011-2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * This file manages the timers used for on-CPU samples and PET.
 *
 * Each timer configured by a tool is represented by a kptimer structure.
 * The timer calls present in each structure are used to schedule CPU-local
 * timers.  As each timer fires, that CPU samples itself and schedules another
 * timer to fire at the next deadline.  The first timer to fire across all
 * CPUs determines that deadline.  This causes the timers to fire at a
 * consistent cadence.
 *
 * Traditional PET uses a timer call to wake up its sampling thread and take
 * on-CPU samples.
 *
 * Synchronization for start and stop is provided by the ktrace subsystem
 * lock.  Global state is stored in a single struct, to ease debugging.
 */

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <kern/timer_queue.h>
#include <libkern/section_keywords.h>
#include <stdatomic.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kptimer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

#define KPTIMER_PET_INACTIVE (999)
#define KPTIMER_MAX (8)

struct kptimer {
	uint32_t kt_actionid;
	uint64_t kt_period_abs;
	/*
	 * The `kt_cur_deadline` field represents when the timer should next
	 * fire.  It's used to synchronize between timers firing on each CPU.
	 * In the timer handler, each CPU will take the `kt_lock` and see if
	 * the `kt_cur_deadline` still needs to be updated for the timer fire.
	 * If so, it updates it and logs the timer fire event under the lock.
	 */
	lck_spin_t kt_lock;
	uint64_t kt_cur_deadline;

#if DEVELOPMENT || DEBUG
	/*
	 * To be set by the timer leader as a debugging aid for timeouts, if
	 * kperf happens to be on-CPU when they occur.
	 */
	uint64_t kt_fire_time;
#endif /* DEVELOPMENT || DEBUG */
};

static struct {
	struct kptimer *g_timers;
	uint64_t *g_cpu_deadlines;
	unsigned int g_ntimers;
	unsigned int g_pet_timerid;

	bool g_setup:1;
	bool g_pet_active:1;
	bool g_started:1;

	struct timer_call g_pet_timer;
} kptimer = {
	.g_pet_timerid = KPTIMER_PET_INACTIVE,
};

SECURITY_READ_ONLY_LATE(static uint64_t) kptimer_minperiods_mtu[KTPL_MAX];

/*
 * Enforce a minimum timer period to prevent interrupt storms.
 */
const uint64_t kptimer_minperiods_ns[KTPL_MAX] = {
#if defined(__x86_64__)
	[KTPL_FG] = 20 * NSEC_PER_USEC, /* The minimum timer period in xnu, period. */
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 2 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 5 * NSEC_PER_MSEC,
#elif defined(__arm64__)
	[KTPL_FG] = 50 * NSEC_PER_USEC,
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 2 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 10 * NSEC_PER_MSEC,
#elif defined(__arm__)
	[KTPL_FG] = 100 * NSEC_PER_USEC,
	[KTPL_BG] = 10 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 2 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 50 * NSEC_PER_MSEC,
#else
#error unexpected architecture
#endif
};

static void kptimer_pet_handler(void * __unused param1, void * __unused param2);
static void kptimer_stop_cpu(processor_t processor);

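/*
 * Convert the minimum periods to Mach time units once, early in boot, before
 * any timers can be configured.
 */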
void
kptimer_init(void)
{
	for (int i = 0; i < KTPL_MAX; i++) {
		nanoseconds_to_absolutetime(kptimer_minperiods_ns[i],
		    &kptimer_minperiods_mtu[i]);
	}
}

static void
kptimer_set_cpu_deadline(int cpuid, int timerid, uint64_t deadline)
{
	kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid] = deadline;
}

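/*
 * Lazily allocate the global timer state -- the timer structures and the
 * per-CPU deadlines -- the first time a tool sets the timer count.
 */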
static void
kptimer_setup(void)
{
	if (kptimer.g_setup) {
		return;
	}
	static lck_grp_t kptimer_lock_grp;
	lck_grp_init(&kptimer_lock_grp, "kptimer", LCK_GRP_ATTR_NULL);

	const size_t timers_size = KPTIMER_MAX * sizeof(struct kptimer);
	kptimer.g_timers = zalloc_permanent(timers_size, ZALIGN(struct kptimer));
	for (int i = 0; i < KPTIMER_MAX; i++) {
		lck_spin_init(&kptimer.g_timers[i].kt_lock, &kptimer_lock_grp,
		    LCK_ATTR_NULL);
	}

	const size_t deadlines_size = machine_info.logical_cpu_max * KPTIMER_MAX *
	    sizeof(kptimer.g_cpu_deadlines[0]);
	kptimer.g_cpu_deadlines = zalloc_permanent(deadlines_size, ZALIGN_64);
	for (int i = 0; i < KPTIMER_MAX; i++) {
		for (int j = 0; j < machine_info.logical_cpu_max; j++) {
			kptimer_set_cpu_deadline(j, i, EndOfAllTime);
		}
	}

	timer_call_setup(&kptimer.g_pet_timer, kptimer_pet_handler, NULL);

	kptimer.g_setup = true;
}

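/*
 * Stop any active timers and return them all to their unconfigured state.
 */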
void
kptimer_reset(void)
{
	kptimer_stop();
	kptimer_set_pet_timerid(KPTIMER_PET_INACTIVE);

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_period_abs = 0;
		kptimer.g_timers[i].kt_actionid = 0;
		for (int j = 0; j < machine_info.logical_cpu_max; j++) {
			kptimer_set_cpu_deadline(j, i, EndOfAllTime);
		}
	}
}

#pragma mark - deadline management

static uint64_t
kptimer_get_cpu_deadline(int cpuid, int timerid)
{
	return kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid];
}

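/*
 * Sample the current CPU's thread and task on behalf of the timer identified
 * by `timerid`, running the samplers configured for `actionid`.
 */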
static void
kptimer_sample_curcpu(unsigned int actionid, unsigned int timerid,
    uint32_t flags)
{
	struct kperf_sample *intbuf = kperf_intr_sample_buffer();
#if DEVELOPMENT || DEBUG
	intbuf->sample_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */

	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_TIMER,
		.trigger_id = timerid,
	};

	(void)kperf_sample(intbuf, &ctx, actionid,
	    SAMPLE_FLAG_PEND_USER | flags);

	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END);
}

static void
kptimer_lock(struct kptimer *timer)
{
	lck_spin_lock(&timer->kt_lock);
}

static void
kptimer_unlock(struct kptimer *timer)
{
	lck_spin_unlock(&timer->kt_lock);
}

/*
 * If the deadline expired in the past, find the next deadline to program,
 * locked into the cadence provided by the period.
 */
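/*
 * For example, with a period of 10, a missed deadline of 95, and a current
 * time of 127, the next deadline is 135 -- the first point past `now` on the
 * original cadence (95 + 4 * 10).
 */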
static inline uint64_t
dead_reckon_deadline(uint64_t now, uint64_t deadline, uint64_t period)
{
	if (deadline < now) {
		uint64_t time_since = now - deadline;
		uint64_t extra_time = period - (time_since % period);
		return now + extra_time;
	}
	return deadline;
}

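/*
 * Fire `timer` on the current CPU: the first CPU to observe the fire updates
 * `kt_cur_deadline` and takes any system-wide samples.  Returns the next
 * deadline this CPU should program, or 0 if the timer was concurrently
 * stopped.
 */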
static uint64_t
kptimer_fire(struct kptimer *timer, unsigned int timerid,
    uint64_t deadline, int __unused cpuid, uint64_t now)
{
	bool first = false;
	uint64_t next_deadline = deadline + timer->kt_period_abs;

	/*
	 * It's not straightforward to replace this lock with a compare-exchange,
	 * since the PERF_TM_FIRE event must be emitted *before* any subsequent
	 * PERF_TM_HNDLR events, so tools can understand the handlers are
	 * responding to this timer fire.
	 */
	kptimer_lock(timer);
	if (timer->kt_cur_deadline < next_deadline) {
		first = true;
		next_deadline = dead_reckon_deadline(now, next_deadline,
		    timer->kt_period_abs);
		timer->kt_cur_deadline = next_deadline;
		BUF_DATA(PERF_TM_FIRE, timerid, timerid == kptimer.g_pet_timerid,
		    timer->kt_period_abs, timer->kt_actionid);
#if DEVELOPMENT || DEBUG
		/*
		 * Debugging aid to see the last time this timer fired.
		 */
		timer->kt_fire_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */
		if (timerid == kptimer.g_pet_timerid && kppet_get_lightweight_pet()) {
			os_atomic_inc(&kppet_gencount, relaxed);
		}
	} else {
		/*
		 * In case this CPU has missed several timer fires, get it back on
		 * track by synchronizing with the latest timer fire.
		 */
		next_deadline = timer->kt_cur_deadline;
	}
	kptimer_unlock(timer);

	if (!first && !kperf_action_has_non_system(timer->kt_actionid)) {
		/*
		 * The first timer to fire will sample the system, so there's no need
		 * to run other timers if those are the only samplers for this action.
		 */
		return next_deadline;
	}

	kptimer_sample_curcpu(timer->kt_actionid, timerid,
	    first ? SAMPLE_FLAG_SYSTEM : 0);

	return next_deadline;
}

/*
 * Determine which of the timers fired.
 */
void
kptimer_expire(processor_t processor, int cpuid, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;

	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	switch (status) {
	case KPERF_SAMPLING_ON:
		break;
	case KPERF_SAMPLING_SHUTDOWN:
		kptimer_stop_cpu(processor);
		return;
	case KPERF_SAMPLING_OFF:
		panic("kperf: timer fired at %llu, but sampling is disabled", now);
	default:
		panic("kperf: unknown sampling state 0x%x", status);
	}

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0) {
			continue;
		}

		uint64_t cpudeadline = kptimer_get_cpu_deadline(cpuid, i);
		if (now > cpudeadline) {
			uint64_t deadline = kptimer_fire(timer, i, cpudeadline, cpuid, now);
			if (deadline == 0) {
				kptimer_set_cpu_deadline(cpuid, i, EndOfAllTime);
			} else {
				kptimer_set_cpu_deadline(cpuid, i, deadline);
				if (deadline < min_deadline) {
					min_deadline = deadline;
				}
			}
		}
	}
	if (min_deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL,
		    min_deadline, mach_absolute_time());
	}
}

#pragma mark - start/stop

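/*
 * Run `fn` on every CPU, including this one, so each can arm or cancel its
 * own running timer.
 */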
static void
kptimer_broadcast(void (*fn)(void *))
{
	ktrace_assert_lock_held();

#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_ALL, ASYNC, fn, NULL);
#else /* defined(__x86_64__) */
	_Atomic uint32_t xcsync = 0;
	cpu_broadcast_xcall((uint32_t *)&xcsync, TRUE /* include self */, fn,
	    &xcsync);
#endif /* !defined(__x86_64__) */
}

static void
kptimer_broadcast_ack(void *arg)
{
#if defined(__x86_64__)
#pragma unused(arg)
#else /* defined(__x86_64__) */
	_Atomic uint32_t *xcsync = arg;
	int pending = os_atomic_dec(xcsync, relaxed);
	if (pending == 0) {
		thread_wakeup(xcsync);
	}
#endif /* !defined(__x86_64__) */
}

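/*
 * Handle a cross-CPU sampling request from the CPU that handled the PET timer
 * fire by taking an on-CPU sample with the PET timer's action.
 */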
static void
kptimer_sample_pet_remote(void * __unused arg)
{
	if (!kperf_is_sampling()) {
		return;
	}
	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid, 0);
}

#if !defined(__x86_64__)

#include <arm/cpu_internal.h>

void kperf_signal_handler(void);
void
kperf_signal_handler(void)
{
	kptimer_sample_pet_remote(NULL);
}

#endif /* !defined(__x86_64__) */

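/*
 * Ask all other CPUs to sample themselves on behalf of the PET timer.  The
 * broadcast is asynchronous -- each CPU samples as it handles the IPI.
 */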
static void
kptimer_broadcast_pet(void)
{
#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_OTHERS, NOSYNC, kptimer_sample_pet_remote,
	    NULL);
#else /* defined(__x86_64__) */
	int curcpu = cpu_number();
	for (int i = 0; i < machine_info.logical_cpu_max; i++) {
		if (i != curcpu) {
			cpu_signal(cpu_datap(i), SIGPkppet, NULL, NULL);
		}
	}
#endif /* !defined(__x86_64__) */
}

static void
kptimer_pet_handler(void * __unused param1, void * __unused param2)
{
	if (!kptimer.g_pet_active) {
		return;
	}

	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];

	BUF_DATA(PERF_TM_FIRE, kptimer.g_pet_timerid, 1, timer->kt_period_abs,
	    timer->kt_actionid);

	/*
	 * To get the on-CPU samples as close to this timer fire as possible,
	 * first broadcast to the other CPUs to sample themselves.
	 */
	kptimer_broadcast_pet();

	/*
	 * Wake up the PET thread afterwards so it's not inadvertently sampled
	 * (it's a high-priority kernel thread).  If the scheduler needs to IPI
	 * to run it, that IPI will be handled after the IPIs issued during the
	 * broadcast.
	 */
	kppet_wake_thread();

	/*
	 * Finally, sample this CPU, whose stack and state have been preserved
	 * while running this handler.  Make sure to include system measurements.
	 */
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid,
	    SAMPLE_FLAG_SYSTEM);

	BUF_INFO(PERF_TM_FIRE | DBG_FUNC_END);

	/*
	 * The PET thread will re-arm the timer when it's done.
	 */
}

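/*
 * Called by the PET thread once it finishes sampling, to re-arm the PET
 * timer.  The time spent sampling (`sampledur_abs`) is deducted from the
 * period, subject to the minimum period, so a slow sampling pass doesn't
 * stretch the effective cadence.
 */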
void
kptimer_pet_enter(uint64_t sampledur_abs)
{
	if (!kperf_is_sampling()) {
		return;
	}

	uint64_t period_abs = kptimer.g_timers[kptimer.g_pet_timerid].kt_period_abs;
	uint64_t orig_period_abs = period_abs;

	if (period_abs > sampledur_abs) {
		period_abs -= sampledur_abs;
	}
	period_abs = MAX(kptimer_min_period_abs(true), period_abs);
	uint64_t deadline_abs = mach_absolute_time() + period_abs;

	BUF_INFO(PERF_PET_SCHED, orig_period_abs, period_abs, sampledur_abs,
	    deadline_abs);

	timer_call_enter(&kptimer.g_pet_timer, deadline_abs,
	    TIMER_CALL_SYS_CRITICAL);
}

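/*
 * Find the earliest deadline among the active timers, dead-reckoning past any
 * deadlines this CPU missed while its timer wasn't running, and record each
 * timer's deadline for this CPU along the way.
 */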
static uint64_t
kptimer_earliest_deadline(processor_t processor, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		uint64_t cur_deadline = timer->kt_cur_deadline;
		if (cur_deadline == 0) {
			continue;
		}
		cur_deadline = dead_reckon_deadline(now, cur_deadline,
		    timer->kt_period_abs);
		kptimer_set_cpu_deadline(processor->cpu_id, i, cur_deadline);
		if (cur_deadline < min_deadline) {
			min_deadline = cur_deadline;
		}
	}
	return min_deadline;
}

void kptimer_running_setup(processor_t processor, uint64_t now);
void
kptimer_running_setup(processor_t processor, uint64_t now)
{
	uint64_t deadline = kptimer_earliest_deadline(processor, now);
	if (deadline < UINT64_MAX) {
		running_timer_setup(processor, RUNNING_TIMER_KPERF, NULL, deadline,
		    now);
	}
}

static void
kptimer_start_cpu(processor_t processor)
{
	uint64_t now = mach_absolute_time();
	uint64_t deadline = kptimer_earliest_deadline(processor, now);
	if (deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL, deadline,
		    now);
	}
}

static void
kptimer_start_remote(void *arg)
{
	kptimer_start_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}

static void
kptimer_stop_cpu(processor_t processor)
{
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer_set_cpu_deadline(processor->cpu_id, i, EndOfAllTime);
	}
	running_timer_cancel(processor, RUNNING_TIMER_KPERF);
}

void
kptimer_stop_curcpu(void)
{
	kptimer_stop_cpu(current_processor());
}

static void
kptimer_stop_remote(void *arg)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	kptimer_stop_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}

/*
 * Called when a CPU is brought online.  Handles the cases where kperf was
 * either started or stopped while the CPU was offline, which would have
 * prevented the starting or stopping IPIs from reaching this CPU.
 */
void
kptimer_curcpu_up(void)
{
	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	processor_t processor = current_processor();

	assert(ml_get_interrupts_enabled() == FALSE);

	/*
	 * If kperf was started while this CPU was offline, the CPU missed the
	 * starting IPI, so arm the timer here.  This also covers a CPU that went
	 * offline with kperf already started: recompute the deadline (since a
	 * timer update may have been missed) and keep the timer armed.
	 */
	if (status == KPERF_SAMPLING_ON) {
		kptimer_start_cpu(processor);
	} else {
		/*
		 * Similarly, if the CPU armed its timer before going offline and
		 * kperf has since been stopped, cancel the running timer here.
		 */
		kptimer_stop_cpu(processor);
	}
}

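/*
 * Arm all configured timers; the ktrace subsystem lock must be held.
 */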
void
kptimer_start(void)
{
	ktrace_assert_lock_held();

	if (kptimer.g_started) {
		return;
	}

	uint64_t now = mach_absolute_time();
	unsigned int ntimers_active = 0;
	kptimer.g_started = true;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0 || timer->kt_actionid == 0) {
			/*
			 * No period or action means the timer is inactive.
			 */
			continue;
		} else if (!kppet_get_lightweight_pet() &&
		    i == kptimer.g_pet_timerid) {
			kptimer.g_pet_active = true;
			timer_call_enter(&kptimer.g_pet_timer,
			    now + timer->kt_period_abs, TIMER_CALL_SYS_CRITICAL);
		} else {
			timer->kt_cur_deadline = now + timer->kt_period_abs;
			ntimers_active++;
		}
	}
	if (ntimers_active > 0) {
		kptimer_broadcast(kptimer_start_remote);
	}
}

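/*
 * Cancel all timers on all CPUs; the ktrace subsystem lock must be held.
 */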
void
kptimer_stop(void)
{
	ktrace_assert_lock_held();

	if (!kptimer.g_started) {
		return;
	}

	int intrs_en = ml_set_interrupts_enabled(FALSE);

	if (kptimer.g_pet_active) {
		kptimer.g_pet_active = false;
		timer_call_cancel(&kptimer.g_pet_timer);
	}
	kptimer.g_started = false;
	kptimer_broadcast(kptimer_stop_remote);
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_cur_deadline = 0;
	}

	ml_set_interrupts_enabled(intrs_en);
}

#pragma mark - accessors

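/*
 * A sketch of a typical configuration sequence using these accessors (in
 * practice, kperf's sysctl interface drives these calls; error handling is
 * omitted here):
 *
 *	kptimer_set_count(1);
 *	kptimer_set_period(0, period_abs);
 *	kptimer_set_action(0, actionid);
 *	kptimer_set_pet_timerid(0);
 *	kptimer_start();
 */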
int
kptimer_get_period(unsigned int timerid, uint64_t *period_abs)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	*period_abs = kptimer.g_timers[timerid].kt_period_abs;
	return 0;
}

int
kptimer_set_period(unsigned int timerid, uint64_t period_abs)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	if (kptimer.g_started) {
		return EBUSY;
	}

	bool pet = kptimer.g_pet_timerid == timerid;
	uint64_t min_period = kptimer_min_period_abs(pet);
	if (period_abs != 0 && period_abs < min_period) {
		period_abs = min_period;
	}
	if (pet && !kppet_get_lightweight_pet()) {
		kppet_config(kptimer.g_timers[timerid].kt_actionid);
	}

	kptimer.g_timers[timerid].kt_period_abs = period_abs;
	return 0;
}

int
kptimer_get_action(unsigned int timerid, unsigned int *actionid)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	*actionid = kptimer.g_timers[timerid].kt_actionid;
	return 0;
}

int
kptimer_set_action(unsigned int timerid, unsigned int actionid)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	if (kptimer.g_started) {
		return EBUSY;
	}

	kptimer.g_timers[timerid].kt_actionid = actionid;
	if (kptimer.g_pet_timerid == timerid && !kppet_get_lightweight_pet()) {
		kppet_config(actionid);
	}
	return 0;
}

unsigned int
kptimer_get_count(void)
{
	return kptimer.g_ntimers;
}

int
kptimer_set_count(unsigned int count)
{
	kptimer_setup();
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (count > KPTIMER_MAX) {
		return EINVAL;
	}
	kptimer.g_ntimers = count;
	return 0;
}

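/*
 * Return the minimum allowed timer period in Mach time units, which is more
 * restrictive for PET timers and when ktrace is in background mode.
 */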
uint64_t
kptimer_min_period_abs(bool pet)
{
	enum kptimer_period_limit limit = 0;
	if (ktrace_background_active()) {
		limit = pet ? KTPL_BG_PET : KTPL_BG;
	} else {
		limit = pet ? KTPL_FG_PET : KTPL_FG;
	}
	return kptimer_minperiods_mtu[limit];
}

uint32_t
kptimer_get_pet_timerid(void)
{
	return kptimer.g_pet_timerid;
}

int
kptimer_set_pet_timerid(uint32_t petid)
{
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (petid >= kptimer.g_ntimers) {
		kppet_config(0);
	} else {
		kppet_config(kptimer.g_timers[petid].kt_actionid);
		uint64_t period_abs = MAX(kptimer_min_period_abs(true),
		    kptimer.g_timers[petid].kt_period_abs);
		kptimer.g_timers[petid].kt_period_abs = period_abs;
	}

	kptimer.g_pet_timerid = petid;

	return 0;
}