/*
 * Copyright (c) 2011-2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * This file manages the timers used for on-CPU samples and PET.
 *
 * Each timer configured by a tool is represented by a kptimer structure.
 * The timer calls present in each structure are used to schedule CPU-local
 * timers. As each timer fires, that CPU samples itself and schedules another
 * timer to fire at the next deadline. The first timer to fire across all CPUs
 * determines that deadline. This causes the timers to fire at a consistent
 * cadence.
 *
 * Traditional PET uses a timer call to wake up its sampling thread and take
 * on-CPU samples.
 *
 * Synchronization for start and stop is provided by the ktrace subsystem lock.
 * Global state is stored in a single struct, to ease debugging.
 */
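
/*
 * To illustrate the shared cadence (hypothetical numbers, not taken from this
 * file): with a 1 ms period and a shared deadline at t = 5 ms, the first CPU
 * to handle its timer interrupt -- say at t = 5.1 ms -- advances the shared
 * deadline to t = 6 ms under `kt_lock`. A CPU whose timer fires later, at
 * t = 5.4 ms, observes the already-updated deadline and re-arms for t = 6 ms
 * rather than t = 6.4 ms, so every CPU stays locked to the same schedule.
 */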

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <kern/timer_queue.h>
#include <libkern/section_keywords.h>
#include <stdatomic.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kptimer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

#define KPTIMER_PET_INACTIVE (999)
#define KPTIMER_MAX (8)

struct kptimer {
	uint32_t kt_actionid;
	uint64_t kt_period_abs;
	/*
	 * The `kt_cur_deadline` field represents when the timer should next fire.
	 * It's used to synchronize between timers firing on each CPU. In the timer
	 * handler, each CPU will take the `kt_lock` and see if the
	 * `kt_cur_deadline` still needs to be updated for the timer fire. If so,
	 * it updates it and logs the timer fire event under the lock.
	 */
	lck_spin_t kt_lock;
	uint64_t kt_cur_deadline;

#if DEVELOPMENT || DEBUG
	/*
	 * To be set by the timer leader as a debugging aid for timeouts, if kperf
	 * happens to be on-CPU when they occur.
	 */
	uint64_t kt_fire_time;
#endif /* DEVELOPMENT || DEBUG */
};

static struct {
	struct kptimer *g_timers;
	uint64_t *g_cpu_deadlines;
	unsigned int g_ntimers;
	unsigned int g_pet_timerid;

	bool g_setup:1;
	bool g_pet_active:1;
	bool g_started:1;

	struct timer_call g_pet_timer;
} kptimer = {
	.g_pet_timerid = KPTIMER_PET_INACTIVE,
};

SECURITY_READ_ONLY_LATE(static uint64_t) kptimer_minperiods_mtu[KTPL_MAX];

/*
 * Enforce a minimum timer period to prevent interrupt storms.
 */
const uint64_t kptimer_minperiods_ns[KTPL_MAX] = {
#if defined(__x86_64__)
	[KTPL_FG] = 20 * NSEC_PER_USEC, /* The minimum timer period in xnu, period. */
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 2 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 5 * NSEC_PER_MSEC,
#elif defined(__arm64__)
	[KTPL_FG] = 50 * NSEC_PER_USEC,
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 2 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 10 * NSEC_PER_MSEC,
#elif defined(__arm__)
	[KTPL_FG] = 100 * NSEC_PER_USEC,
	[KTPL_BG] = 10 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 2 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 50 * NSEC_PER_MSEC,
#else
#error unexpected architecture
#endif
};
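
/*
 * For example (illustrative, derived from the table above): on arm64, a tool
 * requesting a 10 us foreground timer would have its period raised to the
 * 50 us KTPL_FG floor by kptimer_set_period(), and a PET timer while
 * background tracing is active would be clamped to KTPL_BG_PET's 10 ms.
 */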

static void kptimer_pet_handler(void * __unused param1, void * __unused param2);
static void kptimer_stop_cpu(processor_t processor);

void
kptimer_init(void)
{
	for (int i = 0; i < KTPL_MAX; i++) {
		nanoseconds_to_absolutetime(kptimer_minperiods_ns[i],
		    &kptimer_minperiods_mtu[i]);
	}
}

static void
kptimer_set_cpu_deadline(int cpuid, int timerid, uint64_t deadline)
{
	kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid] =
	    deadline;
}

static void
kptimer_setup(void)
{
	if (kptimer.g_setup) {
		return;
	}
	static lck_grp_t kptimer_lock_grp;
	lck_grp_init(&kptimer_lock_grp, "kptimer", LCK_GRP_ATTR_NULL);

	const size_t timers_size = KPTIMER_MAX * sizeof(struct kptimer);
	kptimer.g_timers = zalloc_permanent_tag(timers_size,
	    ZALIGN(struct kptimer), VM_KERN_MEMORY_DIAG);
	for (int i = 0; i < KPTIMER_MAX; i++) {
		lck_spin_init(&kptimer.g_timers[i].kt_lock, &kptimer_lock_grp,
		    LCK_ATTR_NULL);
	}

	const size_t deadlines_size = machine_info.logical_cpu_max * KPTIMER_MAX *
	    sizeof(kptimer.g_cpu_deadlines[0]);
	kptimer.g_cpu_deadlines = zalloc_permanent_tag(deadlines_size,
	    ZALIGN_64, VM_KERN_MEMORY_DIAG);
	for (int i = 0; i < KPTIMER_MAX; i++) {
		for (int j = 0; j < machine_info.logical_cpu_max; j++) {
			kptimer_set_cpu_deadline(j, i, EndOfAllTime);
		}
	}

	timer_call_setup(&kptimer.g_pet_timer, kptimer_pet_handler, NULL);

	kptimer.g_setup = true;
}

void
kptimer_reset(void)
{
	kptimer_stop();
	kptimer_set_pet_timerid(KPTIMER_PET_INACTIVE);

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_period_abs = 0;
		kptimer.g_timers[i].kt_actionid = 0;
		for (int j = 0; j < machine_info.logical_cpu_max; j++) {
			kptimer_set_cpu_deadline(j, i, EndOfAllTime);
		}
	}
}

#pragma mark - deadline management

static uint64_t
kptimer_get_cpu_deadline(int cpuid, int timerid)
{
	return kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid];
}

static void
kptimer_sample_curcpu(unsigned int actionid, unsigned int timerid,
    uint32_t flags)
{
	struct kperf_sample *intbuf = kperf_intr_sample_buffer();
#if DEVELOPMENT || DEBUG
	intbuf->sample_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */

	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_TIMER,
		.trigger_id = timerid,
	};

	(void)kperf_sample(intbuf, &ctx, actionid,
	    SAMPLE_FLAG_PEND_USER | flags);

	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END);
}

static void
kptimer_lock(struct kptimer *timer)
{
	lck_spin_lock(&timer->kt_lock);
}

static void
kptimer_unlock(struct kptimer *timer)
{
	lck_spin_unlock(&timer->kt_lock);
}

/*
 * If the deadline expired in the past, find the next deadline to program,
 * locked into the cadence provided by the period.
 */
static inline uint64_t
dead_reckon_deadline(uint64_t now, uint64_t deadline, uint64_t period)
{
	if (deadline < now) {
		uint64_t time_since = now - deadline;
		uint64_t extra_time = period - (time_since % period);
		return now + extra_time;
	}
	return deadline;
}
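
/*
 * Worked example for the dead reckoning above (illustrative values): with
 * period = 100, deadline = 1000, and now = 1230, time_since is 230 and
 * extra_time is 100 - (230 % 100) = 70, so the function returns 1300 -- the
 * first deadline past `now` that remains on the original 100-unit cadence.
 */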

static uint64_t
kptimer_fire(struct kptimer *timer, unsigned int timerid,
    uint64_t deadline, int __unused cpuid, uint64_t now)
{
	bool first = false;
	uint64_t next_deadline = deadline + timer->kt_period_abs;

	/*
	 * It's not straightforward to replace this lock with a compare-exchange,
	 * since the PERF_TM_FIRE event must be emitted *before* any subsequent
	 * PERF_TM_HNDLR events, so tools can understand the handlers are responding
	 * to this timer fire.
	 */
	kptimer_lock(timer);
	if (timer->kt_cur_deadline < next_deadline) {
		first = true;
		next_deadline = dead_reckon_deadline(now, next_deadline,
		    timer->kt_period_abs);
		timer->kt_cur_deadline = next_deadline;
		BUF_DATA(PERF_TM_FIRE, timerid, timerid == kptimer.g_pet_timerid,
		    timer->kt_period_abs, timer->kt_actionid);
#if DEVELOPMENT || DEBUG
		/*
		 * Debugging aid to see the last time this timer fired.
		 */
		timer->kt_fire_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */
		if (timerid == kptimer.g_pet_timerid && kppet_get_lightweight_pet()) {
			os_atomic_inc(&kppet_gencount, relaxed);
		}
	} else {
		/*
		 * In case this CPU has missed several timer fires, get it back on track
		 * by synchronizing with the latest timer fire.
		 */
		next_deadline = timer->kt_cur_deadline;
	}
	kptimer_unlock(timer);

	if (!first && !kperf_action_has_non_system(timer->kt_actionid)) {
		/*
		 * The first timer to fire will sample the system, so there's
		 * no need to run other timers if those are the only samplers
		 * for this action.
		 */
		return next_deadline;
	}

	kptimer_sample_curcpu(timer->kt_actionid, timerid,
	    first ? SAMPLE_FLAG_SYSTEM : 0);

	return next_deadline;
}
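
/*
 * To make the leader election above concrete (hypothetical scenario): if
 * CPUs 2 and 5 both take the interrupt for the same expired deadline,
 * whichever acquires `kt_lock` first sees a stale `kt_cur_deadline`, becomes
 * `first`, logs PERF_TM_FIRE, and samples with SAMPLE_FLAG_SYSTEM. The other
 * CPU only samples itself -- or returns early when the action has no
 * non-system samplers.
 */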

/*
 * Determine which of the timers fired.
 */
void
kptimer_expire(processor_t processor, int cpuid, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;

	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	switch (status) {
	case KPERF_SAMPLING_ON:
		break;
	case KPERF_SAMPLING_SHUTDOWN:
		kptimer_stop_cpu(processor);
		return;
	case KPERF_SAMPLING_OFF:
		panic("kperf: timer fired at %llu, but sampling is disabled", now);
	default:
		panic("kperf: unknown sampling state 0x%x", status);
	}

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0) {
			continue;
		}

		uint64_t cpudeadline = kptimer_get_cpu_deadline(cpuid, i);
		if (now > cpudeadline) {
			uint64_t deadline = kptimer_fire(timer, i, cpudeadline, cpuid, now);
			if (deadline == 0) {
				kptimer_set_cpu_deadline(cpuid, i, EndOfAllTime);
			} else {
				kptimer_set_cpu_deadline(cpuid, i, deadline);
				if (deadline < min_deadline) {
					min_deadline = deadline;
				}
			}
		}
	}
	if (min_deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL,
		    min_deadline, mach_absolute_time());
	}
}

#pragma mark - start/stop

static void
kptimer_broadcast(void (*fn)(void *))
{
	ktrace_assert_lock_held();

#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_ALL, ASYNC, fn, NULL);
#else /* defined(__x86_64__) */
	_Atomic uint32_t xcsync = 0;
	cpu_broadcast_xcall((uint32_t *)&xcsync, TRUE /* include self */, fn,
	    &xcsync);
#endif /* !defined(__x86_64__) */
}

static void
kptimer_broadcast_ack(void *arg)
{
#if defined(__x86_64__)
#pragma unused(arg)
#else /* defined(__x86_64__) */
	_Atomic uint32_t *xcsync = arg;
	int pending = os_atomic_dec(xcsync, relaxed);
	if (pending == 0) {
		thread_wakeup(xcsync);
	}
#endif /* !defined(__x86_64__) */
}

static void
kptimer_sample_pet_remote(void * __unused arg)
{
	if (!kperf_is_sampling()) {
		return;
	}
	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid, 0);
}

#if !defined(__x86_64__)

#include <arm/cpu_internal.h>

void kperf_signal_handler(void);
void
kperf_signal_handler(void)
{
	kptimer_sample_pet_remote(NULL);
}

#endif /* !defined(__x86_64__) */

static void
kptimer_broadcast_pet(void)
{
#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_OTHERS, NOSYNC, kptimer_sample_pet_remote,
	    NULL);
#else /* defined(__x86_64__) */
	int curcpu = cpu_number();
	for (int i = 0; i < machine_info.logical_cpu_max; i++) {
		if (i != curcpu) {
			cpu_signal(cpu_datap(i), SIGPkppet, NULL, NULL);
		}
	}
#endif /* !defined(__x86_64__) */
}

static void
kptimer_pet_handler(void * __unused param1, void * __unused param2)
{
	if (!kptimer.g_pet_active) {
		return;
	}

	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];

	BUF_DATA(PERF_TM_FIRE, kptimer.g_pet_timerid, 1, timer->kt_period_abs,
	    timer->kt_actionid);

	/*
	 * To get the on-CPU samples as close to this timer fire as possible, first
	 * broadcast to them to sample themselves.
	 */
	kptimer_broadcast_pet();

	/*
	 * Wake up the PET thread afterwards so it's not inadvertently sampled (it's
	 * a high-priority kernel thread). If the scheduler needs to IPI to run it,
	 * that IPI will be handled after the IPIs issued during the broadcast.
	 */
	kppet_wake_thread();

	/*
	 * Finally, sample this CPU, whose stacks and state have been preserved
	 * while running this handler. Make sure to include system measurements.
	 */
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid,
	    SAMPLE_FLAG_SYSTEM);

	BUF_INFO(PERF_TM_FIRE | DBG_FUNC_END);

	/*
	 * The PET thread will re-arm the timer when it's done.
	 */
}

void
kptimer_pet_enter(uint64_t sampledur_abs)
{
	if (!kperf_is_sampling()) {
		return;
	}

	uint64_t period_abs = kptimer.g_timers[kptimer.g_pet_timerid].kt_period_abs;
	uint64_t orig_period_abs = period_abs;

	if (period_abs > sampledur_abs) {
		period_abs -= sampledur_abs;
	}
	period_abs = MAX(kptimer_min_period_abs(true), period_abs);
	uint64_t deadline_abs = mach_absolute_time() + period_abs;

	BUF_INFO(PERF_PET_SCHED, orig_period_abs, period_abs, sampledur_abs,
	    deadline_abs);

	timer_call_enter(&kptimer.g_pet_timer, deadline_abs, TIMER_CALL_SYS_CRITICAL);
}
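
/*
 * For instance (illustrative numbers): with a 2 ms PET period, if the last
 * sampling pass took 0.5 ms, the timer above is re-armed 1.5 ms out, keeping
 * the start-to-start interval near 2 ms. If the pass instead took 1.9 ms on a
 * system with a 2 ms minimum PET period, the 0.1 ms remainder would be raised
 * back up to that minimum by the MAX() clamp.
 */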

static uint64_t
kptimer_earliest_deadline(processor_t processor, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		uint64_t cur_deadline = timer->kt_cur_deadline;
		if (cur_deadline == 0) {
			continue;
		}
		cur_deadline = dead_reckon_deadline(now, cur_deadline,
		    timer->kt_period_abs);
		kptimer_set_cpu_deadline(processor->cpu_id, i, cur_deadline);
		if (cur_deadline < min_deadline) {
			min_deadline = cur_deadline;
		}
	}
	return min_deadline;
}

void kptimer_running_setup(processor_t processor, uint64_t now);
void
kptimer_running_setup(processor_t processor, uint64_t now)
{
	uint64_t deadline = kptimer_earliest_deadline(processor, now);
	if (deadline < UINT64_MAX) {
		running_timer_setup(processor, RUNNING_TIMER_KPERF, NULL, deadline,
		    now);
	}
}

static void
kptimer_start_cpu(processor_t processor)
{
	uint64_t now = mach_absolute_time();
	uint64_t deadline = kptimer_earliest_deadline(processor, now);
	if (deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL, deadline,
		    now);
	}
}

static void
kptimer_start_remote(void *arg)
{
	kptimer_start_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}

static void
kptimer_stop_cpu(processor_t processor)
{
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer_set_cpu_deadline(processor->cpu_id, i, EndOfAllTime);
	}
	running_timer_cancel(processor, RUNNING_TIMER_KPERF);
}

void
kptimer_stop_curcpu(void)
{
	kptimer_stop_cpu(current_processor());
}

static void
kptimer_stop_remote(void * __unused arg)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	kptimer_stop_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}

/*
 * Called when a CPU is brought online. Handles the cases where the kperf timer
 * may have been either enabled or disabled while the CPU was offline
 * (preventing the enabling/disabling IPIs from reaching this CPU).
 */
void
kptimer_curcpu_up(void)
{
	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	processor_t processor = current_processor();

	assert(ml_get_interrupts_enabled() == FALSE);

	/*
	 * If kperf was enabled after the CPU was taken offline, this CPU would
	 * have missed the enabling IPI, so fix that here. And if the CPU went
	 * offline after kperf was enabled, recompute the deadline (since a timer
	 * update may have been missed) and keep the timer enabled.
	 */
	if (status == KPERF_SAMPLING_ON) {
		kptimer_start_cpu(processor);
	} else {
		/*
		 * Similarly, if the CPU is resuming after having previously armed the
		 * kperf timer before going down, and kperf is currently disabled,
		 * disable the kperf running timer on this CPU.
		 */
		kptimer_stop_cpu(processor);
	}
}

void
kptimer_start(void)
{
	ktrace_assert_lock_held();

	if (kptimer.g_started) {
		return;
	}

	uint64_t now = mach_absolute_time();
	unsigned int ntimers_active = 0;
	kptimer.g_started = true;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0 || timer->kt_actionid == 0) {
			/*
			 * No period or action means the timer is inactive.
			 */
			continue;
		} else if (!kppet_get_lightweight_pet() &&
		    i == kptimer.g_pet_timerid) {
			kptimer.g_pet_active = true;
			timer_call_enter(&kptimer.g_pet_timer, now + timer->kt_period_abs,
			    TIMER_CALL_SYS_CRITICAL);
		} else {
			timer->kt_cur_deadline = now + timer->kt_period_abs;
			ntimers_active++;
		}
	}
	if (ntimers_active > 0) {
		kptimer_broadcast(kptimer_start_remote);
	}
}

void
kptimer_stop(void)
{
	ktrace_assert_lock_held();

	if (!kptimer.g_started) {
		return;
	}

	int intrs_en = ml_set_interrupts_enabled(FALSE);

	if (kptimer.g_pet_active) {
		kptimer.g_pet_active = false;
		timer_call_cancel(&kptimer.g_pet_timer);
	}
	kptimer.g_started = false;
	kptimer_broadcast(kptimer_stop_remote);
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_cur_deadline = 0;
	}

	ml_set_interrupts_enabled(intrs_en);
}

#pragma mark - accessors

int
kptimer_get_period(unsigned int timerid, uint64_t *period_abs)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	*period_abs = kptimer.g_timers[timerid].kt_period_abs;
	return 0;
}

int
kptimer_set_period(unsigned int timerid, uint64_t period_abs)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	if (kptimer.g_started) {
		return EBUSY;
	}

	bool pet = kptimer.g_pet_timerid == timerid;
	uint64_t min_period = kptimer_min_period_abs(pet);
	if (period_abs != 0 && period_abs < min_period) {
		period_abs = min_period;
	}
	if (pet && !kppet_get_lightweight_pet()) {
		kppet_config(kptimer.g_timers[timerid].kt_actionid);
	}

	kptimer.g_timers[timerid].kt_period_abs = period_abs;
	return 0;
}

int
kptimer_get_action(unsigned int timerid, unsigned int *actionid)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	*actionid = kptimer.g_timers[timerid].kt_actionid;
	return 0;
}

int
kptimer_set_action(unsigned int timerid, unsigned int actionid)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	if (kptimer.g_started) {
		return EBUSY;
	}

	kptimer.g_timers[timerid].kt_actionid = actionid;
	if (kptimer.g_pet_timerid == timerid && !kppet_get_lightweight_pet()) {
		kppet_config(actionid);
	}
	return 0;
}

unsigned int
kptimer_get_count(void)
{
	return kptimer.g_ntimers;
}

int
kptimer_set_count(unsigned int count)
{
	kptimer_setup();
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (count > KPTIMER_MAX) {
		return EINVAL;
	}
	kptimer.g_ntimers = count;
	return 0;
}

uint64_t
kptimer_min_period_abs(bool pet)
{
	enum kptimer_period_limit limit = 0;
	if (ktrace_background_active()) {
		limit = pet ? KTPL_BG_PET : KTPL_BG;
	} else {
		limit = pet ? KTPL_FG_PET : KTPL_FG;
	}
	return kptimer_minperiods_mtu[limit];
}

uint32_t
kptimer_get_pet_timerid(void)
{
	return kptimer.g_pet_timerid;
}

int
kptimer_set_pet_timerid(uint32_t petid)
{
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (petid >= kptimer.g_ntimers) {
		kppet_config(0);
	} else {
		kppet_config(kptimer.g_timers[petid].kt_actionid);
		uint64_t period_abs = MAX(kptimer_min_period_abs(true),
		    kptimer.g_timers[petid].kt_period_abs);
		kptimer.g_timers[petid].kt_period_abs = period_abs;
	}

	kptimer.g_pet_timerid = petid;

	return 0;
}