xref: /xnu-8020.121.3/osfmk/kern/timer_call.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Timer interrupt callout module.
30  */
31 
32 #include <mach/mach_types.h>
33 
34 #include <kern/clock.h>
35 #include <kern/smp.h>
36 #include <kern/processor.h>
37 #include <kern/timer_call.h>
38 #include <kern/timer_queue.h>
39 #include <kern/thread.h>
40 #include <kern/policy_internal.h>
41 
42 #include <sys/kdebug.h>
43 
44 #if CONFIG_DTRACE
45 #include <mach/sdt.h>
46 #endif
47 
48 
49 #if DEBUG
50 #define TIMER_ASSERT    1
51 #endif
52 
53 //#define TIMER_ASSERT	1
54 //#define TIMER_DBG	1
55 
56 #if TIMER_DBG
57 #define DBG(x...) kprintf("DBG: " x);
58 #else
59 #define DBG(x...)
60 #endif
61 
62 #if TIMER_TRACE
63 #define TIMER_KDEBUG_TRACE      KERNEL_DEBUG_CONSTANT_IST
64 #else
65 #define TIMER_KDEBUG_TRACE(x...)
66 #endif
67 
68 LCK_GRP_DECLARE(timer_call_lck_grp, "timer_call");
69 LCK_GRP_DECLARE(timer_longterm_lck_grp, "timer_longterm");
70 
71 /* Timer queue lock must be acquired with interrupts disabled (under splclock()) */
72 #define timer_queue_lock_spin(queue)                                    \
73 	lck_mtx_lock_spin_always(&queue->lock_data)
74 
75 #define timer_queue_unlock(queue)               \
76 	lck_mtx_unlock_always(&queue->lock_data)
77 
78 /*
79  * The longterm timer object is a global structure holding all timers
80  * beyond the short-term, local timer queue threshold. The boot processor
81  * is responsible for moving each timer to its local timer queue
82  * if and when that timer becomes due within the threshold.
83  */
84 
85 /* Sentinel for "no time set": */
86 #define TIMER_LONGTERM_NONE             EndOfAllTime
/* The default threshold is the delta above which a timer is "long-term" */
88 #if defined(__x86_64__)
89 #define TIMER_LONGTERM_THRESHOLD        (1ULL * NSEC_PER_SEC)   /* 1 sec */
90 #else
91 #define TIMER_LONGTERM_THRESHOLD        TIMER_LONGTERM_NONE     /* disabled */
92 #endif
93 
94 /*
95  * The scan_limit throttles processing of the longterm queue.
96  * If the scan time exceeds this limit, we terminate, unlock
97  * and defer for scan_interval. This prevents unbounded holding of
98  * timer queue locks with interrupts masked.
99  */
100 #define TIMER_LONGTERM_SCAN_LIMIT       (100ULL * NSEC_PER_USEC)        /* 100 us */
101 #define TIMER_LONGTERM_SCAN_INTERVAL    (100ULL * NSEC_PER_USEC)        /* 100 us */
102 /* Sentinel for "scan limit exceeded": */
103 #define TIMER_LONGTERM_SCAN_AGAIN       0
104 
/*
 * Per-threshold state for the longterm timer machinery: the current
 * escalation interval/margin, the soonest longterm deadline and its
 * timer call, plus scan/latency statistics.
 */
typedef struct {
	uint64_t        interval;       /* longterm timer interval */
	uint64_t        margin;         /* fudge factor (10% of interval) */
	uint64_t        deadline;       /* first/soonest longterm deadline */
	uint64_t        preempted;      /* sooner timer has pre-empted */
	timer_call_t    call;           /* first/soonest longterm timer call */
	uint64_t        deadline_set;   /* next timer set */
	timer_call_data_t timer;        /* timer used by threshold management */
	                                /* Stats: */
	uint64_t        scans;          /*   num threshold timer scans */
	uint64_t        preempts;       /*   num threshold reductions */
	uint64_t        latency;        /*   average threshold latency */
	uint64_t        latency_min;    /*   minimum threshold latency */
	uint64_t        latency_max;    /*   maximum threshold latency */
} threshold_t;
120 
/*
 * Global longterm timer state: the (unordered) longterm queue itself,
 * enqueue/dequeue/escalation statistics, the escalation threshold
 * state, and the scan-throttling parameters.
 */
typedef struct {
	mpqueue_head_t  queue;          /* longterm timer list */
	uint64_t        enqueues;       /* num timers queued */
	uint64_t        dequeues;       /* num timers dequeued */
	uint64_t        escalates;      /* num timers becoming shortterm */
	uint64_t        scan_time;      /* last time the list was scanned */
	threshold_t     threshold;      /* longterm timer threshold */
	uint64_t        scan_limit;     /* maximum scan time */
	uint64_t        scan_interval;  /* interval between LT "escalation" scans */
	uint64_t        scan_pauses;    /* num scans exceeding time limit */
} timer_longterm_t;
132 
133 timer_longterm_t                timer_longterm = {
134 	.scan_limit = TIMER_LONGTERM_SCAN_LIMIT,
135 	.scan_interval = TIMER_LONGTERM_SCAN_INTERVAL,
136 };
137 
138 static mpqueue_head_t           *timer_longterm_queue = NULL;
139 
140 static void                     timer_longterm_init(void);
141 static void                     timer_longterm_callout(
142 	timer_call_param_t      p0,
143 	timer_call_param_t      p1);
144 extern void                     timer_longterm_scan(
145 	timer_longterm_t        *tlp,
146 	uint64_t                now);
147 static void                     timer_longterm_update(
148 	timer_longterm_t *tlp);
149 static void                     timer_longterm_update_locked(
150 	timer_longterm_t *tlp);
151 static mpqueue_head_t *         timer_longterm_enqueue_unlocked(
152 	timer_call_t            call,
153 	uint64_t                now,
154 	uint64_t                deadline,
155 	mpqueue_head_t **       old_queue,
156 	uint64_t                soft_deadline,
157 	uint64_t                ttd,
158 	timer_call_param_t      param1,
159 	uint32_t                callout_flags);
160 static void                     timer_longterm_dequeued_locked(
161 	timer_call_t            call);
162 
163 uint64_t past_deadline_timers;
164 uint64_t past_deadline_deltas;
165 uint64_t past_deadline_longest;
166 uint64_t past_deadline_shortest = ~0ULL;
167 enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};
168 
169 uint64_t past_deadline_timer_adjustment;
170 
171 static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint64_t leeway, uint32_t flags, boolean_t ratelimited);
172 boolean_t       mach_timer_coalescing_enabled = TRUE;
173 
174 mpqueue_head_t  *timer_call_enqueue_deadline_unlocked(
175 	timer_call_t            call,
176 	mpqueue_head_t          *queue,
177 	uint64_t                deadline,
178 	uint64_t                soft_deadline,
179 	uint64_t                ttd,
180 	timer_call_param_t      param1,
181 	uint32_t                flags);
182 
183 mpqueue_head_t  *timer_call_dequeue_unlocked(
184 	timer_call_t            call);
185 
186 timer_coalescing_priority_params_t tcoal_prio_params;
187 
188 #if TCOAL_PRIO_STATS
189 int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
190 #define TCOAL_PRIO_STAT(x) (x++)
191 #else
192 #define TCOAL_PRIO_STAT(x)
193 #endif
194 
195 static void
timer_call_init_abstime(void)196 timer_call_init_abstime(void)
197 {
198 	int i;
199 	uint64_t result;
200 	timer_coalescing_priority_params_ns_t * tcoal_prio_params_init = timer_call_get_priority_params();
201 	nanoseconds_to_absolutetime(PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
202 	nanoseconds_to_absolutetime(tcoal_prio_params_init->idle_entry_timer_processing_hdeadline_threshold_ns, &result);
203 	tcoal_prio_params.idle_entry_timer_processing_hdeadline_threshold_abstime = (uint32_t)result;
204 	nanoseconds_to_absolutetime(tcoal_prio_params_init->interrupt_timer_coalescing_ilat_threshold_ns, &result);
205 	tcoal_prio_params.interrupt_timer_coalescing_ilat_threshold_abstime = (uint32_t)result;
206 	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_resort_threshold_ns, &result);
207 	tcoal_prio_params.timer_resort_threshold_abstime = (uint32_t)result;
208 	tcoal_prio_params.timer_coalesce_rt_shift = tcoal_prio_params_init->timer_coalesce_rt_shift;
209 	tcoal_prio_params.timer_coalesce_bg_shift = tcoal_prio_params_init->timer_coalesce_bg_shift;
210 	tcoal_prio_params.timer_coalesce_kt_shift = tcoal_prio_params_init->timer_coalesce_kt_shift;
211 	tcoal_prio_params.timer_coalesce_fp_shift = tcoal_prio_params_init->timer_coalesce_fp_shift;
212 	tcoal_prio_params.timer_coalesce_ts_shift = tcoal_prio_params_init->timer_coalesce_ts_shift;
213 
214 	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_rt_ns_max,
215 	    &tcoal_prio_params.timer_coalesce_rt_abstime_max);
216 	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_bg_ns_max,
217 	    &tcoal_prio_params.timer_coalesce_bg_abstime_max);
218 	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_kt_ns_max,
219 	    &tcoal_prio_params.timer_coalesce_kt_abstime_max);
220 	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_fp_ns_max,
221 	    &tcoal_prio_params.timer_coalesce_fp_abstime_max);
222 	nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_ts_ns_max,
223 	    &tcoal_prio_params.timer_coalesce_ts_abstime_max);
224 
225 	for (i = 0; i < NUM_LATENCY_QOS_TIERS; i++) {
226 		tcoal_prio_params.latency_qos_scale[i] = tcoal_prio_params_init->latency_qos_scale[i];
227 		nanoseconds_to_absolutetime(tcoal_prio_params_init->latency_qos_ns_max[i],
228 		    &tcoal_prio_params.latency_qos_abstime_max[i]);
229 		tcoal_prio_params.latency_tier_rate_limited[i] = tcoal_prio_params_init->latency_tier_rate_limited[i];
230 	}
231 }
232 
233 
/*
 * timer_call_init:
 * One-time module initialization: set up the longterm timer state,
 * then precompute the absolute-time coalescing parameters.
 */
void
timer_call_init(void)
{
	timer_longterm_init();
	timer_call_init_abstime();
}
240 
241 
/*
 * timer_call_queue_init:
 * Initialize a timer queue head using the shared timer_call lock group.
 */
void
timer_call_queue_init(mpqueue_head_t *queue)
{
	DBG("timer_call_queue_init(%p)\n", queue);
	mpqueue_init(queue, &timer_call_lck_grp, LCK_ATTR_NULL);
}
248 
249 
/*
 * timer_call_setup:
 * Initialize a caller-provided timer_call to invoke func with param0
 * as its first argument when it later fires.
 *
 * The compound-literal assignment zeroes every field not explicitly
 * named (deadline, flags, queue linkage, param1, etc.) in addition to
 * setting the callout function, param0, and clearing async_dequeue.
 */
void
timer_call_setup(
	timer_call_t                    call,
	timer_call_func_t               func,
	timer_call_param_t              param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);

	*call = (struct timer_call) {
		.tc_func = func,
		.tc_param0 = param0,
		.tc_async_dequeue = false,
	};

	/* Per-entry lock protecting the call's queue membership. */
	simple_lock_init(&(call)->tc_lock, 0);
}
266 
267 timer_call_t
timer_call_alloc(timer_call_func_t func,timer_call_param_t param0)268 timer_call_alloc(
269 	timer_call_func_t       func,
270 	timer_call_param_t      param0)
271 {
272 	timer_call_t call;
273 
274 	call = kalloc_type(struct timer_call, Z_ZERO | Z_WAITOK | Z_NOFAIL);
275 	timer_call_setup(call, func, param0);
276 	return call;
277 }
278 
279 void
timer_call_free(timer_call_t call)280 timer_call_free(
281 	timer_call_t            call)
282 {
283 	kfree_type(struct timer_call, call);
284 }
285 
286 static mpqueue_head_t*
mpqueue_for_timer_call(timer_call_t entry)287 mpqueue_for_timer_call(timer_call_t entry)
288 {
289 	queue_t queue_entry_is_on = entry->tc_queue;
290 	/* 'cast' the queue back to the orignal mpqueue */
291 	return __container_of(queue_entry_is_on, struct mpqueue_head, head);
292 }
293 
294 
/*
 * timer_call_entry_dequeue:
 * Unlink a queued entry from its current queue. Both the entry lock
 * and the owning queue lock must be held (asserted under TIMER_ASSERT).
 * Returns the queue the entry was removed from.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t            entry)
{
	mpqueue_head_t  *old_mpqueue = mpqueue_for_timer_call(entry);

	/* The entry was always on a queue */
	assert(old_mpqueue != NULL);

#if TIMER_ASSERT
	if (!hw_lock_held((hw_lock_t)&entry->tc_lock)) {
		panic("_call_entry_dequeue() "
		    "entry %p is not locked\n", entry);
	}

	/*
	 * XXX The queue lock is actually a mutex in spin mode
	 *     but there's no way to test for it being held
	 *     so we pretend it's a spinlock!
	 */
	if (!hw_lock_held((hw_lock_t)&old_mpqueue->lock_data)) {
		panic("_call_entry_dequeue() "
		    "queue %p is not locked\n", old_mpqueue);
	}
#endif /* TIMER_ASSERT */

	/*
	 * The longterm queue is unordered and keeps no priority queue,
	 * so only regular queues need the pqlink removed.
	 */
	if (old_mpqueue != timer_longterm_queue) {
		priority_queue_remove(&old_mpqueue->mpq_pqhead,
		    &entry->tc_pqlink);
	}

	remqueue(&entry->tc_qlink);

	/* Mark the entry unqueued (locking case 1a). */
	entry->tc_queue = NULL;

	old_mpqueue->count--;

	return old_mpqueue;
}
334 
/*
 * timer_call_entry_enqueue_deadline:
 * Place (or re-place) an entry on new_mpqueue with the given hard
 * deadline. The entry lock and new queue lock must be held; if the
 * entry is already queued, it must be on new_mpqueue or unqueued
 * (asserted under TIMER_ASSERT). Neither queue may be the longterm
 * queue. Returns the entry's previous queue (possibly NULL).
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t                    entry,
	mpqueue_head_t                  *new_mpqueue,
	uint64_t                        deadline)
{
	mpqueue_head_t  *old_mpqueue = mpqueue_for_timer_call(entry);

#if TIMER_ASSERT
	if (!hw_lock_held((hw_lock_t)&entry->tc_lock)) {
		panic("_call_entry_enqueue_deadline() "
		    "entry %p is not locked\n", entry);
	}

	/* XXX More lock pretense:  */
	if (!hw_lock_held((hw_lock_t)&new_mpqueue->lock_data)) {
		panic("_call_entry_enqueue_deadline() "
		    "queue %p is not locked\n", new_mpqueue);
	}

	if (old_mpqueue != NULL && old_mpqueue != new_mpqueue) {
		panic("_call_entry_enqueue_deadline() "
		    "old_mpqueue %p != new_mpqueue", old_mpqueue);
	}
#endif /* TIMER_ASSERT */

	/* no longterm queue involved */
	assert(new_mpqueue != timer_longterm_queue);
	assert(old_mpqueue != timer_longterm_queue);

	if (old_mpqueue == new_mpqueue) {
		/* optimize the same-queue case to avoid a full re-insert:
		 * update the deadline in place and re-heapify in the
		 * appropriate direction. */
		uint64_t old_deadline = entry->tc_pqlink.deadline;
		entry->tc_pqlink.deadline = deadline;

		if (old_deadline < deadline) {
			priority_queue_entry_increased(&new_mpqueue->mpq_pqhead,
			    &entry->tc_pqlink);
		} else {
			priority_queue_entry_decreased(&new_mpqueue->mpq_pqhead,
			    &entry->tc_pqlink);
		}
	} else {
		/* Cross-queue (or first) insertion: move/add the linked-list
		 * linkage, then insert into the priority queue. */
		if (old_mpqueue != NULL) {
			priority_queue_remove(&old_mpqueue->mpq_pqhead,
			    &entry->tc_pqlink);

			re_queue_tail(&new_mpqueue->head, &entry->tc_qlink);
		} else {
			enqueue_tail(&new_mpqueue->head, &entry->tc_qlink);
		}

		entry->tc_queue = &new_mpqueue->head;
		entry->tc_pqlink.deadline = deadline;

		priority_queue_insert(&new_mpqueue->mpq_pqhead, &entry->tc_pqlink);
	}


	/* For efficiency, track the earliest soft deadline on the queue,
	 * so that fuzzy decisions can be made without lock acquisitions.
	 * Rate-limited timers expose their hard deadline instead.
	 */

	timer_call_t thead = priority_queue_min(&new_mpqueue->mpq_pqhead, struct timer_call, tc_pqlink);

	new_mpqueue->earliest_soft_deadline = thead->tc_flags & TIMER_CALL_RATELIMITED ? thead->tc_pqlink.deadline : thead->tc_soft_deadline;

	if (old_mpqueue) {
		old_mpqueue->count--;
	}
	new_mpqueue->count++;

	return old_mpqueue;
}
409 
/*
 * timer_call_entry_enqueue_tail:
 * Append an (unqueued) entry to the tail of the longterm queue.
 * The entry and queue locks must be held by the caller.
 */
static __inline__ void
timer_call_entry_enqueue_tail(
	timer_call_t                    entry,
	mpqueue_head_t                  *queue)
{
	/* entry is always dequeued before this call */
	assert(entry->tc_queue == NULL);

	/*
	 * this is only used for timer_longterm_queue, which is unordered
	 * and thus needs no priority queueing
	 */
	assert(queue == timer_longterm_queue);

	enqueue_tail(&queue->head, &entry->tc_qlink);

	entry->tc_queue = &queue->head;

	queue->count++;
	return;
}
431 
/*
 * Remove timer entry from its queue but don't change the queue pointer
 * and set the async_dequeue flag. This is locking case 2b: the caller
 * holds the queue lock but failed to try-lock the entry, so the entry's
 * tc_queue back-pointer (protected by the entry lock we don't own) must
 * be left intact for the owner to reconcile later — see case 1c.
 */
static __inline__ void
timer_call_entry_dequeue_async(
	timer_call_t            entry)
{
	mpqueue_head_t  *old_mpqueue = mpqueue_for_timer_call(entry);
	if (old_mpqueue) {
		old_mpqueue->count--;

		/* Longterm queue entries have no priority-queue linkage. */
		if (old_mpqueue != timer_longterm_queue) {
			priority_queue_remove(&old_mpqueue->mpq_pqhead,
			    &entry->tc_pqlink);
		}

		remqueue(&entry->tc_qlink);
		entry->tc_async_dequeue = true;
	}
	return;
}
454 
455 #if TIMER_ASSERT
456 unsigned timer_call_enqueue_deadline_unlocked_async1;
457 unsigned timer_call_enqueue_deadline_unlocked_async2;
458 #endif
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 *
 * Takes the entry lock first (canonical order, case 0), reconciles any
 * pending async dequeue (case 1c), moves the entry off a different old
 * queue if necessary, then programs the callout parameters and enqueues
 * it on the target queue under that queue's lock.
 * Returns the queue the entry was previously on, or NULL.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t                    call,
	mpqueue_head_t                  *queue,
	uint64_t                        deadline,
	uint64_t                        soft_deadline,
	uint64_t                        ttd,
	timer_call_param_t              param1,
	uint32_t                        callout_flags)
{
	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->tc_lock, LCK_GRP_NULL);

	mpqueue_head_t  *old_queue = mpqueue_for_timer_call(call);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->tc_async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    call->tc_async_dequeue,
			    VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue),
			    0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->tc_async_dequeue = false;
			call->tc_queue = NULL;
		} else if (old_queue != queue) {
			/* Queued elsewhere: unlink before re-targeting. */
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		/* Leaving the longterm queue requires threshold bookkeeping. */
		if (old_queue == timer_longterm_queue) {
			timer_longterm_dequeued_locked(call);
		}
		if (old_queue != queue) {
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	/* Program callout parameters under the target queue lock (rule 3). */
	call->tc_soft_deadline = soft_deadline;
	call->tc_flags = callout_flags;
	call->tc_param1 = param1;
	call->tc_ttd = ttd;

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->tc_lock);

	return old_queue;
}
521 
522 #if TIMER_ASSERT
523 unsigned timer_call_dequeue_unlocked_async1;
524 unsigned timer_call_dequeue_unlocked_async2;
525 #endif
/*
 * timer_call_dequeue_unlocked:
 * Remove a timer call from whatever queue it is on, with neither lock
 * held on entry (interrupts disabled). Takes the entry lock then the
 * queue lock, reconciling a pending async dequeue (case 1c) if one is
 * found. Returns the queue the call was on, or NULL if unqueued.
 */
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t            call)
{
	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->tc_lock, LCK_GRP_NULL);

	mpqueue_head_t  *old_queue = mpqueue_for_timer_call(call);

#if TIMER_ASSERT
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    call->tc_async_dequeue,
	    VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue),
	    0, 0);
#endif
	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->tc_async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    call->tc_async_dequeue,
			    VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue),
			    0x1c, 0);
			timer_call_dequeue_unlocked_async1++;
#endif
			call->tc_async_dequeue = false;
			call->tc_queue = NULL;
		} else {
			timer_call_entry_dequeue(call);
		}
		/* Leaving the longterm queue requires threshold bookkeeping. */
		if (old_queue == timer_longterm_queue) {
			timer_longterm_dequeued_locked(call);
		}
		timer_queue_unlock(old_queue);
	}
	simple_unlock(&call->tc_lock);
	return old_queue;
}
570 
571 uint64_t
timer_call_past_deadline_timer_handle(uint64_t deadline,uint64_t ctime)572 timer_call_past_deadline_timer_handle(uint64_t deadline, uint64_t ctime)
573 {
574 	uint64_t delta = (ctime - deadline);
575 
576 	past_deadline_timers++;
577 	past_deadline_deltas += delta;
578 	if (delta > past_deadline_longest) {
579 		past_deadline_longest = deadline;
580 	}
581 	if (delta < past_deadline_shortest) {
582 		past_deadline_shortest = delta;
583 	}
584 
585 	return ctime + past_deadline_timer_adjustment;
586 }
587 
588 /*
589  * Timer call entry locking model
590  * ==============================
591  *
592  * Timer call entries are linked on per-cpu timer queues which are protected
593  * by the queue lock and the call entry lock. The locking protocol is:
594  *
595  *  0) The canonical locking order is timer call entry followed by queue.
596  *
597  *  1) With only the entry lock held, entry.queue is valid:
598  *    1a) NULL: the entry is not queued, or
599  *    1b) non-NULL: this queue must be locked before the entry is modified.
600  *        After locking the queue, the call.async_dequeue flag must be checked:
601  *    1c) TRUE: the entry was removed from the queue by another thread
602  *	        and we must NULL the entry.queue and reset this flag, or
603  *    1d) FALSE: (ie. queued), the entry can be manipulated.
604  *
605  *  2) If a queue lock is obtained first, the queue is stable:
606  *    2a) If a try-lock of a queued entry succeeds, the call can be operated on
607  *	  and dequeued.
608  *    2b) If a try-lock fails, it indicates that another thread is attempting
609  *        to change the entry and move it to a different position in this queue
610  *        or to different queue. The entry can be dequeued but it should not be
611  *        operated upon since it is being changed. Furthermore, we don't null
612  *	  the entry.queue pointer (protected by the entry lock we don't own).
613  *	  Instead, we set the async_dequeue flag -- see (1c).
614  *    2c) Same as 2b but occurring when a longterm timer is matured.
615  *  3) A callout's parameters (deadline, flags, parameters, soft deadline &c.)
616  *     should be manipulated with the appropriate timer queue lock held,
617  *     to prevent queue traversal observations from observing inconsistent
618  *     updates to an in-flight callout.
619  */
620 
621 /*
622  * In the debug case, we assert that the timer call locking protocol
623  * is being obeyed.
624  */
625 
/*
 * timer_call_enter_internal:
 * Common implementation behind timer_call_enter/enter1/enter_with_leeway.
 * Computes the coalescing slop for the request, clamps the padded
 * deadline against overflow, adjusts already-expired deadlines, and
 * enqueues the call on either the longterm queue or a per-CPU queue.
 *
 * call:        the timer call to arm (tc_func must be set).
 * param1:      second argument passed to the callout function.
 * deadline:    requested hard deadline (mach absolute time).
 * leeway:      caller-specified leeway, honored only with TIMER_CALL_LEEWAY.
 * flags:       urgency/leeway/ratelimit flags.
 * ratelimited: caller-forced rate limiting.
 *
 * Returns TRUE if the call was previously queued.
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t            call,
	timer_call_param_t      param1,
	uint64_t                deadline,
	uint64_t                leeway,
	uint32_t                flags,
	boolean_t               ratelimited)
{
	mpqueue_head_t          *queue = NULL;
	mpqueue_head_t          *old_queue;
	spl_t                   s;
	uint64_t                slop;
	uint32_t                urgency;
	uint64_t                sdeadline, ttd;

	assert(call->tc_func != NULL);
	s = splclock();

	/* The soft deadline is the un-padded request. */
	sdeadline = deadline;
	uint64_t ctime = mach_absolute_time();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ENTER | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    VM_KERNEL_ADDRHIDE(param1), deadline, flags, 0);

	urgency = (flags & TIMER_CALL_URGENCY_MASK);

	boolean_t slop_ratelimited = FALSE;
	slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited);

	/* Caller-provided leeway may only widen the coalescing window. */
	if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop) {
		slop = leeway;
	}

	/* Saturate instead of wrapping when deadline + slop would overflow. */
	if (UINT64_MAX - deadline <= slop) {
		deadline = UINT64_MAX;
	} else {
		deadline += slop;
	}

	/* Already expired: record stats and nudge slightly into the future. */
	if (__improbable(deadline < ctime)) {
		deadline = timer_call_past_deadline_timer_handle(deadline, ctime);
		sdeadline = deadline;
	}

	if (ratelimited || slop_ratelimited) {
		flags |= TIMER_CALL_RATELIMITED;
	} else {
		flags &= ~TIMER_CALL_RATELIMITED;
	}

	ttd =  sdeadline - ctime;
#if CONFIG_DTRACE
	DTRACE_TMR7(callout__create, timer_call_func_t, call->tc_func,
	    timer_call_param_t, call->tc_param0, uint32_t, flags,
	    (deadline - sdeadline),
	    (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call);
#endif

	/* Program timer callout parameters under the appropriate per-CPU or
	 * longterm queue lock. The callout may have been previously enqueued
	 * and in-flight on this or another timer queue.
	 */
	if (!ratelimited && !slop_ratelimited) {
		queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue, sdeadline, ttd, param1, flags);
	}

	/* Not accepted by the longterm path: assign a per-CPU queue. */
	if (queue == NULL) {
		queue = timer_queue_assign(deadline);
		old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline, sdeadline, ttd, param1, flags);
	}

#if TIMER_TRACE
	call->tc_entry_time = ctime;
#endif

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ENTER | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    (old_queue != NULL), deadline, queue->count, 0);

	splx(s);

	return old_queue != NULL;
}
713 
714 /*
715  * timer_call_*()
716  *	return boolean indicating whether the call was previously queued.
717  */
718 boolean_t
timer_call_enter(timer_call_t call,uint64_t deadline,uint32_t flags)719 timer_call_enter(
720 	timer_call_t            call,
721 	uint64_t                deadline,
722 	uint32_t                flags)
723 {
724 	return timer_call_enter_internal(call, NULL, deadline, 0, flags, FALSE);
725 }
726 
727 boolean_t
timer_call_enter1(timer_call_t call,timer_call_param_t param1,uint64_t deadline,uint32_t flags)728 timer_call_enter1(
729 	timer_call_t            call,
730 	timer_call_param_t      param1,
731 	uint64_t                deadline,
732 	uint32_t                flags)
733 {
734 	return timer_call_enter_internal(call, param1, deadline, 0, flags, FALSE);
735 }
736 
737 boolean_t
timer_call_enter_with_leeway(timer_call_t call,timer_call_param_t param1,uint64_t deadline,uint64_t leeway,uint32_t flags,boolean_t ratelimited)738 timer_call_enter_with_leeway(
739 	timer_call_t            call,
740 	timer_call_param_t      param1,
741 	uint64_t                deadline,
742 	uint64_t                leeway,
743 	uint32_t                flags,
744 	boolean_t               ratelimited)
745 {
746 	return timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited);
747 }
748 
/*
 * timer_call_cancel:
 * Remove a pending timer call from its queue, if any, and update the
 * queue's hardware deadline and earliest_soft_deadline cache to reflect
 * the new head (or an empty queue). Returns TRUE if the call was queued.
 */
boolean_t
timer_call_cancel(
	timer_call_t            call)
{
	mpqueue_head_t          *old_queue;
	spl_t                   s;

	s = splclock();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_CANCEL | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    call->tc_pqlink.deadline, call->tc_soft_deadline, call->tc_flags, 0);

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);

		/* Re-derive the queue's soonest timer now that `call` is gone. */
		timer_call_t new_head = priority_queue_min(&old_queue->mpq_pqhead, struct timer_call, tc_pqlink);

		if (new_head) {
			timer_queue_cancel(old_queue, call->tc_pqlink.deadline, new_head->tc_pqlink.deadline);
			old_queue->earliest_soft_deadline = new_head->tc_flags & TIMER_CALL_RATELIMITED ? new_head->tc_pqlink.deadline : new_head->tc_soft_deadline;
		} else {
			timer_queue_cancel(old_queue, call->tc_pqlink.deadline, UINT64_MAX);
			old_queue->earliest_soft_deadline = UINT64_MAX;
		}

		timer_queue_unlock(old_queue);
	}
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_CANCEL | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    VM_KERNEL_UNSLIDE_OR_PERM(old_queue),
	    call->tc_pqlink.deadline - mach_absolute_time(),
	    call->tc_pqlink.deadline - call->tc_entry_time, 0);
	splx(s);

#if CONFIG_DTRACE
	DTRACE_TMR6(callout__cancel, timer_call_func_t, call->tc_func,
	    timer_call_param_t, call->tc_param0, uint32_t, call->tc_flags, 0,
	    (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF));
#endif /* CONFIG_DTRACE */

	return old_queue != NULL;
}
796 
797 static uint32_t timer_queue_shutdown_lock_skips;
798 static uint32_t timer_queue_shutdown_discarded;
799 
/*
 * timer_queue_shutdown:
 * Drain a timer queue (e.g. when its processor goes offline), migrating
 * each non-LOCAL timer to a freshly assigned queue and discarding
 * TIMER_CALL_LOCAL ones. Entries whose lock cannot be try-acquired are
 * async-dequeued per locking case 2b and skipped.
 */
void
timer_queue_shutdown(
	mpqueue_head_t          *queue)
{
	timer_call_t            call;
	mpqueue_head_t          *new_queue;
	spl_t                   s;


	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Loop until the queue is observed empty under its lock. */
	while (TRUE) {
		timer_queue_lock_spin(queue);

		call = qe_queue_first(&queue->head, struct timer_call, tc_qlink);

		if (call == NULL) {
			break;
		}

		if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    call->tc_async_dequeue,
			    VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue),
			    0x2b, 0);
#endif
			/* Drop and retake the queue lock to restart the scan. */
			timer_queue_unlock(queue);
			continue;
		}

		boolean_t call_local = ((call->tc_flags & TIMER_CALL_LOCAL) != 0);

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		if (call_local == FALSE) {
			/* and queue it on new, discarding LOCAL timers */
			new_queue = timer_queue_assign(call->tc_pqlink.deadline);
			timer_queue_lock_spin(new_queue);
			timer_call_entry_enqueue_deadline(
				call, new_queue, call->tc_pqlink.deadline);
			timer_queue_unlock(new_queue);
		} else {
			timer_queue_shutdown_discarded++;
		}

		/* Debug builds treat any discarded LOCAL timer as fatal. */
		assert(call_local == FALSE);
		simple_unlock(&call->tc_lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
866 
867 
/* Count of expirations where a timer's lock could not be taken and the
 * entry was dequeued asynchronously instead of being run. */
static uint32_t timer_queue_expire_lock_skips;
/*
 * timer_queue_expire_with_options:
 *	Run every timer call on `queue` whose soft deadline is at or before
 *	`deadline`, invoking each callout with both the entry lock and the
 *	queue lock dropped (so callouts may re-arm or cancel timers).
 *	Returns the earliest remaining hard deadline on the queue, or
 *	UINT64_MAX if the queue drains empty.
 *
 *	With rescan == FALSE, iteration is by the deadline-ordered priority
 *	queue and stops at the first head entry not yet due.  With
 *	rescan == TRUE, the entire linked list is walked so that
 *	rate-limited timers (whose hard deadline may be skewed past their
 *	soft deadline) can be re-sorted by soft deadline.
 */
uint64_t
timer_queue_expire_with_options(
	mpqueue_head_t          *queue,
	uint64_t                deadline,
	boolean_t               rescan)
{
	timer_call_t    call = NULL;
	uint32_t tc_iterations = 0;
	DBG("timer_queue_expire(%p,)\n", queue);

	/* 'rescan' means look at every timer in the list, instead of
	 * early-exiting when the head of the list expires in the future.
	 * when 'rescan' is true, iterate by linked list instead of priority queue.
	 *
	 * TODO: if we keep a deadline ordered and soft-deadline ordered
	 * priority queue, then it's no longer necessary to do that
	 */

	uint64_t cur_deadline = deadline;
	timer_queue_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		/* Upon processing one or more timer calls, refresh the
		 * deadline to account for time elapsed in the callout
		 */
		if (++tc_iterations > 1) {
			cur_deadline = mach_absolute_time();
		}

		/* call is NULL on entry and after each callout/skip; pick the
		 * next candidate per the iteration mode described above. */
		if (call == NULL) {
			if (rescan == FALSE) {
				call = priority_queue_min(&queue->mpq_pqhead, struct timer_call, tc_pqlink);
			} else {
				call = qe_queue_first(&queue->head, struct timer_call, tc_qlink);
			}
		}

		if (call->tc_soft_deadline <= cur_deadline) {
			timer_call_func_t               func;
			timer_call_param_t              param0, param1;

			TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->tc_soft_deadline, 0, 0, 0);
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    call->tc_soft_deadline,
			    call->tc_pqlink.deadline,
			    call->tc_entry_time, 0);

			/* Rate-limited timer whose hard deadline hasn't arrived:
			 * stop (normal mode) or keep walking (rescan mode). */
			if ((call->tc_flags & TIMER_CALL_RATELIMITED) &&
			    (call->tc_pqlink.deadline > cur_deadline)) {
				if (rescan == FALSE) {
					break;
				}
			}

			if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				timer_call_entry_dequeue_async(call);
				call = NULL;
				continue;
			}

			timer_call_entry_dequeue(call);

			/* Snapshot the callout while the entry lock is held;
			 * both locks are dropped before invoking it. */
			func = call->tc_func;
			param0 = call->tc_param0;
			param1 = call->tc_param1;

			simple_unlock(&call->tc_lock);
			timer_queue_unlock(queue);

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_CALLOUT | DBG_FUNC_START,
			    VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func),
			    VM_KERNEL_ADDRHIDE(param0),
			    VM_KERNEL_ADDRHIDE(param1),
			    0);

#if CONFIG_DTRACE
			DTRACE_TMR7(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0, unsigned, call->tc_flags,
			    0, (call->tc_ttd >> 32),
			    (unsigned) (call->tc_ttd & 0xFFFFFFFF), call);
#endif
			/* Maintain time-to-deadline in per-processor data
			 * structure for thread wakeup deadline statistics.
			 */
			uint64_t *ttdp = &current_processor()->timer_call_ttd;
			*ttdp = call->tc_ttd;
			(*func)(param0, param1);
			*ttdp = 0;
#if CONFIG_DTRACE
			DTRACE_TMR4(callout__end, timer_call_func_t, func,
			    param0, param1, call);
#endif

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_CALLOUT | DBG_FUNC_END,
			    VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func),
			    VM_KERNEL_ADDRHIDE(param0),
			    VM_KERNEL_ADDRHIDE(param1),
			    0);
			call = NULL;
			timer_queue_lock_spin(queue);
		} else {
			if (__probable(rescan == FALSE)) {
				break;
			} else {
				/* Not yet due: re-sort if skewed, else advance. */
				int64_t skew = call->tc_pqlink.deadline - call->tc_soft_deadline;
				assert(call->tc_pqlink.deadline >= call->tc_soft_deadline);

				/* DRK: On a latency quality-of-service level change,
				 * re-sort potentially rate-limited timers. The platform
				 * layer determines which timers require
				 * this. In the absence of the per-callout
				 * synchronization requirement, a global resort could
				 * be more efficient. The re-sort effectively
				 * annuls all timer adjustments, i.e. the "soft
				 * deadline" is the sort key.
				 */

				if (timer_resort_threshold(skew)) {
					if (__probable(simple_lock_try(&call->tc_lock, LCK_GRP_NULL))) {
						/* TODO: don't need to dequeue before enqueue */
						timer_call_entry_dequeue(call);
						timer_call_entry_enqueue_deadline(call, queue, call->tc_soft_deadline);
						simple_unlock(&call->tc_lock);
						call = NULL;
					}
				}
				if (call) {
					call = qe_queue_next(&queue->head, call, struct timer_call, tc_qlink);

					if (call == NULL) {
						break;
					}
				}
			}
		}
	}

	/* Recompute the queue's earliest deadlines from the surviving head. */
	call = priority_queue_min(&queue->mpq_pqhead, struct timer_call, tc_pqlink);

	if (call) {
		cur_deadline = call->tc_pqlink.deadline;
		queue->earliest_soft_deadline = (call->tc_flags & TIMER_CALL_RATELIMITED) ? call->tc_pqlink.deadline: call->tc_soft_deadline;
	} else {
		queue->earliest_soft_deadline = cur_deadline = UINT64_MAX;
	}

	timer_queue_unlock(queue);

	return cur_deadline;
}
1025 
1026 uint64_t
timer_queue_expire(mpqueue_head_t * queue,uint64_t deadline)1027 timer_queue_expire(
1028 	mpqueue_head_t          *queue,
1029 	uint64_t                deadline)
1030 {
1031 	return timer_queue_expire_with_options(queue, deadline, FALSE);
1032 }
1033 
extern int serverperfmode;
/* Count of migration attempts where a timer's lock could not be taken
 * and the entry was asynchronously dequeued instead of moved. */
static uint32_t timer_queue_migrate_lock_skips;
/*
 * timer_queue_migrate() is called by timer_queue_migrate_cpu()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's (queue_to).
 *
 * Returns the number of timers migrated, or a code when nothing moved:
 *  -1 target queue empty, -2 local queue empty,
 *  -3 local queue holds a TIMER_CALL_LOCAL timer,
 *  -4 suppressed by serverperfmode,
 *   0 local head already earlier than target head.
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
	timer_call_t    call;
	timer_call_t    head_to;
	int             timers_migrated = 0;

	DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

	/* Caller runs with interrupts disabled; the queues must differ. */
	assert(!ml_get_interrupts_enabled());
	assert(queue_from != queue_to);

	if (serverperfmode) {
		/*
		 * if we're running a high end server
		 * avoid migrations... they add latency
		 * and don't save us power under typical
		 * server workloads
		 */
		return -4;
	}

	/*
	 * Take both local (from) and target (to) timer queue locks while
	 * moving the timers from the local queue to the target processor.
	 * We assume that the target is always the boot processor.
	 * But only move if all of the following is true:
	 *  - the target queue is non-empty
	 *  - the local queue is non-empty
	 *  - the local queue's first deadline is later than the target's
	 *  - the local queue contains no non-migrateable "local" call
	 * so that we need not have the target resync.
	 */

	timer_queue_lock_spin(queue_to);

	head_to = priority_queue_min(&queue_to->mpq_pqhead, struct timer_call, tc_pqlink);

	if (head_to == NULL) {
		timers_migrated = -1;
		goto abort1;
	}

	timer_queue_lock_spin(queue_from);

	call = priority_queue_min(&queue_from->mpq_pqhead, struct timer_call, tc_pqlink);

	if (call == NULL) {
		timers_migrated = -2;
		goto abort2;
	}

	if (call->tc_pqlink.deadline < head_to->tc_pqlink.deadline) {
		timers_migrated = 0;
		goto abort2;
	}

	/* perform scan for non-migratable timers */
	qe_foreach_element(call, &queue_from->head, tc_qlink) {
		if (call->tc_flags & TIMER_CALL_LOCAL) {
			timers_migrated = -3;
			goto abort2;
		}
	}

	/* migration loop itself -- both queues are locked */
	qe_foreach_element_safe(call, &queue_from->head, tc_qlink) {
		if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) {
			/* case (2b) lock order inversion, dequeue only */
#ifdef TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue),
			    0,
			    0x2b, 0);
#endif
			timer_queue_migrate_lock_skips++;
			timer_call_entry_dequeue_async(call);
			continue;
		}
		timer_call_entry_dequeue(call);
		timer_call_entry_enqueue_deadline(
			call, queue_to, call->tc_pqlink.deadline);
		timers_migrated++;
		simple_unlock(&call->tc_lock);
	}
	/* Everything migratable is gone from the local queue. */
	queue_from->earliest_soft_deadline = UINT64_MAX;
abort2:
	timer_queue_unlock(queue_from);
abort1:
	timer_queue_unlock(queue_to);

	return timers_migrated;
}
1136 
1137 void
timer_queue_trace_cpu(int ncpu)1138 timer_queue_trace_cpu(int ncpu)
1139 {
1140 	timer_call_nosync_cpu(
1141 		ncpu,
1142 		(void (*)(void *))timer_queue_trace,
1143 		(void*) timer_queue_cpu(ncpu));
1144 }
1145 
/*
 * Emit kdebug trace records describing every timer on `queue`:
 * a START/END pair bracketing one record per entry (soft deadline,
 * hard deadline, entry time, callout function).  No-op unless kdebug
 * tracing is enabled.  Runs at splclock with the queue lock held.
 */
void
timer_queue_trace(
	mpqueue_head_t                  *queue)
{
	timer_call_t    call;
	spl_t           s;

	if (!kdebug_enable) {
		return;
	}

	s = splclock();
	timer_queue_lock_spin(queue);

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_QUEUE | DBG_FUNC_START,
	    queue->count, mach_absolute_time(), 0, 0, 0);

	qe_foreach_element(call, &queue->head, tc_qlink) {
		TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		    DECR_TIMER_QUEUE | DBG_FUNC_NONE,
		    call->tc_soft_deadline,
		    call->tc_pqlink.deadline,
		    call->tc_entry_time,
		    VM_KERNEL_UNSLIDE(call->tc_func),
		    0);
	}

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_QUEUE | DBG_FUNC_END,
	    queue->count, mach_absolute_time(), 0, 0, 0);

	timer_queue_unlock(queue);
	splx(s);
}
1181 
1182 void
timer_longterm_dequeued_locked(timer_call_t call)1183 timer_longterm_dequeued_locked(timer_call_t call)
1184 {
1185 	timer_longterm_t        *tlp = &timer_longterm;
1186 
1187 	tlp->dequeues++;
1188 	if (call == tlp->threshold.call) {
1189 		tlp->threshold.call = NULL;
1190 	}
1191 }
1192 
1193 /*
1194  * Place a timer call in the longterm list
1195  * and adjust the next timer callout deadline if the new timer is first.
1196  */
/*
 * Returns timer_longterm_queue if the timer was accepted onto the
 * longterm list (with *old_queue set to its previous queue, if any),
 * or NULL if the timer is not longterm-eligible.
 */
mpqueue_head_t *
timer_longterm_enqueue_unlocked(timer_call_t    call,
    uint64_t        now,
    uint64_t        deadline,
    mpqueue_head_t  **old_queue,
    uint64_t        soft_deadline,
    uint64_t        ttd,
    timer_call_param_t      param1,
    uint32_t        callout_flags)
{
	timer_longterm_t        *tlp = &timer_longterm;
	boolean_t               update_required = FALSE;
	uint64_t                longterm_threshold;

	longterm_threshold = now + tlp->threshold.interval;

	/*
	 * Return NULL without doing anything if:
	 *  - this timer is local, or
	 *  - the longterm mechanism is disabled, or
	 *  - this deadline is too short.
	 */
	if ((callout_flags & TIMER_CALL_LOCAL) != 0 ||
	    (tlp->threshold.interval == TIMER_LONGTERM_NONE) ||
	    (deadline <= longterm_threshold)) {
		return NULL;
	}

	/*
	 * Remove timer from its current queue, if any.
	 */
	*old_queue = timer_call_dequeue_unlocked(call);

	/*
	 * Lock the longterm queue, queue timer and determine
	 * whether an update is necessary.
	 * Lock order: entry lock first, then queue lock.
	 */
	assert(!ml_get_interrupts_enabled());
	simple_lock(&call->tc_lock, LCK_GRP_NULL);
	timer_queue_lock_spin(timer_longterm_queue);
	call->tc_pqlink.deadline = deadline;
	call->tc_param1 = param1;
	call->tc_ttd = ttd;
	call->tc_soft_deadline = soft_deadline;
	call->tc_flags = callout_flags;
	timer_call_entry_enqueue_tail(call, timer_longterm_queue);

	tlp->enqueues++;

	/*
	 * We'll need to update the currently set threshold timer
	 * if the new deadline is sooner and no sooner update is in flight.
	 */
	if (deadline < tlp->threshold.deadline &&
	    deadline < tlp->threshold.preempted) {
		tlp->threshold.preempted = deadline;
		tlp->threshold.call = call;
		update_required = TRUE;
	}
	timer_queue_unlock(timer_longterm_queue);
	simple_unlock(&call->tc_lock);

	if (update_required) {
		/*
		 * Note: this call expects that calling the master cpu
		 * alone does not involve locking the topo lock.
		 */
		timer_call_nosync_cpu(
			master_cpu,
			(void (*)(void *))timer_longterm_update,
			(void *)tlp);
	}

	return timer_longterm_queue;
}
1272 
1273 /*
1274  * Scan for timers below the longterm threshold.
1275  * Move these to the local timer queue (of the boot processor on which the
1276  * calling thread is running).
1277  * Both the local (boot) queue and the longterm queue are locked.
1278  * The scan is similar to the timer migrate sequence but is performed by
1279  * successively examining each timer on the longterm queue:
1280  *  - if within the short-term threshold
1281  *    - enter on the local queue (unless being deleted),
1282  *  - otherwise:
1283  *    - if sooner, deadline becomes the next threshold deadline.
1284  * The total scan time is limited to TIMER_LONGTERM_SCAN_LIMIT. Should this be
1285  * exceeded, we abort and reschedule again so that we don't shut others from
1286  * the timer queues. Longterm timers firing late is not critical.
1287  */
void
timer_longterm_scan(timer_longterm_t    *tlp,
    uint64_t            time_start)
{
	timer_call_t    call;
	uint64_t        threshold = TIMER_LONGTERM_NONE;
	uint64_t        deadline;
	uint64_t        time_limit = time_start + tlp->scan_limit;
	mpqueue_head_t  *timer_master_queue;

	/* Must run on the boot cpu with interrupts disabled. */
	assert(!ml_get_interrupts_enabled());
	assert(cpu_number() == master_cpu);

	/* Timers due before `threshold` are escalated to the master queue. */
	if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
		threshold = time_start + tlp->threshold.interval;
	}

	/* Reset; re-derived below from the timers left on the queue. */
	tlp->threshold.deadline = TIMER_LONGTERM_NONE;
	tlp->threshold.call = NULL;

	if (queue_empty(&timer_longterm_queue->head)) {
		return;
	}

	timer_master_queue = timer_queue_cpu(master_cpu);
	timer_queue_lock_spin(timer_master_queue);

	qe_foreach_element_safe(call, &timer_longterm_queue->head, tc_qlink) {
		deadline = call->tc_soft_deadline;
		if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) {
			/* case (2c) lock order inversion, dequeue only */
#ifdef TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue),
			    0,
			    0x2c, 0);
#endif
			timer_call_entry_dequeue_async(call);
			continue;
		}
		if (deadline < threshold) {
			/*
			 * This timer needs moving (escalating)
			 * to the local (boot) processor's queue.
			 */
#ifdef TIMER_ASSERT
			if (deadline < time_start) {
				TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				    DECR_TIMER_OVERDUE | DBG_FUNC_NONE,
				    VM_KERNEL_UNSLIDE_OR_PERM(call),
				    deadline,
				    time_start,
				    threshold,
				    0);
			}
#endif
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
			    DECR_TIMER_ESCALATE | DBG_FUNC_NONE,
			    VM_KERNEL_UNSLIDE_OR_PERM(call),
			    call->tc_pqlink.deadline,
			    call->tc_entry_time,
			    VM_KERNEL_UNSLIDE(call->tc_func),
			    0);
			tlp->escalates++;
			timer_call_entry_dequeue(call);
			timer_call_entry_enqueue_deadline(
				call, timer_master_queue, call->tc_pqlink.deadline);
			/*
			 * A side-effect of the following call is to update
			 * the actual hardware deadline if required.
			 */
			(void) timer_queue_assign(deadline);
		} else {
			/* Still longterm: track the soonest one as the next
			 * threshold deadline. */
			if (deadline < tlp->threshold.deadline) {
				tlp->threshold.deadline = deadline;
				tlp->threshold.call = call;
			}
		}
		simple_unlock(&call->tc_lock);

		/* Abort scan if we're taking too long. */
		if (mach_absolute_time() > time_limit) {
			tlp->threshold.deadline = TIMER_LONGTERM_SCAN_AGAIN;
			tlp->scan_pauses++;
			DBG("timer_longterm_scan() paused %llu, qlen: %llu\n",
			    time_limit, tlp->queue.count);
			break;
		}
	}

	timer_queue_unlock(timer_master_queue);
}
1382 
1383 void
timer_longterm_callout(timer_call_param_t p0,__unused timer_call_param_t p1)1384 timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1)
1385 {
1386 	timer_longterm_t        *tlp = (timer_longterm_t *) p0;
1387 
1388 	timer_longterm_update(tlp);
1389 }
1390 
/*
 * Re-derive the longterm threshold deadline.  Called on the master cpu
 * with the longterm queue locked.  Either consume a pending "preempt"
 * (a sooner deadline posted by timer_longterm_enqueue_unlocked()) or
 * perform a full scan; then bias the deadline to fire early and clamp
 * it so scans are not performed more often than scan_interval.
 */
void
timer_longterm_update_locked(timer_longterm_t *tlp)
{
	uint64_t        latency;

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_UPDATE | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue),
	    tlp->threshold.deadline,
	    tlp->threshold.preempted,
	    tlp->queue.count, 0);

	tlp->scan_time = mach_absolute_time();
	if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) {
		tlp->threshold.preempts++;
		tlp->threshold.deadline = tlp->threshold.preempted;
		tlp->threshold.preempted = TIMER_LONGTERM_NONE;
		/*
		 * Note: in the unlikely event that a pre-empted timer has
		 * itself been cancelled, we'll simply re-scan later at the
		 * time of the preempted/cancelled timer.
		 */
	} else {
		tlp->threshold.scans++;

		/*
		 * Maintain a moving average of our wakeup latency.
		 * Clamp latency to 0 and ignore above threshold interval.
		 */
		if (tlp->scan_time > tlp->threshold.deadline_set) {
			latency = tlp->scan_time - tlp->threshold.deadline_set;
		} else {
			latency = 0;
		}
		if (latency < tlp->threshold.interval) {
			tlp->threshold.latency_min =
			    MIN(tlp->threshold.latency_min, latency);
			tlp->threshold.latency_max =
			    MAX(tlp->threshold.latency_max, latency);
			/* Exponential moving average, ~1% weight per sample */
			tlp->threshold.latency =
			    (tlp->threshold.latency * 99 + latency) / 100;
		}

		timer_longterm_scan(tlp, tlp->scan_time);
	}

	tlp->threshold.deadline_set = tlp->threshold.deadline;
	/* The next deadline timer to be set is adjusted */
	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE &&
	    tlp->threshold.deadline != TIMER_LONGTERM_SCAN_AGAIN) {
		/* Fire early by margin + observed latency so the scan
		 * completes before the earliest longterm deadline. */
		tlp->threshold.deadline_set -= tlp->threshold.margin;
		tlp->threshold.deadline_set -= tlp->threshold.latency;
	}

	/* Throttle next scan time */
	uint64_t scan_clamp = mach_absolute_time() + tlp->scan_interval;
	if (tlp->threshold.deadline_set < scan_clamp) {
		tlp->threshold.deadline_set = scan_clamp;
	}

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_UPDATE | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue),
	    tlp->threshold.deadline,
	    tlp->threshold.scans,
	    tlp->queue.count, 0);
}
1458 
1459 void
timer_longterm_update(timer_longterm_t * tlp)1460 timer_longterm_update(timer_longterm_t *tlp)
1461 {
1462 	spl_t   s = splclock();
1463 
1464 	timer_queue_lock_spin(timer_longterm_queue);
1465 
1466 	if (cpu_number() != master_cpu) {
1467 		panic("timer_longterm_update_master() on non-boot cpu");
1468 	}
1469 
1470 	timer_longterm_update_locked(tlp);
1471 
1472 	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
1473 		timer_call_enter(
1474 			&tlp->threshold.timer,
1475 			tlp->threshold.deadline_set,
1476 			TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
1477 	}
1478 
1479 	timer_queue_unlock(timer_longterm_queue);
1480 	splx(s);
1481 }
1482 
/*
 * One-time initialization of the longterm timer machinery: establish
 * the threshold interval (boot-arg "longterm" in ms overrides the
 * default; disabled by default in serverperfmode), then initialize
 * the longterm queue and the threshold timer itself.
 */
void
timer_longterm_init(void)
{
	uint32_t                longterm;
	timer_longterm_t        *tlp = &timer_longterm;

	DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);

	/*
	 * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD
	 * or TIMER_LONGTERM_NONE (disabled) for server;
	 * overridden longterm boot-arg
	 */
	tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE
	    : TIMER_LONGTERM_THRESHOLD;
	if (PE_parse_boot_argn("longterm", &longterm, sizeof(longterm))) {
		tlp->threshold.interval = (longterm == 0) ?
		    TIMER_LONGTERM_NONE :
		    longterm * NSEC_PER_MSEC;
	}
	if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
		printf("Longterm timer threshold: %llu ms\n",
		    tlp->threshold.interval / NSEC_PER_MSEC);
		kprintf("Longterm timer threshold: %llu ms\n",
		    tlp->threshold.interval / NSEC_PER_MSEC);
		/* Convert interval from nanoseconds to abstime units for
		 * comparison against mach_absolute_time() deadlines. */
		nanoseconds_to_absolutetime(tlp->threshold.interval,
		    &tlp->threshold.interval);
		tlp->threshold.margin = tlp->threshold.interval / 10;
		tlp->threshold.latency_min = EndOfAllTime;
		tlp->threshold.latency_max = 0;
	}

	tlp->threshold.preempted = TIMER_LONGTERM_NONE;
	tlp->threshold.deadline = TIMER_LONGTERM_NONE;

	mpqueue_init(&tlp->queue, &timer_longterm_lck_grp, LCK_ATTR_NULL);

	timer_call_setup(&tlp->threshold.timer,
	    timer_longterm_callout, (timer_call_param_t) tlp);

	timer_longterm_queue = &tlp->queue;
}
1525 
/*
 * OIDs accepted by timer_sysctl_get()/timer_sysctl_set() below.
 * THRESHOLD, SCAN_LIMIT and SCAN_INTERVAL are settable; the remainder
 * are read-only longterm-timer statistics.
 */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};
1531 uint64_t
timer_sysctl_get(int oid)1532 timer_sysctl_get(int oid)
1533 {
1534 	timer_longterm_t        *tlp = &timer_longterm;
1535 
1536 	switch (oid) {
1537 	case THRESHOLD:
1538 		return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ?
1539 		       0 : tlp->threshold.interval / NSEC_PER_MSEC;
1540 	case QCOUNT:
1541 		return tlp->queue.count;
1542 	case ENQUEUES:
1543 		return tlp->enqueues;
1544 	case DEQUEUES:
1545 		return tlp->dequeues;
1546 	case ESCALATES:
1547 		return tlp->escalates;
1548 	case SCANS:
1549 		return tlp->threshold.scans;
1550 	case PREEMPTS:
1551 		return tlp->threshold.preempts;
1552 	case LATENCY:
1553 		return tlp->threshold.latency;
1554 	case LATENCY_MIN:
1555 		return tlp->threshold.latency_min;
1556 	case LATENCY_MAX:
1557 		return tlp->threshold.latency_max;
1558 	case SCAN_LIMIT:
1559 		return tlp->scan_limit;
1560 	case SCAN_INTERVAL:
1561 		return tlp->scan_interval;
1562 	case PAUSES:
1563 		return tlp->scan_pauses;
1564 	default:
1565 		return 0;
1566 	}
1567 }
1568 
1569 /*
1570  * timer_master_scan() is the inverse of timer_longterm_scan()
1571  * since it un-escalates timers to the longterm queue.
1572  */
static void
timer_master_scan(timer_longterm_t      *tlp,
    uint64_t              now)
{
	timer_call_t    call;
	uint64_t        threshold;
	uint64_t        deadline;
	mpqueue_head_t  *timer_master_queue;

	/* Timers due after `threshold` are un-escalated to the longterm
	 * queue; TIMER_LONGTERM_NONE disables un-escalation entirely. */
	if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
		threshold = now + tlp->threshold.interval;
	} else {
		threshold = TIMER_LONGTERM_NONE;
	}

	timer_master_queue = timer_queue_cpu(master_cpu);
	timer_queue_lock_spin(timer_master_queue);

	qe_foreach_element_safe(call, &timer_master_queue->head, tc_qlink) {
		deadline = call->tc_pqlink.deadline;
		/* TIMER_CALL_LOCAL timers must stay on their cpu's queue. */
		if ((call->tc_flags & TIMER_CALL_LOCAL) != 0) {
			continue;
		}
		if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) {
			/* case (2c) lock order inversion, dequeue only */
			timer_call_entry_dequeue_async(call);
			continue;
		}
		if (deadline > threshold) {
			/* move from master to longterm */
			timer_call_entry_dequeue(call);
			timer_call_entry_enqueue_tail(call, timer_longterm_queue);
			if (deadline < tlp->threshold.deadline) {
				tlp->threshold.deadline = deadline;
				tlp->threshold.call = call;
			}
		}
		simple_unlock(&call->tc_lock);
	}
	timer_queue_unlock(timer_master_queue);
}
1614 
/*
 * Apply a new longterm threshold (in ms; 0 disables the mechanism).
 * Runs on the master cpu via timer_call_cpu().  After changing the
 * threshold, timers are re-sorted between the master and longterm
 * queues in whichever direction the change requires, the threshold
 * timer is re-armed, and statistics are reset.
 */
static void
timer_sysctl_set_threshold(uint64_t value)
{
	timer_longterm_t        *tlp = &timer_longterm;
	spl_t                   s = splclock();
	boolean_t               threshold_increase;

	timer_queue_lock_spin(timer_longterm_queue);

	timer_call_cancel(&tlp->threshold.timer);

	/*
	 * Set the new threshold and note whether it's increasing.
	 */
	if (value == 0) {
		tlp->threshold.interval = TIMER_LONGTERM_NONE;
		threshold_increase = TRUE;
		/* NOTE(review): the threshold timer was already cancelled
		 * above; this second cancel appears redundant. */
		timer_call_cancel(&tlp->threshold.timer);
	} else {
		uint64_t        old_interval = tlp->threshold.interval;
		/* ms -> ns -> abstime units */
		tlp->threshold.interval = value * NSEC_PER_MSEC;
		nanoseconds_to_absolutetime(tlp->threshold.interval,
		    &tlp->threshold.interval);
		tlp->threshold.margin = tlp->threshold.interval / 10;
		if (old_interval == TIMER_LONGTERM_NONE) {
			threshold_increase = FALSE;
		} else {
			threshold_increase = (tlp->threshold.interval > old_interval);
		}
	}

	if (threshold_increase /* or removal */) {
		/* Escalate timers from the longterm queue */
		timer_longterm_scan(tlp, mach_absolute_time());
	} else { /* decrease or addition  */
		/*
		 * We scan the local/master queue for timers now longterm.
		 * To be strictly correct, we should scan all processor queues
		 * but timer migration results in most timers gravitating to the
		 * master processor in any case.
		 */
		timer_master_scan(tlp, mach_absolute_time());
	}

	/* Set new timer accordingly */
	tlp->threshold.deadline_set = tlp->threshold.deadline;
	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
		/* Bias early by margin + observed latency (see
		 * timer_longterm_update_locked()). */
		tlp->threshold.deadline_set -= tlp->threshold.margin;
		tlp->threshold.deadline_set -= tlp->threshold.latency;
		timer_call_enter(
			&tlp->threshold.timer,
			tlp->threshold.deadline_set,
			TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
	}

	/* Reset stats */
	tlp->enqueues = 0;
	tlp->dequeues = 0;
	tlp->escalates = 0;
	tlp->scan_pauses = 0;
	tlp->threshold.scans = 0;
	tlp->threshold.preempts = 0;
	tlp->threshold.latency = 0;
	tlp->threshold.latency_min = EndOfAllTime;
	tlp->threshold.latency_max = 0;

	timer_queue_unlock(timer_longterm_queue);
	splx(s);
}
1684 
1685 int
timer_sysctl_set(int oid,uint64_t value)1686 timer_sysctl_set(int oid, uint64_t value)
1687 {
1688 	switch (oid) {
1689 	case THRESHOLD:
1690 		timer_call_cpu(
1691 			master_cpu,
1692 			(void (*)(void *))timer_sysctl_set_threshold,
1693 			(void *) value);
1694 		return KERN_SUCCESS;
1695 	case SCAN_LIMIT:
1696 		timer_longterm.scan_limit = value;
1697 		return KERN_SUCCESS;
1698 	case SCAN_INTERVAL:
1699 		timer_longterm.scan_interval = value;
1700 		return KERN_SUCCESS;
1701 	default:
1702 		return KERN_INVALID_ARGUMENT;
1703 	}
1704 }
1705 
1706 
1707 /* Select timer coalescing window based on per-task quality-of-service hints */
1708 static boolean_t
tcoal_qos_adjust(thread_t t,int32_t * tshift,uint64_t * tmax_abstime,boolean_t * pratelimited)1709 tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
1710 {
1711 	uint32_t latency_qos;
1712 	boolean_t adjusted = FALSE;
1713 	task_t ctask = get_threadtask(t);
1714 
1715 	if (ctask) {
1716 		latency_qos = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS);
1717 
1718 		assert(latency_qos <= NUM_LATENCY_QOS_TIERS);
1719 
1720 		if (latency_qos) {
1721 			*tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1];
1722 			*tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[latency_qos - 1];
1723 			*pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1];
1724 			adjusted = TRUE;
1725 		}
1726 	}
1727 	return adjusted;
1728 }
1729 
1730 
1731 /* Adjust timer deadlines based on priority of the thread and the
1732  * urgency value provided at timeout establishment. With this mechanism,
1733  * timers are no longer necessarily sorted in order of soft deadline
1734  * on a given timer queue, i.e. they may be differentially skewed.
1735  * In the current scheme, this could lead to fewer pending timers
1736  * processed than is technically possible when the HW deadline arrives.
1737  */
static void
timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
{
	int16_t tpri = cthread->sched_pri;
	if ((urgency & TIMER_CALL_USER_MASK) != 0) {
		/* Realtime and explicitly critical user timers get the
		 * tightest coalescing window. */
		if (tpri >= BASEPRI_RTQUEUES ||
		    urgency == TIMER_CALL_USER_CRITICAL) {
			*tshift = tcoal_prio_params.timer_coalesce_rt_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max;
			TCOAL_PRIO_STAT(rt_tcl);
		} else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) ||
		    (urgency == TIMER_CALL_USER_BACKGROUND)) {
			/* Determine if timer should be subjected to a lower QoS */
			if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
				if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) {
					return;
				} else {
					/* QoS window not wider than background:
					 * fall through to background params. */
					*pratelimited = FALSE;
				}
			}
			*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
			TCOAL_PRIO_STAT(bg_tcl);
		} else if (tpri >= MINPRI_KERNEL) {
			*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
			TCOAL_PRIO_STAT(kt_tcl);
		} else if (cthread->sched_mode == TH_MODE_FIXED) {
			*tshift = tcoal_prio_params.timer_coalesce_fp_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_fp_abstime_max;
			TCOAL_PRIO_STAT(fp_tcl);
		} else if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
			TCOAL_PRIO_STAT(qos_tcl);
		} else if (cthread->sched_mode == TH_MODE_TIMESHARE) {
			*tshift = tcoal_prio_params.timer_coalesce_ts_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_ts_abstime_max;
			TCOAL_PRIO_STAT(ts_tcl);
		} else {
			/* No adjustment applies; leave caller's defaults. */
			TCOAL_PRIO_STAT(nc_tcl);
		}
	} else if (urgency == TIMER_CALL_SYS_BACKGROUND) {
		*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
		*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
		TCOAL_PRIO_STAT(bg_tcl);
	} else {
		/* Default: kernel-timer coalescing parameters. */
		*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
		*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
		TCOAL_PRIO_STAT(kt_tcl);
	}
}
1788 
1789 
1790 int timer_user_idle_level;
1791 
1792 uint64_t
timer_call_slop(uint64_t deadline,uint64_t now,uint32_t flags,thread_t cthread,boolean_t * pratelimited)1793 timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
1794 {
1795 	int32_t tcs_shift = 0;
1796 	uint64_t tcs_max_abstime = 0;
1797 	uint64_t adjval;
1798 	uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);
1799 
1800 	if (mach_timer_coalescing_enabled &&
1801 	    (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) {
1802 		timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_max_abstime, pratelimited);
1803 
1804 		if (tcs_shift >= 0) {
1805 			adjval =  MIN((deadline - now) >> tcs_shift, tcs_max_abstime);
1806 		} else {
1807 			adjval =  MIN((deadline - now) << (-tcs_shift), tcs_max_abstime);
1808 		}
1809 		/* Apply adjustments derived from "user idle level" heuristic */
1810 		adjval += (adjval * timer_user_idle_level) >> 7;
1811 		return adjval;
1812 	} else {
1813 		return 0;
1814 	}
1815 }
1816 
/* Return the current "user idle level" (0..128). */
int
timer_get_user_idle_level(void)
{
	return timer_user_idle_level;
}
1822 
1823 kern_return_t
timer_set_user_idle_level(int ilevel)1824 timer_set_user_idle_level(int ilevel)
1825 {
1826 	boolean_t do_reeval = FALSE;
1827 
1828 	if ((ilevel < 0) || (ilevel > 128)) {
1829 		return KERN_INVALID_ARGUMENT;
1830 	}
1831 
1832 	if (ilevel < timer_user_idle_level) {
1833 		do_reeval = TRUE;
1834 	}
1835 
1836 	timer_user_idle_level = ilevel;
1837 
1838 	if (do_reeval) {
1839 		ml_timer_evaluate();
1840 	}
1841 
1842 	return KERN_SUCCESS;
1843 }
1844 
1845 #pragma mark - running timers
1846 
1847 #define RUNNING_TIMER_FAKE_FLAGS (TIMER_CALL_SYS_CRITICAL | \
1848     TIMER_CALL_LOCAL)
1849 
1850 /*
1851  * timer_call_trace_* functions mimic the tracing behavior from the normal
1852  * timer_call subsystem, so tools continue to function.
1853  */
1854 
/*
 * Trace the arming of a running timer, before the deadline is stored:
 * emits DECR_TIMER_ENTER kdebug events and, with CONFIG_DTRACE, fires
 * the callout__create probe with the time-to-deadline split into
 * high/low 32-bit halves.
 */
static void
timer_call_trace_enter_before(struct timer_call *call, uint64_t deadline,
    uint32_t flags, uint64_t now)
{
#pragma unused(call, deadline, flags, now)
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_ADDRHIDE(call->tc_param1),
	    deadline, flags, 0);
#if CONFIG_DTRACE
	/* Time-to-deadline for the DTrace probe. */
	uint64_t ttd = deadline - now;
	DTRACE_TMR7(callout__create, timer_call_func_t, call->tc_func,
	    timer_call_param_t, call->tc_param0, uint32_t, flags, 0,
	    (ttd >> 32), (unsigned int)(ttd & 0xFFFFFFFF), NULL);
#endif /* CONFIG_DTRACE */
	/*
	 * NOTE(review): this emits DECR_TIMER_ENTER|DBG_FUNC_END here, and
	 * timer_call_trace_enter_after() emits the same event again after
	 * the deadline is stored — confirm the duplicate END is intended.
	 */
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call), 0, deadline, 0, 0);
}
1872 
/*
 * Trace completion of arming a running timer (DECR_TIMER_ENTER end
 * event), after the deadline has been stored in the timer_call.
 */
static void
timer_call_trace_enter_after(struct timer_call *call, uint64_t deadline)
{
#pragma unused(call, deadline)
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call), 0, deadline, 0, 0);
}
1880 
/*
 * Trace cancellation of a running timer: DECR_TIMER_CANCEL kdebug
 * start/end events plus, with CONFIG_DTRACE, the callout__cancel probe.
 * Reads whatever deadline is currently stored in tc_pqlink.deadline.
 */
static void
timer_call_trace_cancel(struct timer_call *call)
{
#pragma unused(call)
	__unused uint64_t deadline = call->tc_pqlink.deadline;
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CANCEL | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(call), deadline, 0,
	    call->tc_flags, 0);
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CANCEL | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call), 0, deadline - mach_absolute_time(),
	    deadline - call->tc_entry_time, 0);
#if CONFIG_DTRACE
#if TIMER_TRACE
	/* Time-to-deadline relative to when the timer was armed. */
	uint64_t ttd = deadline - call->tc_entry_time;
#else
	/* Entry time not tracked without TIMER_TRACE; report a sentinel. */
	uint64_t ttd = UINT64_MAX;
#endif /* TIMER_TRACE */
	DTRACE_TMR6(callout__cancel, timer_call_func_t, call->tc_func,
	    timer_call_param_t, call->tc_param0, uint32_t, call->tc_flags, 0,
	    (ttd >> 32), (unsigned int)(ttd & 0xFFFFFFFF));
#endif /* CONFIG_DTRACE */
}
1903 
/*
 * Trace entry into a running timer's callout: DECR_TIMER_CALLOUT start
 * event and, with CONFIG_DTRACE, the callout__start probe.
 */
static void
timer_call_trace_expire_entry(struct timer_call *call)
{
#pragma unused(call)
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CALLOUT | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(call->tc_func),
	    VM_KERNEL_ADDRHIDE(call->tc_param0),
	    VM_KERNEL_ADDRHIDE(call->tc_param1),
	    0);
#if CONFIG_DTRACE
#if TIMER_TRACE
	/* Time-to-deadline relative to when the timer was armed. */
	uint64_t ttd = call->tc_pqlink.deadline - call->tc_entry_time;
#else /* TIMER_TRACE */
	/* Entry time not tracked without TIMER_TRACE; report a sentinel. */
	uint64_t ttd = UINT64_MAX;
#endif /* TIMER_TRACE */
	DTRACE_TMR7(callout__start, timer_call_func_t, call->tc_func,
	    timer_call_param_t, call->tc_param0, unsigned, call->tc_flags,
	    0, (ttd >> 32), (unsigned int)(ttd & 0xFFFFFFFF), NULL);
#endif /* CONFIG_DTRACE */
}
1924 
/*
 * Trace return from a running timer's callout: with CONFIG_DTRACE the
 * callout__end probe fires first, then the DECR_TIMER_CALLOUT end event.
 */
static void
timer_call_trace_expire_return(struct timer_call *call)
{
#pragma unused(call)
#if CONFIG_DTRACE
	DTRACE_TMR4(callout__end, timer_call_func_t, call->tc_func,
	    call->tc_param0, call->tc_param1, NULL);
#endif /* CONFIG_DTRACE */
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CALLOUT | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    VM_KERNEL_UNSLIDE(call->tc_func),
	    VM_KERNEL_ADDRHIDE(call->tc_param0),
	    VM_KERNEL_ADDRHIDE(call->tc_param1),
	    0);
}
1940 
1941 /*
1942  * Set a new deadline for a running timer on this processor.
1943  */
/*
 * Program the deadline and parameter for one of this processor's
 * running timers.  Must be called with interrupts disabled.  Does NOT
 * resync the hardware deadline — callers follow up with
 * running_timers_sync(), or use running_timer_enter() which does both.
 */
void
running_timer_setup(processor_t processor, enum running_timer timer,
    void *param, uint64_t deadline, uint64_t now)
{
	assert(timer < RUNNING_TIMER_MAX);
	assert(ml_get_interrupts_enabled() == FALSE);

	struct timer_call *call = &processor->running_timers[timer];

	timer_call_trace_enter_before(call, deadline, RUNNING_TIMER_FAKE_FLAGS,
	    now);

	/* Past deadlines get adjusted by the shared past-deadline handler. */
	if (__improbable(deadline < now)) {
		deadline = timer_call_past_deadline_timer_handle(deadline, now);
	}

	call->tc_pqlink.deadline = deadline;
#if TIMER_TRACE
	call->tc_entry_time = now;
#endif /* TIMER_TRACE */
	call->tc_param1 = param;

	timer_call_trace_enter_after(call, deadline);
}
1968 
/*
 * Resynchronize the hardware timer deadline with current software
 * state, picking up any running-timer changes on this processor.
 */
void
running_timers_sync(void)
{
	timer_resync_deadlines();
}
1974 
/*
 * Arm a running timer on `processor` and immediately resync the
 * hardware deadline so the new deadline takes effect.  Convenience
 * wrapper: running_timer_setup() followed by running_timers_sync().
 */
void
running_timer_enter(processor_t processor, unsigned int timer,
    void *param, uint64_t deadline, uint64_t now)
{
	running_timer_setup(processor, timer, param, deadline, now);
	running_timers_sync();
}
1982 
1983 /*
1984  * Call the callback for any running timers that fired for this processor.
1985  * Returns true if any timers were past their deadline.
1986  */
1987 bool
running_timers_expire(processor_t processor,uint64_t now)1988 running_timers_expire(processor_t processor, uint64_t now)
1989 {
1990 	bool expired = false;
1991 
1992 	if (!processor->running_timers_active) {
1993 		return expired;
1994 	}
1995 
1996 	for (int i = 0; i < RUNNING_TIMER_MAX; i++) {
1997 		struct timer_call *call = &processor->running_timers[i];
1998 
1999 		uint64_t deadline = call->tc_pqlink.deadline;
2000 		if (deadline > now) {
2001 			continue;
2002 		}
2003 
2004 		expired = true;
2005 		timer_call_trace_expire_entry(call);
2006 		call->tc_func(call->tc_param0, call->tc_param1);
2007 		timer_call_trace_expire_return(call);
2008 	}
2009 
2010 	return expired;
2011 }
2012 
2013 void
running_timer_clear(processor_t processor,enum running_timer timer)2014 running_timer_clear(processor_t processor, enum running_timer timer)
2015 {
2016 	struct timer_call *call = &processor->running_timers[timer];
2017 	uint64_t deadline = call->tc_pqlink.deadline;
2018 	if (deadline == EndOfAllTime) {
2019 		return;
2020 	}
2021 
2022 	call->tc_pqlink.deadline = EndOfAllTime;
2023 #if TIMER_TRACE
2024 	call->tc_entry_time = 0;
2025 #endif /* TIMER_TRACE */
2026 	timer_call_trace_cancel(call);
2027 }
2028 
/*
 * Disarm a running timer and resync the hardware deadline so the
 * cancellation takes effect immediately.
 */
void
running_timer_cancel(processor_t processor, unsigned int timer)
{
	running_timer_clear(processor, timer);
	running_timers_sync();
}
2035 
2036 uint64_t
running_timers_deadline(processor_t processor)2037 running_timers_deadline(processor_t processor)
2038 {
2039 	if (!processor->running_timers_active) {
2040 		return EndOfAllTime;
2041 	}
2042 
2043 	uint64_t deadline = EndOfAllTime;
2044 	for (int i = 0; i < RUNNING_TIMER_MAX; i++) {
2045 		uint64_t candidate =
2046 		    processor->running_timers[i].tc_pqlink.deadline;
2047 		if (candidate != 0 && candidate < deadline) {
2048 			deadline = candidate;
2049 		}
2050 	}
2051 
2052 	return deadline;
2053 }
2054 
/*
 * Enable running timers on this processor and resync the hardware
 * deadline so their deadlines are taken into account.
 */
void
running_timers_activate(processor_t processor)
{
	processor->running_timers_active = true;
	running_timers_sync();
}
2061 
/*
 * Disable running timers on this processor (must currently be active)
 * and resync so their deadlines stop influencing the hardware timer.
 */
void
running_timers_deactivate(processor_t processor)
{
	assert(processor->running_timers_active == true);
	processor->running_timers_active = false;
	running_timers_sync();
}
2069