/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)time.h	8.5 (Berkeley) 5/4/95
 * $FreeBSD$
 */

#include <mach/mach_types.h>

#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
#include <os/log.h>

#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
// On ARM64, the hwclock keeps ticking across a normal S2R so we use it to reset the
// system clock after a normal wake. However, on hibernation we cut power to the hwclock,
// so we have to add an offset to the hwclock to compute continuous_time after hibernate resume.
uint64_t hwclock_conttime_offset = 0;
#endif /* HIBERNATION && HAS_CONTINUOUS_HWCLOCK */

#if HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK
#define ENABLE_LEGACY_CLOCK_CODE 1
#endif /* HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK */

#if HIBERNATION_USES_LEGACY_CLOCK
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION_USES_LEGACY_CLOCK */

uint32_t        hz_tick_interval = 1;
#if ENABLE_LEGACY_CLOCK_CODE
static uint64_t has_monotonic_clock = 0;
#endif /* ENABLE_LEGACY_CLOCK_CODE */

lck_ticket_t clock_lock;

static LCK_GRP_DECLARE(settime_lock_grp, "settime");
static LCK_MTX_DECLARE(settime_lock, &settime_lock_grp);

#define clock_lock()    \
	lck_ticket_lock(&clock_lock, LCK_GRP_NULL)

#define clock_unlock()  \
	lck_ticket_unlock(&clock_lock)

boolean_t
kdp_clock_is_locked()
{
	return kdp_lck_ticket_is_acquired(&clock_lock);
}

struct bintime {
	time_t  sec;
	uint64_t frac;
};
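
/*
 * Worked example of the fixed-point layout (illustrative): a bintime is a
 * 64.64 value, so frac counts 1/2^64-ths of a second.  0.5 s has
 * frac = 1ULL << 63, and one nanosecond is about
 * 2^64 / 10^9 = 18446744073 frac units -- the constant used by
 * bintime_addns()/bintime_subns() below.
 */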

static __inline void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _x;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
}

static __inline void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _x;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
}

static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
	bt->sec += ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_addx(bt, ns);
	}
}

static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
	bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
	ns = ns % (uint64_t)NSEC_PER_SEC;
	if (ns) {
		/* 18446744073 = int(2^64 / NSEC_PER_SEC) */
		ns = ns * (uint64_t)18446744073LL;
		bintime_subx(bt, ns);
	}
}

static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
	uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
	uint64_t ns = multi_overflow(a, uxns);
	if (xns > 0) {
		if (ns) {
			bintime_addns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_addx(bt, ns);
	} else {
		if (ns) {
			bintime_subns(bt, ns);
		}
		ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
		bintime_subx(bt, ns);
	}
}


static __inline void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac += _bt2->frac;
	if (_u > _bt->frac) {
		_bt->sec++;
	}
	_bt->sec += _bt2->sec;
}

static __inline void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
	uint64_t _u;

	_u = _bt->frac;
	_bt->frac -= _bt2->frac;
	if (_u < _bt->frac) {
		_bt->sec--;
	}
	_bt->sec -= _bt2->sec;
}

static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
	_bt->sec = *secs;
	/* 18446744073709 = int(2^64 / 1000000) */
	_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}

static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
	*secs = _bt->sec;
	*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}

static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = _bt->sec;
	*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
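
/*
 * Note on the two conversions above: keeping only the top 32 bits of frac
 * and multiplying by the unit before shifting computes
 * (frac >> 32) * UNIT >> 32 ~= frac * UNIT / 2^64, i.e. the fractional
 * second expressed in that unit, while staying within 64-bit arithmetic.
 * The truncation costs at most one unit of the result.
 */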

#if ENABLE_LEGACY_CLOCK_CODE
static __inline void
bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
{
	uint64_t nsec;
	nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
	nanoseconds_to_absolutetime(nsec, abs);
}

struct latched_time {
	uint64_t monotonic_time_usec;
	uint64_t mach_time;
};

extern int
kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#endif /* ENABLE_LEGACY_CLOCK_CODE */
/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- bintime + delta*scale
 *
 *	where :
 *	bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
 *	delta is the number of ticks elapsed since the last scale update.
 *	scale is computed according to an adjustment provided by ntp_kern.
 */
static struct clock_calend {
	uint64_t                s_scale_ns; /* scale to apply for each second elapsed; converts to ns */
	int64_t                 s_adj_nsx; /* additional adj to apply for each second elapsed, expressed as a 64 bit frac of ns */
	uint64_t                tick_scale_x; /* scale to apply for each tick elapsed; converts to a 64 bit frac of s */
	uint64_t                offset_count; /* abs time from which to apply the current scales */
	struct bintime          offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */
	struct bintime          bintime; /* cumulative offset (including boottime) expressed in (sec, 64 bits frac of a second) */
	struct bintime          boottime; /* boot time expressed in (sec, 64 bits frac of a second) */
#if ENABLE_LEGACY_CLOCK_CODE
	struct bintime          basesleep;
#endif /* ENABLE_LEGACY_CLOCK_CODE */
} clock_calend;
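
/*
 * Reading the calendar therefore reduces to (see get_scaled_time() below):
 *
 *	now = mach_absolute_time();
 *	TOD = clock_calend.bintime
 *	    + scale_delta(now - clock_calend.offset_count,
 *	                  clock_calend.tick_scale_x,
 *	                  clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
 */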

static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */

#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;

static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif

#if     CONFIG_DTRACE


/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	os_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend     calend;         /* copy of calendar */
	uint32_t                gen;            /* generation count */
} flipflop[2];
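
/*
 * A hypothetical lock-free reader against this structure (a sketch, not code
 * that appears elsewhere in this file) would look like:
 *
 *	for (;;) {
 *	        uint32_t gen = os_atomic_load(&flipflop[i].gen, acquire);
 *	        if (gen & 1)
 *	                continue;               // update in progress, retry
 *	        struct clock_calend snap = flipflop[i].calend;
 *	        if (os_atomic_load(&flipflop[i].gen, acquire) == gen)
 *	                return snap;            // generation unchanged: snapshot valid
 *	}
 */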

static void clock_track_calend_nowait(void);

#endif

void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);

/* Boottime variables */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)        \
MACRO_BEGIN                                                                                     \
	if (((rfrac) += (frac)) >= (unit)) {                    \
	        (rfrac) -= (unit);                                                      \
	        (rsecs) += 1;                                                           \
	}                                                                                               \
	(rsecs) += (secs);                                                              \
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)        \
MACRO_BEGIN                                                                                     \
	if ((int)((rfrac) -= (frac)) < 0) {                             \
	        (rfrac) += (unit);                                                      \
	        (rsecs) -= 1;                                                           \
	}                                                                                               \
	(rsecs) -= (secs);                                                              \
MACRO_END
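
/*
 * Worked example of the carry handling above: adding 0.7 s to 1.8 s with
 * TIME_ADD(rsecs=1, 0, rfrac=800000, 700000, USEC_PER_SEC) first makes rfrac
 * 1500000, which is >= USEC_PER_SEC, so rfrac becomes 500000 and rsecs is
 * bumped to 2, i.e. 2.5 s.  TIME_SUB borrows from rsecs symmetrically when
 * the fractional subtraction underflows.
 */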

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	lck_ticket_init(&clock_lock, 0);

	clock_oldconfig();

	ntp_init();

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}

/*
 *	clock_init:
 *
 *	Called on a processor each time started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t        abstime;

	/*
	 * BSD expects a tick to represent 10ms.
	 */
	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t                       out_info_addr = args->info;
	mach_timebase_info_data_t       info = {};

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof(info));

	return KERN_SUCCESS;
}

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}

/*
 * get_scale_factors_from_adj:
 *
 * computes scale factors from the value given in adjustment.
 *
 * Part of the code has been taken from tc_windup of FreeBSD
 * written by Poul-Henning Kamp <[email protected]>, Julien Ridoux and
 * Konstantin Belousov.
 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
 */
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
	uint64_t scale;
	int64_t nano, frac;

	/*-
	 * Calculating the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;
	scale /= ticks_per_sec;
	*tick_scale_x = scale * 2;

	/*
	 * hi part of adj
	 * it contains ns (without fraction) to add to the next sec.
	 * Get ns scale factor for the next sec.
	 */
	nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
	scale = (uint64_t) NSEC_PER_SEC;
	scale += nano;
	*s_scale_ns = scale;

	/*
	 * lo part of adj
	 * it contains 32 bit frac of ns to add to the next sec.
	 * Keep it as additional adjustment for the next sec.
	 */
	frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
	*s_adj_nsx = (frac > 0)? ((uint64_t) frac) << 32 : -(((uint64_t) (-frac)) << 32);

	return;
}
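
/*
 * Checking the 2199/512 approximation numerically: the exact conversion is
 * a * 2^32 / 10^9 = a * 4.294967296, while (a / 1024) * 2199 combined with
 * the final "* 2" yields a * 4398 / 1024 = a * 4.294921875.  The ratio of
 * the two is about 0.9999894, i.e. the ~10PPM systematic undercompensation
 * the comment above refers to.
 */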

/*
 * scale_delta:
 *
 * returns a bintime struct representing delta scaled according to the
 * scale factors provided to this function.
 */
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
	uint64_t sec, new_ns, over;
	struct bintime bt;

	bt.sec = 0;
	bt.frac = 0;

	/*
	 * If more than one second has elapsed,
	 * scale fully elapsed seconds using the scale factors for seconds.
	 * s_scale_ns -> scales sec to ns.
	 * s_adj_nsx -> additional adj, expressed as a 64 bit frac of ns, to apply to each sec.
	 */
	if (delta > ticks_per_sec) {
		sec = (delta / ticks_per_sec);
		new_ns = sec * s_scale_ns;
		bintime_addns(&bt, new_ns);
		if (s_adj_nsx) {
			if (sec == 1) {
				/* shortcut, no overflow can occur */
				if (s_adj_nsx > 0) {
					bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				} else {
					bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
				}
			} else {
				/*
				 * s_adj_nsx is a 64 bit frac of ns.
				 * sec*s_adj_nsx might overflow in int64_t.
				 * use bintime_addxns to not lose overflowed ns.
				 */
				bintime_addxns(&bt, sec, s_adj_nsx);
			}
		}
		delta = (delta % ticks_per_sec);
	}

	over = multi_overflow(tick_scale_x, delta);
	if (over) {
		bt.sec += over;
	}

	/*
	 * scale elapsed ticks using the scale factor for ticks.
	 */
	bintime_addx(&bt, delta * tick_scale_x);

	return bt;
}
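
/*
 * Sanity check with no NTP adjustment in effect: tick_scale_x is then
 * 2 * (2^63 / ticks_per_sec) ~= 2^64 / ticks_per_sec, so for
 * delta < ticks_per_sec the result is frac = delta * 2^64 / ticks_per_sec,
 * i.e. exactly the elapsed fraction of a second; scale_delta() is the
 * identity conversion from ticks to bintime.
 */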

/*
 * get_scaled_time:
 *
 * returns the time elapsed since the last update of the scale factors,
 * scaled by the current scale factors.
 */
static struct bintime
get_scaled_time(uint64_t now)
{
	uint64_t delta;

	/*
	 * Compute ticks elapsed since last scale update.
	 * This time will be scaled according to the value given by ntp kern.
	 */
	delta = now - clock_calend.offset_count;

	return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}

static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *abstime)
{
	uint64_t now;
	struct bintime bt;

	now  = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);
}

static void
clock_get_calendar_absolute_and_nanotime_locked(
	clock_sec_t             *secs,
	clock_usec_t            *nanosecs,
	uint64_t                *abstime)
{
	uint64_t now;
	struct bintime bt;

	now  = mach_absolute_time();
	if (abstime) {
		*abstime = now;
	}

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2nsclock(&bt, secs, nanosecs);
}

/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t             *secs,
	clock_usec_t            *microsecs,
	uint64_t                *abstime)
{
	spl_t                   s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t             *secs,
	clock_nsec_t            *nanosecs)
{
	spl_t                   s;

	s = splclock();
	clock_lock();

	clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t     *secs,
	clock_usec_t    *microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}

void
clock_gettimeofday_and_absolute_time(
	clock_sec_t     *secs,
	clock_usec_t    *microsecs,
	uint64_t        *mach_time)
{
	uint64_t                now;
	spl_t                   s;
	struct bintime  bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();
	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.bintime);
	bintime2usclock(&bt, secs, microsecs);

	clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

	clock_unlock();
	splx(s);

	if (mach_time) {
		*mach_time = now;
	}
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t             secs,
	clock_usec_t            microsecs)
{
	uint64_t                absolutesys;
	clock_sec_t             newsecs;
	clock_sec_t             oldsecs;
	clock_usec_t            newmicrosecs;
	clock_usec_t            oldmicrosecs;
	uint64_t                commpage_value;
	spl_t                   s;
	struct bintime          bt;
	clock_sec_t             deltasecs;
	clock_usec_t            deltamicrosecs;

	newsecs = secs;
	newmicrosecs = microsecs;

	/*
	 * The settime_lock mtx is used to prevent racing settimeofday calls from
	 * updating the wall clock and the platform clock concurrently.
	 *
	 * clock_lock cannot be used for this purpose because it is acquired from
	 * interrupt context and requires interrupts to be disabled, while updating
	 * the platform clock must be done with interrupts enabled.
	 */
	lck_mtx_lock(&settime_lock);

	s = splclock();
	clock_lock();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif
	commpage_disable_timestamp();

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
		    __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
		os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
		    __func__, (unsigned long)secs, microsecs );
	}
#endif

	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		// moving forwards
		deltasecs = secs;
		deltamicrosecs = microsecs;

		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);

		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_add(&clock_calend.boottime, &bt);
	} else {
		// moving backwards
		deltasecs = oldsecs;
		deltamicrosecs = oldmicrosecs;

		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);

		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
		clock2bintime(&deltasecs, &deltamicrosecs, &bt);
		bintime_sub(&clock_calend.boottime, &bt);
	}

	clock_calend.bintime = clock_calend.boottime;
	bintime_add(&clock_calend.bintime, &clock_calend.offset);

	clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);

	clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif

	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	clock_unlock();
	splx(s);

	/*
	 *	Set the new value for the platform clock.
	 *	This call might block, so interrupts must be enabled.
	 */
#if DEVELOPMENT || DEBUG
	uint64_t now_b = mach_absolute_time();
#endif

	PESetUTCTimeOfDay(newsecs, newmicrosecs);

#if DEVELOPMENT || DEBUG
	uint64_t now_a = mach_absolute_time();
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
	}
#endif

	print_all_clock_variables_internal(__func__, &clock_calend_cp);
	print_all_clock_variables_internal(__func__, &clock_calend_cp1);

	commpage_update_boottime(commpage_value);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	lck_mtx_unlock(&settime_lock);
}

uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;

void
clock_get_calendar_uptime(clock_sec_t *secs)
{
	uint64_t now;
	spl_t s;
	struct bintime bt;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	bt = get_scaled_time(now);
	bintime_add(&bt, &clock_calend.offset);

	*secs = bt.sec;

	clock_unlock();
	splx(s);
}


/*
 * clock_update_calendar:
 *
 * called by the ntp timer to update the scale factors.
 */
void
clock_update_calendar(void)
{
	uint64_t now, delta;
	struct bintime bt;
	spl_t s;
	int64_t adjustment;

	s = splclock();
	clock_lock();

	now  = mach_absolute_time();

	/*
	 * scale the time elapsed since the last update and
	 * add it to offset.
	 */
	bt = get_scaled_time(now);
	bintime_add(&clock_calend.offset, &bt);

	/*
	 * update the base from which to apply the next scale factors.
	 */
	delta = now - clock_calend.offset_count;
	clock_calend.offset_count += delta;

	clock_calend.bintime = clock_calend.offset;
	bintime_add(&clock_calend.bintime, &clock_calend.boottime);

	/*
	 * recompute the next adjustment.
	 */
	ntp_update_second(&adjustment, clock_calend.bintime.sec);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
	}
#endif

	/*
	 * recompute the scale factors.
	 */
	get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);

	clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);

#if DEVELOPMENT || DEBUG
	struct clock_calend calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
}


#if DEVELOPMENT || DEBUG

void
print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
	clock_sec_t     offset_secs;
	clock_usec_t    offset_microsecs;
	clock_sec_t     bintime_secs;
	clock_usec_t    bintime_microsecs;
	clock_sec_t     bootime_secs;
	clock_usec_t    bootime_microsecs;

	if (!g_should_log_clock_adjustments) {
		return;
	}

	bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
	bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
	bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);

	os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
	    func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
	    clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
	os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
	    func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
	    (unsigned long)offset_secs, offset_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
	    func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
	    (unsigned long)bintime_secs, bintime_microsecs);
	os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
	    func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
	    (unsigned long)bootime_secs, bootime_microsecs);

#if !HAS_CONTINUOUS_HWCLOCK
	clock_sec_t     basesleep_secs;
	clock_usec_t    basesleep_microsecs;

	bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
	os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
	    func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
	    (unsigned long)basesleep_secs, basesleep_microsecs);
#endif
}


void
print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
	if (!g_should_log_clock_adjustments) {
		return;
	}

	struct bintime  bt;
	clock_sec_t     wall_secs;
	clock_usec_t    wall_microsecs;
	uint64_t now;
	uint64_t delta;

	if (pmu_secs) {
		os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
	}
	if (sys_secs) {
		os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
	}

	print_all_clock_variables_internal(func, clock_calend_cp);

	now = mach_absolute_time();
	delta = now - clock_calend_cp->offset_count;

	bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
	bintime_add(&bt, &clock_calend_cp->bintime);
	bintime2usclock(&bt, &wall_secs, &wall_microsecs);

	os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
	    func, (unsigned long)wall_secs, wall_microsecs, now);
}


#endif /* DEVELOPMENT || DEBUG */


/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t             sys;  // sleepless time since boot in seconds
	clock_sec_t             secs; // Current UTC time
	clock_sec_t             utc_offset_secs; // Difference between current UTC time and sleepless time since boot
	clock_usec_t            microsys;
	clock_usec_t            microsecs;
	clock_usec_t            utc_offset_microsecs;
	spl_t                   s;
	struct bintime          bt;
#if ENABLE_LEGACY_CLOCK_CODE
	struct bintime          monotonic_bt;
	struct latched_time     monotonic_time;
	uint64_t                monotonic_usec_total;
	clock_sec_t             sys2, monotonic_sec;
	clock_usec_t            microsys2, monotonic_usec;
	size_t                  size;

#endif /* ENABLE_LEGACY_CLOCK_CODE */
	// Get the UTC time and corresponding sys time
	PEGetUTCTimeOfDay(&secs, &microsecs);
	clock_get_system_microtime(&sys, &microsys);

#if ENABLE_LEGACY_CLOCK_CODE
	/*
	 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
	 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
	 * the sleep time.
	 */
	size = sizeof(monotonic_time);
	if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
		has_monotonic_clock = 0;
		os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
	} else {
		has_monotonic_clock = 1;
		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
		os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
	}
#endif /* ENABLE_LEGACY_CLOCK_CODE */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	utc_offset_secs = secs;
	utc_offset_microsecs = microsecs;

	/*
	 * We normally expect the UTC clock to be always-on and produce
	 * greater readings than the tick counter.  There may be corner cases
	 * due to differing clock resolutions (the UTC clock is likely lower) and
	 * errors reading the UTC clock (some implementations return 0
	 * on error) in which that doesn't hold true.  Bring the UTC measurements
	 * in-line with the tick counter measurements as a best effort in that case.
	 */
	if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
		os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
		    __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
		secs = utc_offset_secs = sys;
		microsecs = utc_offset_microsecs = microsys;
	}

	// UTC - sys
	// This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
	TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
	// This function converts utc_offset_secs and utc_offset_microsecs to bintime
	clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);

	/*
	 *	Initialize the boot time based on the platform clock.
	 */
	clock_boottime = secs;
	clock_boottime_usec = microsecs;
	commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);

	nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
	clock_calend.boottime = bt;
	clock_calend.bintime = bt;
	clock_calend.offset.sec = 0;
	clock_calend.offset.frac = 0;

	clock_calend.tick_scale_x = (uint64_t)1 << 63;
	clock_calend.tick_scale_x /= ticks_per_sec;
	clock_calend.tick_scale_x *= 2;

	clock_calend.s_scale_ns = NSEC_PER_SEC;
	clock_calend.s_adj_nsx = 0;

#if ENABLE_LEGACY_CLOCK_CODE
	if (has_monotonic_clock) {
		OS_ANALYZER_SUPPRESS("82347749") monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;

		// monotonic clock - sys
		// This macro stores the subtraction result in monotonic_sec and monotonic_usec
		TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
		clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);

		// set basesleep to the difference between the monotonic clock and sys
		clock_calend.basesleep = monotonic_bt;
	}
#endif /* ENABLE_LEGACY_CLOCK_CODE */
	commpage_update_mach_continuous_time(mach_absolutetime_asleep);

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

	print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#if HAS_CONTINUOUS_HWCLOCK

static void
scale_sleep_time(void)
{
	/* Apply the current NTP frequency adjustment to the time slept.
	 * The frequency adjustment remains stable between calls to ntp_adjtime(),
	 * and should thus provide a reasonable approximation of the total adjustment
	 * required for the time slept. */
	struct bintime sleep_time;
	uint64_t tick_scale_x, s_scale_ns;
	int64_t s_adj_nsx;
	int64_t sleep_adj = ntp_get_freq();
	if (sleep_adj) {
		get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
		sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
	} else {
		tick_scale_x = (uint64_t)1 << 63;
		tick_scale_x /= ticks_per_sec;
		tick_scale_x *= 2;
		sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
		sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
	}
	bintime_add(&clock_calend.offset, &sleep_time);
	bintime_add(&clock_calend.bintime, &sleep_time);
}

static void
clock_wakeup_calendar_hwclock(void)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	uint64_t abstime = mach_absolute_time();
	uint64_t total_sleep_time = mach_continuous_time() - abstime;

	mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
	mach_absolutetime_asleep = total_sleep_time;

	scale_sleep_time();

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
	    (uintptr_t)mach_absolutetime_last_sleep,
	    (uintptr_t)mach_absolutetime_asleep,
	    (uintptr_t)(mach_absolutetime_last_sleep >> 32),
	    (uintptr_t)(mach_absolutetime_asleep >> 32));

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
#if HIBERNATION
	commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset);
#endif
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* HAS_CONTINUOUS_HWCLOCK */

#if ENABLE_LEGACY_CLOCK_CODE

static void
clock_wakeup_calendar_legacy(void)
{
	clock_sec_t             wake_sys_sec;
	clock_usec_t            wake_sys_usec;
	clock_sec_t             wake_sec;
	clock_usec_t            wake_usec;
	clock_sec_t             wall_time_sec;
	clock_usec_t            wall_time_usec;
	clock_sec_t             diff_sec;
	clock_usec_t            diff_usec;
	clock_sec_t             var_s;
	clock_usec_t            var_us;
	spl_t                   s;
	struct bintime          bt, last_sleep_bt;
	struct latched_time     monotonic_time;
	uint64_t                monotonic_usec_total;
	uint64_t                wake_abs;
	size_t                  size;

	/*
	 * If the platform has the monotonic clock, use that to
	 * compute the sleep time. The monotonic clock does not have an offset
	 * that can be modified, so neither the kernel nor userspace can change
	 * the time of this clock; it can only monotonically increase over time.
	 * During sleep mach_absolute_time (sys time) does not tick,
	 * so the sleep time is the difference between (monotonic - sys) now
	 * and the value of (monotonic - sys) stored at the last wake:
	 *
	 * basesleep = (monotonic - sys) ---> computed at last wake
	 * sleep_time = (monotonic - sys) - basesleep
	 *
	 * If the platform does not support the monotonic clock, we set the wall
	 * time to what the UTC clock returns us.
	 * Setting the wall time to UTC time implies that we lose all the
	 * adjustments done during wake time through adjtime/ntp_adjtime.
	 * The UTC time is the monotonic clock + an offset that can be set
	 * by the kernel.
	 * The time slept in this case is the difference between wall time and UTC
	 * at wake.
	 *
	 * IMPORTANT:
	 * We assume that only the kernel is setting the offset of the PMU/RTC and that
	 * it is doing it only through the settimeofday interface.
	 */
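	/*
	 * Worked example with made-up numbers: if at the previous wake
	 * (monotonic - sys) was 100.0 s (basesleep) and at this wake
	 * (monotonic - sys) reads 130.5 s, the device slept for
	 * 130.5 - 100.0 = 30.5 s, which is what gets added to the calendar
	 * and accumulated into mach_absolutetime_asleep below.
	 */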
	if (has_monotonic_clock) {
#if DEVELOPMENT || DEBUG
		/*
		 * Just for debugging, get the wake UTC time.
		 */
		PEGetUTCTimeOfDay(&var_s, &var_us);
#endif
		/*
		 * Get monotonic time with corresponding sys time
		 */
		size = sizeof(monotonic_time);
		if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
			panic("%s: could not call kern.monotonicclock_usecs", __func__);
		}
		wake_abs = monotonic_time.mach_time;
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);

		monotonic_usec_total = monotonic_time.monotonic_time_usec;
		wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
		wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
	} else {
		/*
		 * Get UTC time and corresponding sys time
		 */
		PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
		wake_abs = mach_absolute_time();
		absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
	}

#if DEVELOPMENT || DEBUG
	os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
	if (has_monotonic_clock) {
		OS_ANALYZER_SUPPRESS("82347749") os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
	}
#endif /* DEVELOPMENT || DEBUG */

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp1 = clock_calend;
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * We normally expect the UTC/monotonic clock to be always-on and produce
	 * greater readings than the sys counter.  There may be corner cases
	 * due to differing clock resolutions (the UTC/monotonic clock is likely lower) and
	 * errors reading the UTC/monotonic clock (some implementations return 0
	 * on error) in which that doesn't hold true.
	 */
	if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
		os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
		mach_absolutetime_last_sleep = 0;
		goto done;
	}

	if (has_monotonic_clock) {
		/*
		 * compute the difference monotonic - sys;
		 * we already checked that monotonic time is
		 * greater than sys.
		 */
		diff_sec = wake_sec;
		diff_usec = wake_usec;
		// This macro stores the subtraction result in diff_sec and diff_usec
		TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
		// This function converts diff_sec and diff_usec to bintime
		clock2bintime(&diff_sec, &diff_usec, &bt);

		/*
		 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
		 * It's also possible that the device didn't fully transition to the powered-off state on
		 * the most recent sleep, so the sys counter may not have reset or may have only briefly
		 * turned off.  In that case it's possible for the difference between the monotonic clock and the
		 * sys counter to be less than the previously recorded value in clock_calend.basesleep.
		 * In that case simply record that we slept for 0 ticks.
		 */
		if ((bt.sec > clock_calend.basesleep.sec) ||
		    ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
			// last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
			last_sleep_bt = bt;
			bintime_sub(&last_sleep_bt, &clock_calend.basesleep);

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// set basesleep to current monotonic - abs
			clock_calend.basesleep = bt;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
		} else {
			bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
			os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);

			mach_absolutetime_last_sleep = 0;
		}
	} else {
		/*
		 * set the wall time to the UTC value
		 */
		bt = get_scaled_time(wake_abs);
		bintime_add(&bt, &clock_calend.bintime);
		bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);

		if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
			os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);

			mach_absolutetime_last_sleep = 0;
		} else {
			diff_sec = wake_sec;
			diff_usec = wake_usec;
			// This macro stores the subtraction result in diff_sec and diff_usec
			TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
			// This function converts diff_sec and diff_usec to bintime
			clock2bintime(&diff_sec, &diff_usec, &bt);

			// time slept in this case is the difference between PMU/RTC and wall time
			last_sleep_bt = bt;

			bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;

			// update wall time
			bintime_add(&clock_calend.offset, &last_sleep_bt);
			bintime_add(&clock_calend.bintime, &last_sleep_bt);

			bintime2usclock(&last_sleep_bt, &var_s, &var_us);
			os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
		}
	}
done:
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
	    (uintptr_t)mach_absolutetime_last_sleep,
	    (uintptr_t)mach_absolutetime_asleep,
	    (uintptr_t)(mach_absolutetime_last_sleep >> 32),
	    (uintptr_t)(mach_absolutetime_asleep >> 32));

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

#if DEVELOPMENT || DEBUG
	struct clock_calend clock_calend_cp = clock_calend;
#endif

	clock_unlock();
	splx(s);

#if DEVELOPMENT || DEBUG
	if (g_should_log_clock_adjustments) {
		print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
		print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
	}
#endif /* DEVELOPMENT || DEBUG */

	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

#endif /* ENABLE_LEGACY_CLOCK_CODE */

void
clock_wakeup_calendar(void)
{
#if HAS_CONTINUOUS_HWCLOCK
#if HIBERNATION_USES_LEGACY_CLOCK
	if (gIOHibernateState) {
		// if we're resuming from hibernation, we have to take the legacy wakeup path
		return clock_wakeup_calendar_legacy();
	}
#endif /* HIBERNATION_USES_LEGACY_CLOCK */
	// use the hwclock wakeup path
	return clock_wakeup_calendar_hwclock();
#elif ENABLE_LEGACY_CLOCK_CODE
	return clock_wakeup_calendar_legacy();
#else
#error "can't determine which clock code to run"
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t                     *secs,
	clock_nsec_t            *nanosecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_boottime_microtime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t                     *secs,
	clock_usec_t            *microsecs)
{
	spl_t   s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_usec_t)clock_boottime_usec;

	clock_unlock();
	splx(s);
}


/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void   *parameter,
	wait_result_t   wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:    args->deadline          Absolute time (mach time units) to wait until
 *
 * Returns:        0                      Success
 *                !0                      Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args        *args)
{
	uint64_t                deadline = args->deadline;
	wait_result_t   wresult;


	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
	    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING) {
		wresult = thread_block(mach_wait_until_continue);
	}

	return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
}
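
/*
 * Illustrative (hypothetical) userspace use of this trap via its
 * mach_wait_until() wrapper from <mach/mach_time.h> -- sleep for ~10 ms:
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ticks = 10 * 1000000ULL * tb.denom / tb.numer; // ns -> abs ticks
 *	mach_wait_until(mach_absolute_time() + ticks);
 */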
1543 
1544 void
clock_delay_until(uint64_t deadline)1545 clock_delay_until(
1546 	uint64_t                deadline)
1547 {
1548 	uint64_t                now = mach_absolute_time();
1549 
1550 	if (now >= deadline) {
1551 		return;
1552 	}
1553 
1554 	_clock_delay_until_deadline(deadline - now, deadline);
1555 }
1556 
1557 /*
1558  * Preserve the original precise interval that the client
1559  * requested for comparison to the spin threshold.
1560  */
1561 void
_clock_delay_until_deadline(uint64_t interval,uint64_t deadline)1562 _clock_delay_until_deadline(
1563 	uint64_t                interval,
1564 	uint64_t                deadline)
1565 {
1566 	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
1567 }
1568 
1569 /*
1570  * Like _clock_delay_until_deadline, but it accepts a
1571  * leeway value.
1572  */
1573 void
_clock_delay_until_deadline_with_leeway(uint64_t interval,uint64_t deadline,uint64_t leeway)1574 _clock_delay_until_deadline_with_leeway(
1575 	uint64_t                interval,
1576 	uint64_t                deadline,
1577 	uint64_t                leeway)
1578 {
1579 	if (interval == 0) {
1580 		return;
1581 	}
1582 
1583 	if (ml_delay_should_spin(interval) ||
1584 	    get_preemption_level() != 0 ||
1585 	    ml_get_interrupts_enabled() == FALSE) {
1586 		machine_delay_until(interval, deadline);
1587 	} else {
1588 		/*
1589 		 * For now, assume a leeway request of 0 means the client does not want a leeway
1590 		 * value. We may want to change this interpretation in the future.
1591 		 */
1592 
1593 		if (leeway) {
1594 			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
1595 		} else {
1596 			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
1597 		}
1598 
1599 		thread_block(THREAD_CONTINUE_NULL);
1600 	}
1601 }
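/*
 * Note (editorial): the spin path above is taken when the interval is
 * short enough that ml_delay_should_spin() deems a busy-wait cheaper
 * than a context switch, or when blocking is illegal because preemption
 * is disabled or interrupts are off. Otherwise the thread blocks
 * uninterruptibly (THREAD_UNINT) until the deadline, passing the leeway
 * through only when it is non-zero.
 */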
1602 
1603 void
1604 delay_for_interval(
1605 	uint32_t                interval,
1606 	uint32_t                scale_factor)
1607 {
1608 	uint64_t                abstime;
1609 
1610 	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1611 
1612 	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
1613 }
1614 
1615 void
1616 delay_for_interval_with_leeway(
1617 	uint32_t                interval,
1618 	uint32_t                leeway,
1619 	uint32_t                scale_factor)
1620 {
1621 	uint64_t                abstime_interval;
1622 	uint64_t                abstime_leeway;
1623 
1624 	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
1625 	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
1626 
1627 	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
1628 }
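/*
 * Usage sketch (editorial): wait roughly 10ms while granting the timer
 * subsystem up to 1ms of leeway so the wakeup can be coalesced with
 * other pending timers.
 *
 *	delay_for_interval_with_leeway(10 * 1000, 1000, NSEC_PER_USEC);
 */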
1629 
1630 void
1631 delay(
1632 	int             usec)
1633 {
1634 	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1635 }
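/*
 * Usage sketch (editorial): delay(50) waits approximately 50
 * microseconds; a negative argument is treated as its absolute value.
 */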
1636 
1637 /*
1638  *	Miscellaneous routines.
1639  */
1640 void
1641 clock_interval_to_deadline(
1642 	uint32_t                        interval,
1643 	uint32_t                        scale_factor,
1644 	uint64_t                        *result)
1645 {
1646 	uint64_t        abstime;
1647 
1648 	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1649 
1650 	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1651 		*result = UINT64_MAX;
1652 	}
1653 }
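/*
 * Usage sketch (editorial): compute a 100ms timeout deadline. If the
 * addition would wrap past UINT64_MAX, the deadline saturates rather
 * than wrapping around to a value in the past.
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(100 * 1000, NSEC_PER_USEC, &deadline);
 */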
1654 
1655 void
1656 nanoseconds_to_deadline(
1657 	uint64_t                        interval,
1658 	uint64_t                        *result)
1659 {
1660 	uint64_t        abstime;
1661 
1662 	nanoseconds_to_absolutetime(interval, &abstime);
1663 
1664 	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1665 		*result = UINT64_MAX;
1666 	}
1667 }
1668 
1669 void
1670 clock_absolutetime_interval_to_deadline(
1671 	uint64_t                        abstime,
1672 	uint64_t                        *result)
1673 {
1674 	if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1675 		*result = UINT64_MAX;
1676 	}
1677 }
1678 
1679 void
1680 clock_continuoustime_interval_to_deadline(
1681 	uint64_t                        conttime,
1682 	uint64_t                        *result)
1683 {
1684 	if (os_add_overflow(mach_continuous_time(), conttime, result)) {
1685 		*result = UINT64_MAX;
1686 	}
1687 }
1688 
1689 void
1690 clock_get_uptime(
1691 	uint64_t        *result)
1692 {
1693 	*result = mach_absolute_time();
1694 }
1695 
1696 void
1697 clock_deadline_for_periodic_event(
1698 	uint64_t                        interval,
1699 	uint64_t                        abstime,
1700 	uint64_t                        *deadline)
1701 {
1702 	assert(interval != 0);
1703 
1704 	// *deadline += interval;
1705 	if (os_add_overflow(*deadline, interval, deadline)) {
1706 		*deadline = UINT64_MAX;
1707 	}
1708 
1709 	if (*deadline <= abstime) {
1710 		// *deadline = abstime + interval;
1711 		if (os_add_overflow(abstime, interval, deadline)) {
1712 			*deadline = UINT64_MAX;
1713 		}
1714 
1715 		abstime = mach_absolute_time();
1716 		if (*deadline <= abstime) {
1717 			// *deadline = abstime + interval;
1718 			if (os_add_overflow(abstime, interval, deadline)) {
1719 				*deadline = UINT64_MAX;
1720 			}
1721 		}
1722 	}
1723 }
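/*
 * Usage sketch (editorial, with a hypothetical handler): each expiry
 * advances the deadline by exactly one interval, so a periodic timer
 * does not drift. If processing fell more than one interval behind,
 * the deadline is resynchronized to "now + interval" instead of firing
 * a burst of catch-up events.
 *
 *	static uint64_t tick_deadline;
 *
 *	static void
 *	tick_expired(uint64_t tick_interval_abs)
 *	{
 *		clock_deadline_for_periodic_event(tick_interval_abs,
 *		    mach_absolute_time(), &tick_deadline);
 *		// ... arm the next timer at tick_deadline ...
 *	}
 */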
1724 
1725 uint64_t
1726 mach_continuous_time(void)
1727 {
1728 #if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
1729 	return ml_get_hwclock() + hwclock_conttime_offset;
1730 #elif HAS_CONTINUOUS_HWCLOCK
1731 	return ml_get_hwclock();
1732 #else
1733 	while (1) {
1734 		uint64_t read1 = mach_absolutetime_asleep;
1735 		uint64_t absolute = mach_absolute_time();
1736 		OSMemoryBarrier();
1737 		uint64_t read2 = mach_absolutetime_asleep;
1738 
1739 		if (__builtin_expect(read1 == read2, 1)) {
1740 			return absolute + read1;
1741 		}
1742 	}
1743 #endif
1744 }
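/*
 * Note (editorial): the #else path above is a lock-free double read of
 * the sleep offset: read mach_absolutetime_asleep, read the clock, then
 * re-read the offset. If the two offset reads match, no sleep
 * transition updated the offset in between, so the sum is a consistent
 * continuous-time value; otherwise the loop retries.
 */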
1745 
1746 uint64_t
1747 mach_continuous_approximate_time(void)
1748 {
1749 #if HAS_CONTINUOUS_HWCLOCK
1750 	return mach_continuous_time();
1751 #else
1752 	while (1) {
1753 		uint64_t read1 = mach_absolutetime_asleep;
1754 		uint64_t absolute = mach_approximate_time();
1755 		OSMemoryBarrier();
1756 		uint64_t read2 = mach_absolutetime_asleep;
1757 
1758 		if (__builtin_expect(read1 == read2, 1)) {
1759 			return absolute + read1;
1760 		}
1761 	}
1762 #endif
1763 }
1764 
1765 /*
1766  * continuoustime_to_absolutetime
1767  * Must be called with interrupts disabled
1768  * Returned value is only valid until the next update to
1769  * mach_continuous_time
1770  */
1771 uint64_t
1772 continuoustime_to_absolutetime(uint64_t conttime)
1773 {
1774 	if (conttime <= mach_absolutetime_asleep) {
1775 		return 0;
1776 	} else {
1777 		return conttime - mach_absolutetime_asleep;
1778 	}
1779 }
1780 
1781 /*
1782  * absolutetime_to_continuoustime
1783  * Must be called with interrupts disabled
1784  * Returned value is only valid until the next update to
1785  * mach_continuous_time
1786  */
1787 uint64_t
1788 absolutetime_to_continuoustime(uint64_t abstime)
1789 {
1790 	return abstime + mach_absolutetime_asleep;
1791 }
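/*
 * Usage sketch (editorial): converting in both directions round-trips,
 * provided interrupts stay disabled so mach_absolutetime_asleep cannot
 * change between the two calls.
 *
 *	uint64_t abs  = mach_absolute_time();
 *	uint64_t cont = absolutetime_to_continuoustime(abs);
 *
 *	assert(continuoustime_to_absolutetime(cont) == abs);
 */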
1792 
1793 #if     CONFIG_DTRACE
1794 
1795 /*
1796  * clock_get_calendar_nanotime_nowait
1797  *
1798  * Description:	Non-blocking version of clock_get_calendar_nanotime()
1799  *
1800  * Notes:	This function operates by separately tracking calendar time
1801  *		updates using a two element structure to copy the calendar
1802  *		state, which may be asynchronously modified.  It utilizes
1803  *		barrier instructions in the tracking process and in the local
1804  *		stable snapshot process in order to ensure that a consistent
1805  *		snapshot is used to perform the calculation.
1806  */
1807 void
1808 clock_get_calendar_nanotime_nowait(
1809 	clock_sec_t                     *secs,
1810 	clock_nsec_t            *nanosecs)
1811 {
1812 	int i = 0;
1813 	uint64_t                now;
1814 	struct unlocked_clock_calend stable;
1815 	struct bintime bt;
1816 
1817 	for (;;) {
1818 		stable = flipflop[i];           /* take snapshot */
1819 
1820 		/*
1821 		 * Use a barrier instruction to ensure atomicity.  We AND
1822 		 * off the "in progress" bit to get the current generation
1823 		 * count.
1824 		 */
1825 		os_atomic_andnot(&stable.gen, 1, relaxed);
1826 
1827 		/*
1828 		 * If an update _is_ in progress, the generation count will be
1829 		 * off by one, if it _was_ in progress, it will be off by two,
1830 		 * and if we caught it at a good time, it will be equal (and
1831 		 * our snapshot is therefore stable).
1832 		 */
1833 		if (flipflop[i].gen == stable.gen) {
1834 			break;
1835 		}
1836 
1837 		/* Switch to the other element of the flipflop, and try again. */
1838 		i ^= 1;
1839 	}
1840 
1841 	now = mach_absolute_time();
1842 
1843 	bt = get_scaled_time(now);
1844 
1845 	bintime_add(&bt, &clock_calend.bintime);
1846 
1847 	bintime2nsclock(&bt, secs, nanosecs);
1848 }
1849 
1850 static void
1851 clock_track_calend_nowait(void)
1852 {
1853 	int i;
1854 
1855 	for (i = 0; i < 2; i++) {
1856 		struct clock_calend tmp = clock_calend;
1857 
1858 		/*
1859 		 * Set the low bit of the generation count; since we use a
1860 		 * barrier instruction to do this, we are guaranteed that this
1861 		 * will flag an update in progress to an async caller trying
1862 		 * to examine the contents.
1863 		 */
1864 		os_atomic_or(&flipflop[i].gen, 1, relaxed);
1865 
1866 		flipflop[i].calend = tmp;
1867 
1868 		/*
1869 		 * Increment the generation count to clear the low bit to
1870 		 * signal completion.  If a caller compares the generation
1871 		 * count after taking a copy while in progress, the count
1872 		 * will be off by two.
1873 		 */
1874 		os_atomic_inc(&flipflop[i].gen, relaxed);
1875 	}
1876 }
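/*
 * Note (editorial): writer/reader protocol in brief. The writer above
 * sets the low generation bit, copies the calendar state, then
 * increments the count (clearing the bit). The reader in
 * clock_get_calendar_nanotime_nowait() snapshots one flipflop element,
 * masks off the low bit, and accepts the snapshot only if the stored
 * generation still matches; otherwise it flips to the other element
 * and retries.
 */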
1877 
1878 #endif  /* CONFIG_DTRACE */
1879