/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * File:	i386/rtclock.c
 * Purpose:	Routines for handling the machine dependent
 *		real-time clock. Historically, this clock was
 *		generated by the Intel 8254 Programmable Interval
 *		Timer, but local APIC timers are now used for
 *		this purpose, with the master time reference being
 *		the CPU clock counted by the timestamp MSR.
 */

#include <mach/mach_types.h>

#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/timer_queue.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <architecture/i386/pio.h>
#include <i386/machine_cpu.h>
#include <i386/cpuid.h>
#include <i386/cpu_threads.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/pal_routines.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
#include <i386/tsc.h>
#include <i386/rtclock_protos.h>

#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

int rtclock_init(void);

uint64_t tsc_rebase_abs_time = 0;

volatile uint64_t gAcpiLastSleepTscBase = 0;
volatile uint64_t gAcpiLastSleepNanoBase = 0;
volatile uint64_t gAcpiLastWakeTscBase = 0;
volatile uint64_t gAcpiLastWakeNanoBase = 0;

static void rtc_set_timescale(uint64_t cycles);
static uint64_t rtc_export_speed(uint64_t cycles);

void
rtc_timer_start(void)
{
	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	x86_lcpu()->rtcDeadline = EndOfAllTime;
	timer_resync_deadlines();
}

static inline uint32_t
_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
	uint32_t remain;
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
	*microsecs = remain / NSEC_PER_USEC;
	return remain;
}

static inline void
_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
{
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	*nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
}

/*
 * Nanotime/mach_absolute_time
 * ---------------------------
 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
 * efficiently by the kernel and in userspace - is the reference for all timing.
 * The cpu clock rate is platform-dependent and may stop or be reset when the
 * processor is napped/slept. As a result, nanotime is the software abstraction
 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
 *
 * The kernel maintains nanotime information recording:
 * - the ratio of tsc to nanoseconds
 *   with this ratio expressed as a 32-bit scale and shift
 *   (power of 2 divider);
 * - { tsc_base, ns_base } pair of corresponding timestamps.
 *
 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
 * for the userspace nanotime routine to read.
 *
 * All of the routines which update the nanotime data are non-reentrant. This must
 * be guaranteed by the caller.
 */
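
/*
 * Illustrative sketch (not compiled): given the scale/shift produced by
 * rtc_set_timescale() below, the conversion is algebraically
 *
 *	ns = ns_base + (((tsc - tsc_base) << shift) * scale) >> 32
 *
 * The helper name below is hypothetical; the kernel's actual read path is
 * the code behind _rtc_nanotime_read(), which also protects readers from
 * observing a half-updated tuple.
 */
#if 0	/* example only */
static uint64_t
example_tsc_to_nanotime(const pal_rtc_nanotime_t *rntp)
{
	uint64_t delta = rdtsc64() - rntp->tsc_base;

	/*
	 * scale is nanoseconds-per-(shifted-)tick in 32.32 fixed point;
	 * the 128-bit intermediate mirrors the 64x32-bit multiply the
	 * real implementation performs to avoid overflow.
	 */
	return rntp->ns_base +
	    (uint64_t)((((__uint128_t)(delta << rntp->shift)) *
	    rntp->scale) >> 32);
}
#endif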
static inline void
rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}

/*
 * rtc_nanotime_init:
 *
 * Initialize the nanotime info from the base time.
 */
static inline void
_rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
{
	uint64_t tsc = rdtsc64();

	_pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
}

void
rtc_nanotime_init(uint64_t base)
{
	gAcpiLastSleepTscBase = pal_rtc_nanotime_info.tsc_base;
	gAcpiLastSleepNanoBase = pal_rtc_nanotime_info.ns_base;

	_rtc_nanotime_init(&pal_rtc_nanotime_info, base);

	gAcpiLastWakeTscBase = pal_rtc_nanotime_info.tsc_base;
	gAcpiLastWakeNanoBase = pal_rtc_nanotime_info.ns_base;

	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
}

/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
	spl_t s = splclock();

	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
	splx(s);
}

/*
 * rtc_nanotime_read:
 *
 * Returns the current nanotime value, accessible from any
 * context.
 */
static inline uint64_t
rtc_nanotime_read(void)
{
	return _rtc_nanotime_read(&pal_rtc_nanotime_info);
}

/*
 * rtc_clock_napped:
 *
 * Invoked from power management when we exit from a low C-State (>= C4)
 * and the TSC has stopped counting. The nanotime data is updated according
 * to the provided value which represents the new value for nanotime.
 */
void
rtc_clock_napped(uint64_t base, uint64_t tsc_base)
{
	pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
	uint64_t oldnsecs;
	uint64_t newnsecs;
	uint64_t tsc;

	assert(!ml_get_interrupts_enabled());
	tsc = rdtsc64();
	oldnsecs = rntp->ns_base + _rtc_tsc_to_nanoseconds(tsc - rntp->tsc_base, rntp);
	newnsecs = base + _rtc_tsc_to_nanoseconds(tsc - tsc_base, rntp);

	/*
	 * Only update the base values if time using the new base values
	 * is later than the time using the old base values.
	 */
	if (oldnsecs < newnsecs) {
		_pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
		rtc_nanotime_set_commpage(rntp);
	}
}

/*
 * Invoked from power management to correct the SFLM TSC entry drift problem:
 * a small delta is added to the tsc_base. This is equivalent to nudging time
 * backwards. We require this to be on the order of a TSC quantum, which won't
 * cause callers of mach_absolute_time() to see time going backwards!
 */
void
rtc_clock_adjust(uint64_t tsc_base_delta)
{
	pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;

	assert(!ml_get_interrupts_enabled());
	assert(tsc_base_delta < 100ULL);	/* i.e. it's small */
	_rtc_nanotime_adjust(tsc_base_delta, rntp);
	rtc_nanotime_set_commpage(rntp);
}

/*
 * rtc_sleep_wakeup:
 *
 * Invoked from power management when we have awoken from a sleep (S3)
 * and the TSC has been reset, or from Deep Idle (S0) sleep when the TSC
 * has progressed. The nanotime data is updated based on the passed-in value.
 *
 * The caller must guarantee non-reentrancy.
 */
void
rtc_sleep_wakeup(
	uint64_t base)
{
	/* Set fixed configuration for lapic timers */
	rtc_timer->rtc_config();

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 */
	rtc_nanotime_init(base);
}

void
rtc_decrementer_configure(void)
{
	rtc_timer->rtc_config();
}

/*
 * rtclock_early_init() is called very early at boot to
 * establish mach_absolute_time() and set it to zero.
 */
void
rtclock_early_init(void)
{
	assert(tscFreq);
	rtc_set_timescale(tscFreq);
}

/*
 * Initialize the real-time clock device.
 * In addition, various variables used to support the clock are initialized.
 */
int
rtclock_init(void)
{
	uint64_t cycles;

	assert(!ml_get_interrupts_enabled());

	if (cpu_number() == master_cpu) {
		assert(tscFreq);

		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		rtc_timer_init();
		clock_timebase_init();
		ml_init_delay_spin_threshold(10);
	}

	/* Set fixed configuration for lapic timers */
	rtc_timer->rtc_config();
	rtc_timer_start();

	return 1;
}

/*
 * Utility routine:
 * Compute the timescale (scale and shift) used to convert TSC deltas
 * to nanoseconds, given the number of processor cycles in a second.
 */
static void
rtc_set_timescale(uint64_t cycles)
{
	pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
	uint32_t shift = 0;

	/* the "scale" factor will overflow unless cycles > SLOW_TSC_THRESHOLD */

	while (cycles <= SLOW_TSC_THRESHOLD) {
		shift++;
		cycles <<= 1;
	}

	rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);

	rntp->shift = shift;

	/*
	 * On some platforms, the TSC is not reset at warm boot. But the
	 * rebase time must be relative to the current boot so we can't use
	 * mach_absolute_time(). Instead, we convert the TSC delta since boot
	 * to nanoseconds.
	 */
	if (tsc_rebase_abs_time == 0) {
		tsc_rebase_abs_time = _rtc_tsc_to_nanoseconds(
			rdtsc64() - tsc_at_boot, rntp);
	}

	rtc_nanotime_init(0);
}
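
/*
 * Worked example (hypothetical 2 GHz part, assumed to be above
 * SLOW_TSC_THRESHOLD): cycles = 2,000,000,000, so shift remains 0 and
 *
 *	scale = (NSEC_PER_SEC << 32) / 2,000,000,000 = 2^31 = 0x80000000
 *
 * i.e. each TSC tick is half a nanosecond, and the conversion
 * (delta * 0x80000000) >> 32 yields delta / 2 as expected.
 */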

static uint64_t
rtc_export_speed(uint64_t cyc_per_sec)
{
	pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info;
	uint64_t cycles;

	if (rntp->shift != 0) {
		printf("Slow TSC, rtc_nanotime.shift == %d\n", rntp->shift);
	}

	/* Round: */
	cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR / 2))
	    / UI_CPUFREQ_ROUNDING_FACTOR)
	    * UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL) {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
	return cycles;
}
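
/*
 * Rounding example: a measured cyc_per_sec of, say, 2,394,000,000 Hz becomes
 * ((2,394,000,000 + 5,000,000) / 10,000,000) * 10,000,000 = 2,390,000,000 Hz
 * in integer arithmetic, i.e. the user-visible speed is rounded to the
 * nearest 10 MHz.
 */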

void
clock_get_system_microtime(
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	uint64_t now = rtc_nanotime_read();

	_absolutetime_to_microtime(now, secs, microsecs);
}

void
clock_get_system_nanotime(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	uint64_t now = rtc_nanotime_read();

	_absolutetime_to_nanotime(now, secs, nanosecs);
}

void
clock_gettimeofday_set_commpage(uint64_t abstime, uint64_t sec, uint64_t frac, uint64_t scale, uint64_t tick_per_sec)
{
	commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec);
}

void
clock_timebase_info(
	mach_timebase_info_t info)
{
	info->numer = info->denom = 1;
}

/*
 * Real-time clock device interrupt.
 */
int
rtclock_intr(
	x86_saved_state_t *tregs)
{
	uint64_t rip;
	boolean_t user_mode = FALSE;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t *regs;

		regs = saved_state64(tregs);

		if (regs->isf.cs & 0x03) {
			user_mode = TRUE;
		}
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t *regs;

		regs = saved_state32(tregs);

		if (regs->cs & 0x03) {
			user_mode = TRUE;
		}
		rip = regs->eip;
	}

	/* call the generic etimer */
	timer_intr(user_mode, rip);

	return 0;
}

/*
 * Request timer pop from the hardware
 */
uint64_t
setPop(uint64_t time)
{
	uint64_t now;
	uint64_t pop;

	/* 0 and EndOfAllTime are special-cases for "clear the timer" */
	if (time == 0 || time == EndOfAllTime) {
		time = EndOfAllTime;
		now = 0;
		pop = rtc_timer->rtc_set(0, 0);
	} else {
		now = rtc_nanotime_read();	/* The time in nanoseconds */
		pop = rtc_timer->rtc_set(time, now);
	}

	/* Record requested and actual deadlines set */
	x86_lcpu()->rtcDeadline = time;
	x86_lcpu()->rtcPop = pop;

	return pop - now;
}
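
/*
 * Usage note: setPop(0) and setPop(EndOfAllTime) both cancel any pending
 * pop; otherwise the lapic timer is armed for the requested deadline and
 * the return value is how far in the future (in nanoseconds) the hardware
 * pop was actually programmed.
 */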

uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}

uint64_t
mach_approximate_time(void)
{
	return rtc_nanotime_read();
}

void
clock_interval_to_absolutetime_interval(
	uint32_t interval,
	uint32_t scale_factor,
	uint64_t *result)
{
	*result = (uint64_t)interval * scale_factor;
}

void
absolutetime_to_microtime(
	uint64_t abstime,
	clock_sec_t *secs,
	clock_usec_t *microsecs)
{
	_absolutetime_to_microtime(abstime, secs, microsecs);
}

void
nanotime_to_absolutetime(
	clock_sec_t secs,
	clock_nsec_t nanosecs,
	uint64_t *result)
{
	*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
}

void
absolutetime_to_nanoseconds(
	uint64_t abstime,
	uint64_t *result)
{
	*result = abstime;
}

void
nanoseconds_to_absolutetime(
	uint64_t nanoseconds,
	uint64_t *result)
{
	*result = nanoseconds;
}

void
machine_delay_until(
	uint64_t interval,
	uint64_t deadline)
{
	(void)interval;
	while (mach_absolute_time() < deadline) {
		cpu_pause();
	}
}