xref: /xnu-12377.1.9/bsd/kern/kern_time.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  * 3. All advertising materials mentioning features or use of this software
42  *    must display the following acknowledgement:
43  *	This product includes software developed by the University of
44  *	California, Berkeley and its contributors.
45  * 4. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
62  */
63 /*
64  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65  * support for mandatory and extensible security protections.  This notice
66  * is included in support of clause 2.2 (b) of the Apple Public License,
67  * Version 2.0.
68  */
69 
70 #include <sys/param.h>
71 #include <sys/resourcevar.h>
72 #include <sys/kernel.h>
73 #include <sys/systm.h>
74 #include <sys/proc_internal.h>
75 #include <sys/kauth.h>
76 #include <sys/vnode.h>
77 #include <sys/time.h>
78 #include <sys/priv.h>
79 
80 #include <sys/mount_internal.h>
81 #include <sys/sysproto.h>
82 #include <sys/signalvar.h>
83 #include <sys/protosw.h> /* for net_uptime2timeval() */
84 
85 #include <kern/clock.h>
86 #include <kern/task.h>
87 #include <kern/thread_call.h>
88 #include <kern/uipc_domain.h>
89 #if CONFIG_MACF
90 #include <security/mac_framework.h>
91 #endif
92 #include <IOKit/IOBSD.h>
93 #include <sys/time.h>
94 #include <kern/remote_time.h>
95 
96 #define HZ      100     /* XXX */
97 
98 /* simple lock used to access timezone, tz structure */
99 static LCK_GRP_DECLARE(tz_slock_grp, "tzlock");
100 static LCK_SPIN_DECLARE(tz_slock, &tz_slock_grp);
101 
102 static void             setthetime(
103 	struct timeval  *tv);
104 
105 static boolean_t timeval_fixusec(struct timeval *t1);
106 
107 /*
108  * Time of day and interval timer support.
109  *
110  * These routines provide the kernel entry points to get and set
111  * the time-of-day and per-process interval timers.  Subroutines
112  * here provide support for adding and subtracting timeval structures
113  * and decrementing interval timers, optionally reloading the interval
114  * timers when they expire.
115  */
116 /* ARGSUSED */
/*
 * gettimeofday() system call.
 *
 * Copies the current calendar time, and optionally the global timezone and
 * the mach absolute time corresponding to the same instant, out to user
 * space.  All three output pointers in uap are optional.
 *
 * Returns: 0 on success, or an error from copyout().
 */
int
gettimeofday(
	struct proc     *p,
	struct gettimeofday_args *uap,
	__unused int32_t *retval)
{
	int error = 0;
	struct timezone ltz; /* local copy of the global tz, taken under lock */
	clock_sec_t secs;
	clock_usec_t usecs;
	uint64_t mach_time;

	/*
	 * Read the calendar time and the matching mach absolute time in a
	 * single call so both values come from the same instant; skip the
	 * clock read entirely when the caller asked for neither.
	 */
	if (uap->tp || uap->mach_absolute_time) {
		clock_gettimeofday_and_absolute_time(&secs, &usecs, &mach_time);
	}

	if (uap->tp) {
		/* Casting secs through a uint32_t to match arm64 commpage */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		} else {
			struct user32_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		}
		if (error) {
			return error;
		}
	}

	if (uap->tzp) {
		/*
		 * Snapshot the global timezone under its spinlock; the
		 * copyout (which may fault) happens after the lock is dropped.
		 */
		lck_spin_lock(&tz_slock);
		ltz = tz;
		lck_spin_unlock(&tz_slock);

		error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp), sizeof(tz));
	}

	if (error == 0 && uap->mach_absolute_time) {
		error = copyout(&mach_time, uap->mach_absolute_time, sizeof(mach_time));
	}

	return error;
}
165 
166 /*
167  * XXX Y2038 bug because of setthetime() argument
168  */
169 /* ARGSUSED */
/*
 * settimeofday() system call.
 *
 * Sets the calendar time and/or the global timezone.  A task holding
 * SETTIME_ENTITLEMENT bypasses the permission checks; otherwise the MAC
 * framework and (on macOS) the classic superuser check must both pass.
 *
 * Returns: 0 on success; EPERM for negative or unnormalizable time values;
 * errors from copyin() or the permission checks otherwise.
 */
int
settimeofday(__unused struct proc *p, struct settimeofday_args  *uap, __unused int32_t *retval)
{
	struct timeval atv;
	struct timezone atz;
	int error;

	bzero(&atv, sizeof(atv));

	/* Check that this task is entitled to set the time or it is root */
	if (!IOCurrentTaskHasEntitlement(SETTIME_ENTITLEMENT)) {
#if CONFIG_MACF
		/* MAC policy may veto the time change for unentitled tasks. */
		error = mac_system_check_settime(kauth_cred_get());
		if (error) {
			return error;
		}
#endif
#if defined(XNU_TARGET_OS_OSX)
		/* On macOS, additionally require superuser privileges. */
		if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
			return error;
		}
#endif
	}

	/* Verify all parameters before changing time */
	if (uap->tv) {
		/* Copy in the new time in the caller's ABI layout. */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = (__darwin_time_t)user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		} else {
			struct user32_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		}
		if (error) {
			return error;
		}
	}
	if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz)))) {
		return error;
	}
	if (uap->tv) {
		/* only positive values of sec/usec are accepted */
		if (atv.tv_sec < 0 || atv.tv_usec < 0) {
			return EPERM;
		}
		/* Fold an oversized usec into seconds; reject on overflow. */
		if (!timeval_fixusec(&atv)) {
			return EPERM;
		}
		setthetime(&atv);
	}
	if (uap->tzp) {
		/* Publish the new timezone under the tz spinlock. */
		lck_spin_lock(&tz_slock);
		tz = atz;
		lck_spin_unlock(&tz_slock);
	}
	return 0;
}
231 
232 static void
setthetime(struct timeval * tv)233 setthetime(
234 	struct timeval  *tv)
235 {
236 	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
237 }
238 
239 /*
240  *	Verify the calendar value.  If negative,
241  *	reset to zero (the epoch).
242  */
243 void
inittodr(__unused time_t base)244 inittodr(
245 	__unused time_t base)
246 {
247 	struct timeval  tv;
248 
249 	/*
250 	 * Assertion:
251 	 * The calendar has already been
252 	 * set up from the platform clock.
253 	 *
254 	 * The value returned by microtime()
255 	 * is gotten from the calendar.
256 	 */
257 	microtime(&tv);
258 
259 	if (tv.tv_sec < 0 || tv.tv_usec < 0) {
260 		printf("WARNING: preposterous time in Real Time Clock");
261 		tv.tv_sec = 0;          /* the UNIX epoch */
262 		tv.tv_usec = 0;
263 		setthetime(&tv);
264 		printf(" -- CHECK AND RESET THE DATE!\n");
265 	}
266 }
267 
268 time_t
boottime_sec(void)269 boottime_sec(void)
270 {
271 	clock_sec_t             secs;
272 	clock_nsec_t    nanosecs;
273 
274 	clock_get_boottime_nanotime(&secs, &nanosecs);
275 	return secs;
276 }
277 
278 void
boottime_timeval(struct timeval * tv)279 boottime_timeval(struct timeval *tv)
280 {
281 	clock_sec_t             secs;
282 	clock_usec_t    microsecs;
283 
284 	clock_get_boottime_microtime(&secs, &microsecs);
285 
286 	tv->tv_sec = secs;
287 	tv->tv_usec = microsecs;
288 }
289 
290 /*
291  * Get value of an interval timer.  The process virtual and
292  * profiling virtual time timers are kept internally in the
293  * way they are specified externally: in time until they expire.
294  *
295  * The real time interval timer expiration time (p_rtime)
296  * is kept as an absolute time rather than as a delta, so that
297  * it is easy to keep periodic real-time signals from drifting.
298  *
299  * The real time timer is processed by a callout routine.
300  * Since a callout may be delayed in real time due to
301  * other processing in the system, it is possible for the real
302  * time callout routine (realitexpire, given below), to be delayed
303  * in real time past when it is supposed to occur.  It does not
304  * suffice, therefore, to reload the real time .it_value from the
305  * real time .it_interval.  Rather, we compute the next time in
306  * absolute time when the timer should go off.
307  *
308  * Returns:	0			Success
309  *		EINVAL			Invalid argument
310  *	copyout:EFAULT			Bad address
311  */
312 /* ARGSUSED */
int
getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval)
{
	struct itimerval aitv;

	/* Only ITIMER_REAL, ITIMER_VIRTUAL, ITIMER_PROF are defined. */
	if (uap->which > ITIMER_PROF) {
		return EINVAL;
	}

	bzero(&aitv, sizeof(aitv));

	/* Snapshot the requested timer under the proc spinlock. */
	proc_spinlock(p);
	switch (uap->which) {
	case ITIMER_REAL:
		/*
		 * If time for real time timer has passed return 0,
		 * else return difference between current time and
		 * time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval          now;

			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <)) {
				/* deadline already passed: report zero remaining */
				timerclear(&aitv.it_value);
			} else {
				/* remaining time = absolute deadline - now */
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		} else {
			timerclear(&aitv.it_value);
		}
		break;

	case ITIMER_VIRTUAL:
		/* Virtual/profiling timers are stored as time-until-expiry. */
		aitv = p->p_vtimer_user;
		break;

	case ITIMER_PROF:
		aitv = p->p_vtimer_prof;
		break;
	}

	proc_spinunlock(p);

	/* Copy out in the caller's ABI layout (64- vs 32-bit timevals). */
	if (IS_64BIT_PROCESS(p)) {
		struct user64_itimerval user_itv;
		bzero(&user_itv, sizeof(user_itv));
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return copyout((caddr_t)&user_itv, uap->itv, sizeof(user_itv));
	} else {
		struct user32_itimerval user_itv;
		bzero(&user_itv, sizeof(user_itv));
		user_itv.it_interval.tv_sec = (user32_time_t)aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = (user32_time_t)aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return copyout((caddr_t)&user_itv, uap->itv, sizeof(user_itv));
	}
}
377 
378 /*
379  * Returns:	0			Success
380  *		EINVAL			Invalid argument
381  *	copyin:EFAULT			Bad address
382  *	getitimer:EINVAL		Invalid argument
383  *	getitimer:EFAULT		Bad address
384  */
385 /* ARGSUSED */
int
setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval)
{
	struct itimerval aitv;
	user_addr_t itvp;
	int error;

	bzero(&aitv, sizeof(aitv));

	if (uap->which > ITIMER_PROF) {
		return EINVAL;
	}
	/* Copy in the new timer value, if one was supplied. */
	if ((itvp = uap->itv)) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) {
				return error;
			}
			aitv.it_interval.tv_sec = (__darwin_time_t)user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = (__darwin_time_t)user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		} else {
			struct user32_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) {
				return error;
			}
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		}
	}
	/*
	 * If the caller wants the previous value back, redirect uap->itv at
	 * uap->oitv and hand the same uap to getitimer(), which then copies
	 * the current value out to the caller's oitv buffer.
	 * NOTE(review): this relies on setitimer_args and getitimer_args
	 * sharing the which/itv field layout — confirm against sysproto.h.
	 */
	if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval))) {
		return error;
	}
	if (itvp == 0) {
		/* No new value supplied: this was a pure "read old value" call. */
		return 0;
	}
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval)) {
		return EINVAL;
	}

	switch (uap->which) {
	case ITIMER_REAL:
		proc_spinlock(p);
		if (timerisset(&aitv.it_value)) {
			/* Arm: deadline is now + it_value, kept as absolute uptime. */
			microuptime(&p->p_rtime);
			timevaladd(&p->p_rtime, &aitv.it_value);
			p->p_realtimer = aitv;
			/* p_ractive counts outstanding thread-call activations. */
			if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL,
			    tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL)) {
				p->p_ractive++;
			}
		} else {
			/* Disarm: clear the deadline and cancel any pending call. */
			timerclear(&p->p_rtime);
			p->p_realtimer = aitv;
			if (thread_call_cancel(p->p_rcall)) {
				p->p_ractive--;
			}
		}
		proc_spinunlock(p);

		break;


	case ITIMER_VIRTUAL:
		/* Tell the task layer whether user-time accounting is needed. */
		if (timerisset(&aitv.it_value)) {
			task_vtimer_set(proc_task(p), TASK_VTIMER_USER);
		} else {
			task_vtimer_clear(proc_task(p), TASK_VTIMER_USER);
		}

		proc_spinlock(p);
		p->p_vtimer_user = aitv;
		proc_spinunlock(p);
		break;

	case ITIMER_PROF:
		if (timerisset(&aitv.it_value)) {
			task_vtimer_set(proc_task(p), TASK_VTIMER_PROF);
		} else {
			task_vtimer_clear(proc_task(p), TASK_VTIMER_PROF);
		}

		proc_spinlock(p);
		p->p_vtimer_prof = aitv;
		proc_spinunlock(p);
		break;
	}

	return 0;
}
479 
/*
 * Copy interval-timer state from old_proc to new_proc.
 *
 * The old timers are snapshotted under old_proc's spinlock, the task-level
 * vtimer flags on the new task are brought in line with the snapshot, and
 * the values are then installed under new_proc's own spinlock.  An armed
 * real timer is re-armed relative to "now" on the new proc.
 */
void
proc_inherit_itimers(struct proc *old_proc, struct proc *new_proc)
{
	struct itimerval real_itv, vuser_itv, vprof_itv;

	/* Snapshot the old timer values */
	proc_spinlock(old_proc);
	real_itv = old_proc->p_realtimer;
	vuser_itv = old_proc->p_vtimer_user;
	vprof_itv = old_proc->p_vtimer_prof;
	proc_spinunlock(old_proc);

	if (timerisset(&vuser_itv.it_value)) {
		task_vtimer_set(proc_task(new_proc), TASK_VTIMER_USER);
	} else {
		task_vtimer_clear(proc_task(new_proc), TASK_VTIMER_USER);
	}

	if (timerisset(&vprof_itv.it_value)) {
		task_vtimer_set(proc_task(new_proc), TASK_VTIMER_PROF);
	} else {
		task_vtimer_clear(proc_task(new_proc), TASK_VTIMER_PROF);
	}

	/* Update the timer values on new proc */
	proc_spinlock(new_proc);

	if (timerisset(&real_itv.it_value)) {
		/* Re-arm the real timer: deadline = now + remaining value. */
		microuptime(&new_proc->p_rtime);
		timevaladd(&new_proc->p_rtime, &real_itv.it_value);
		new_proc->p_realtimer = real_itv;
		/* p_ractive counts outstanding thread-call activations. */
		if (!thread_call_enter_delayed_with_leeway(new_proc->p_rcall, NULL,
		    tvtoabstime(&new_proc->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL)) {
			new_proc->p_ractive++;
		}
	} else {
		timerclear(&new_proc->p_rtime);
		new_proc->p_realtimer = real_itv;
	}

	new_proc->p_vtimer_user = vuser_itv;
	new_proc->p_vtimer_prof = vprof_itv;

	proc_spinunlock(new_proc);
}
525 
526 /*
527  * Real interval timer expired:
528  * send process whose timer expired an alarm signal.
529  * If time is not set up to reload, then just return.
530  * Else compute next time timer should go off which is > current time.
531  * This is where delay in processing this timeout causes multiple
532  * SIGALRM calls to be compressed into one.
533  */
void
realitexpire(
	struct proc *p,
	__unused void *p2)
{
	struct proc *r;
	struct timeval t;

	/* Take a proc ref; proc_find() returns NULL once p is exiting. */
	r = proc_find(proc_getpid(p));

	proc_spinlock(p);

	/* This call was counted when it was armed. */
	assert(p->p_ractive > 0);

	if (--p->p_ractive > 0 || r != p) {
		/*
		 * bail, because either proc is exiting
		 * or there's another active thread call
		 */
		proc_spinunlock(p);

		if (r != NULL) {
			proc_rele(r);
		}
		return;
	}

	if (!timerisset(&p->p_realtimer.it_interval)) {
		/*
		 * p_realtimer was cleared while this call was pending,
		 * send one last SIGALRM, but don't re-arm
		 */
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		psignal(p, SIGALRM);
		proc_rele(p);
		return;
	}

	proc_spinunlock(p);

	/*
	 * Send the signal before re-arming the next thread call,
	 * so in case psignal blocks, we won't create yet another thread call.
	 */

	psignal(p, SIGALRM);

	proc_spinlock(p);

	/* Should we still re-arm the next thread call? */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		proc_rele(p);
		return;
	}

	/* Advance the absolute deadline by one interval. */
	microuptime(&t);
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);

	if (timercmp(&p->p_rtime, &t, <=)) {
		/*
		 * Delivery was delayed and the new deadline is already in
		 * the past.  Catch up: when close to "now", step one
		 * interval at a time (keeps the phase of the periodic
		 * signal); when far behind, jump straight to now + interval.
		 */
		if ((p->p_rtime.tv_sec + 2) >= t.tv_sec) {
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &t, >)) {
					break;
				}
			}
		} else {
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &t);
		}
	}

	assert(p->p_rcall != NULL);

	if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, tvtoabstime(&p->p_rtime), 0,
	    THREAD_CALL_DELAY_USER_NORMAL)) {
		p->p_ractive++;
	}

	proc_spinunlock(p);

	proc_rele(p);
}
622 
623 /*
624  * Called once in proc_exit to clean up after an armed or pending realitexpire
625  *
626  * This will only be called after the proc refcount is drained,
627  * so realitexpire cannot be currently holding a proc ref.
628  * i.e. it will/has gotten PROC_NULL from proc_find.
629  */
void
proc_free_realitimer(proc_t p)
{
	proc_spinlock(p);

	assert(p->p_rcall != NULL);
	assert(proc_list_exited(p));

	/* Keep realitexpire from re-arming once the lock is dropped. */
	timerclear(&p->p_realtimer.it_interval);

	/* Cancel any pending (not yet running) thread call. */
	if (thread_call_cancel(p->p_rcall)) {
		assert(p->p_ractive > 0);
		p->p_ractive--;
	}

	/* Spin-wait for any in-flight realitexpire invocations to drain. */
	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}

	thread_call_t call = p->p_rcall;
	p->p_rcall = NULL;

	proc_spinunlock(p);

	/* Safe outside the lock: p_rcall is NULL, no one can reach it. */
	thread_call_free(call);
}
660 
661 /*
662  * Check that a proposed value to load into the .it_value or
663  * .it_interval part of an interval timer is acceptable.
664  */
int
itimerfix(struct timeval *tv)
{
	/* Both fields must be non-negative. */
	if (tv->tv_sec < 0 || tv->tv_usec < 0) {
		return EINVAL;
	}
	/* Seconds capped at 100 million (~3 years); usec must be normalized. */
	if (tv->tv_sec > 100000000 || tv->tv_usec >= 1000000) {
		return EINVAL;
	}
	return 0;
}
675 
676 int
timespec_is_valid(const struct timespec * ts)677 timespec_is_valid(const struct timespec *ts)
678 {
679 	/* The INT32_MAX limit ensures the timespec is safe for clock_*() functions
680 	 * which accept 32-bit ints. */
681 	if (ts->tv_sec < 0 || ts->tv_sec > INT32_MAX ||
682 	    ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec > NSEC_PER_SEC) {
683 		return 0;
684 	}
685 	return 1;
686 }
687 
688 /*
689  * Decrement an interval timer by a specified number
690  * of microseconds, which must be less than a second,
691  * i.e. < 1000000.  If the timer expires, then reload
692  * it.  In this case, carry over (usec - old value) to
693  * reduce the value reloaded into the timer so that
694  * the timer does not drift.  This routine assumes
695  * that it is called in a context where the timers
696  * on which it is operating cannot change in value.
697  */
/*
 * Returns 1 while the timer is still running, 0 once it has expired
 * (after reloading it from it_interval when one is set).
 */
int
itimerdecr(proc_t p,
    struct itimerval *itp, int usec)
{
	proc_spinlock(p);

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* borrow one second so the usec subtraction below stays >= 0 */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		/* still ticking */
		proc_spinunlock(p);
		return 1;
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		/* reload, carrying the overshoot (usec) so the timer doesn't drift */
		itp->it_value = itp->it_interval;
		if (itp->it_value.tv_sec > 0) {
			itp->it_value.tv_usec -= usec;
			if (itp->it_value.tv_usec < 0) {
				itp->it_value.tv_usec += 1000000;
				itp->it_value.tv_sec--;
			}
		}
	} else {
		itp->it_value.tv_usec = 0;              /* sec is already 0 */
	}
	proc_spinunlock(p);
	return 0;
}
736 
737 /*
738  * Add and subtract routines for timevals.
739  * N.B.: subtract routine doesn't deal with
740  * results which are before the beginning,
741  * it just gets very confused in this case.
742  * Caveat emptor.
743  */
void
timevaladd(struct timeval *t1, struct timeval *t2)
{
	/* Component-wise sum, then renormalize tv_usec into [0, 1000000). */
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}
void
timevalsub(struct timeval *t1, struct timeval *t2)
{
	/*
	 * Component-wise difference, renormalized.  Results that fall
	 * before the beginning of time are not handled sensibly.
	 */
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}
void
timevalfix(struct timeval *t1)
{
	/*
	 * Renormalize tv_usec into [0, 1000000).  Exactly one second of
	 * carry or borrow is applied, matching what the add/sub routines
	 * can produce.
	 */
	if (t1->tv_usec < 0) {
		t1->tv_sec -= 1;
		t1->tv_usec += 1000000;
	} else if (t1->tv_usec >= 1000000) {
		t1->tv_sec += 1;
		t1->tv_usec -= 1000000;
	}
}
775 
776 static boolean_t
timeval_fixusec(struct timeval * t1)777 timeval_fixusec(
778 	struct timeval *t1)
779 {
780 	assert(t1->tv_usec >= 0);
781 	assert(t1->tv_sec >= 0);
782 
783 	if (t1->tv_usec >= 1000000) {
784 		if (os_add_overflow(t1->tv_sec, t1->tv_usec / 1000000, &t1->tv_sec)) {
785 			return FALSE;
786 		}
787 		t1->tv_usec = t1->tv_usec % 1000000;
788 	}
789 
790 	return TRUE;
791 }
792 
793 /*
794  * Return the best possible estimate of the time in the timeval
795  * to which tvp points.
796  */
797 void
microtime(struct timeval * tvp)798 microtime(
799 	struct timeval  *tvp)
800 {
801 	clock_sec_t             tv_sec;
802 	clock_usec_t    tv_usec;
803 
804 	clock_get_calendar_microtime(&tv_sec, &tv_usec);
805 
806 	tvp->tv_sec = tv_sec;
807 	tvp->tv_usec = tv_usec;
808 }
809 
810 void
microtime_with_abstime(struct timeval * tvp,uint64_t * abstime)811 microtime_with_abstime(
812 	struct timeval  *tvp, uint64_t *abstime)
813 {
814 	clock_sec_t             tv_sec;
815 	clock_usec_t    tv_usec;
816 
817 	clock_get_calendar_absolute_and_microtime(&tv_sec, &tv_usec, abstime);
818 
819 	tvp->tv_sec = tv_sec;
820 	tvp->tv_usec = tv_usec;
821 }
822 
823 void
microuptime(struct timeval * tvp)824 microuptime(
825 	struct timeval  *tvp)
826 {
827 	clock_sec_t             tv_sec;
828 	clock_usec_t    tv_usec;
829 
830 	clock_get_system_microtime(&tv_sec, &tv_usec);
831 
832 	tvp->tv_sec = tv_sec;
833 	tvp->tv_usec = tv_usec;
834 }
835 
836 /*
837  * Ditto for timespec.
838  */
839 void
nanotime(struct timespec * tsp)840 nanotime(
841 	struct timespec *tsp)
842 {
843 	clock_sec_t             tv_sec;
844 	clock_nsec_t    tv_nsec;
845 
846 	clock_get_calendar_nanotime(&tv_sec, &tv_nsec);
847 
848 	tsp->tv_sec = tv_sec;
849 	tsp->tv_nsec = tv_nsec;
850 }
851 
852 void
nanouptime(struct timespec * tsp)853 nanouptime(
854 	struct timespec *tsp)
855 {
856 	clock_sec_t             tv_sec;
857 	clock_nsec_t    tv_nsec;
858 
859 	clock_get_system_nanotime(&tv_sec, &tv_nsec);
860 
861 	tsp->tv_sec = tv_sec;
862 	tsp->tv_nsec = tv_nsec;
863 }
864 
865 uint64_t
tvtoabstime(struct timeval * tvp)866 tvtoabstime(
867 	struct timeval  *tvp)
868 {
869 	uint64_t        result, usresult;
870 
871 	clock_interval_to_absolutetime_interval(
872 		(uint32_t)tvp->tv_sec, NSEC_PER_SEC, &result);
873 	clock_interval_to_absolutetime_interval(
874 		tvp->tv_usec, NSEC_PER_USEC, &usresult);
875 
876 	return result + usresult;
877 }
878 
879 uint64_t
tstoabstime(struct timespec * ts)880 tstoabstime(struct timespec *ts)
881 {
882 	uint64_t abstime_s, abstime_ns;
883 	clock_interval_to_absolutetime_interval((uint32_t)ts->tv_sec, NSEC_PER_SEC, &abstime_s);
884 	clock_interval_to_absolutetime_interval((uint32_t)ts->tv_nsec, 1, &abstime_ns);
885 	return abstime_s + abstime_ns;
886 }
887 
888 #if NETWORKING
889 /*
890  * ratecheck(): simple time-based rate-limit checking.
891  */
892 int
ratecheck(struct timeval * lasttime,const struct timeval * mininterval)893 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
894 {
895 	struct timeval tv, delta;
896 	int rv = 0;
897 
898 	net_uptime2timeval(&tv);
899 	delta = tv;
900 	timevalsub(&delta, lasttime);
901 
902 	/*
903 	 * check for 0,0 is so that the message will be seen at least once,
904 	 * even if interval is huge.
905 	 */
906 	if (timevalcmp(&delta, mininterval, >=) ||
907 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
908 		*lasttime = tv;
909 		rv = 1;
910 	}
911 
912 	return rv;
913 }
914 
915 /*
916  * ppsratecheck(): packets (or events) per second limitation.
917  */
918 int
ppsratecheck(struct timeval * lasttime,int * curpps,int maxpps)919 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
920 {
921 	struct timeval tv, delta;
922 	int rv;
923 
924 	net_uptime2timeval(&tv);
925 
926 	timersub(&tv, lasttime, &delta);
927 
928 	/*
929 	 * Check for 0,0 so that the message will be seen at least once.
930 	 * If more than one second has passed since the last update of
931 	 * lasttime, reset the counter.
932 	 *
933 	 * we do increment *curpps even in *curpps < maxpps case, as some may
934 	 * try to use *curpps for stat purposes as well.
935 	 */
936 	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
937 	    delta.tv_sec >= 1) {
938 		*lasttime = tv;
939 		*curpps = 0;
940 		rv = 1;
941 	} else if (maxpps < 0) {
942 		rv = 1;
943 	} else if (*curpps < maxpps) {
944 		rv = 1;
945 	} else {
946 		rv = 0;
947 	}
948 
949 	/* be careful about wrap-around */
950 	if (*curpps < INT_MAX) {
951 		*curpps = *curpps + 1;
952 	}
953 
954 	return rv;
955 }
956 #endif /* NETWORKING */
957 
958 int
__mach_bridge_remote_time(__unused struct proc * p,struct __mach_bridge_remote_time_args * mbrt_args,uint64_t * retval)959 __mach_bridge_remote_time(__unused struct proc *p, struct __mach_bridge_remote_time_args *mbrt_args, uint64_t *retval)
960 {
961 	*retval = mach_bridge_remote_time(mbrt_args->local_timestamp);
962 	return 0;
963 }
964