xref: /xnu-8792.61.2/bsd/kern/kern_synch.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Mach Operating System
30  * Copyright (c) 1987 Carnegie-Mellon University
31  * All rights reserved.  The CMU software License Agreement specifies
32  * the terms and conditions for use and redistribution.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc_internal.h>
38 #include <sys/user.h>
39 #include <sys/file_internal.h>
40 #include <sys/vnode.h>
41 #include <sys/kernel.h>
42 
43 #include <kern/queue.h>
44 #include <sys/lock.h>
45 #include <kern/thread.h>
46 #include <kern/sched_prim.h>
47 #include <kern/ast.h>
48 
49 #include <kern/cpu_number.h>
50 #include <vm/vm_kern.h>
51 
52 #include <kern/task.h>
53 #include <mach/time_value.h>
54 #include <kern/locks.h>
55 #include <kern/policy_internal.h>
56 
57 #include <sys/systm.h>                  /* for unix_syscall_return() */
58 #include <libkern/OSAtomic.h>
59 
60 extern void compute_averunnable(void *);        /* XXX */
61 
62 __attribute__((noreturn))
63 static void
_sleep_continue(__unused void * parameter,wait_result_t wresult)64 _sleep_continue( __unused void *parameter, wait_result_t wresult)
65 {
66 	struct proc *p = current_proc();
67 	thread_t self  = current_thread();
68 	struct uthread * ut;
69 	int sig, catch;
70 	int error = 0;
71 	int dropmutex, spinmutex;
72 
73 	ut = get_bsdthread_info(self);
74 	catch     = ut->uu_pri & PCATCH;
75 	dropmutex = ut->uu_pri & PDROP;
76 	spinmutex = ut->uu_pri & PSPIN;
77 
78 	switch (wresult) {
79 	case THREAD_TIMED_OUT:
80 		error = EWOULDBLOCK;
81 		break;
82 	case THREAD_AWAKENED:
83 		/*
84 		 * Posix implies any signal should be delivered
85 		 * first, regardless of whether awakened due
86 		 * to receiving event.
87 		 */
88 		if (!catch) {
89 			break;
90 		}
91 		OS_FALLTHROUGH;
92 	case THREAD_INTERRUPTED:
93 		if (catch) {
94 			if (thread_should_abort(self)) {
95 				error = EINTR;
96 			} else if (SHOULDissignal(p, ut)) {
97 				if ((sig = CURSIG(p)) != 0) {
98 					if (p->p_sigacts.ps_sigintr & sigmask(sig)) {
99 						error = EINTR;
100 					} else {
101 						error = ERESTART;
102 					}
103 				}
104 				if (thread_should_abort(self)) {
105 					error = EINTR;
106 				}
107 			} else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
108 				/* due to thread cancel */
109 				error = EINTR;
110 			}
111 		} else {
112 			error = EINTR;
113 		}
114 		break;
115 	}
116 
117 	if (error == EINTR || error == ERESTART) {
118 		act_set_astbsd(self);
119 	}
120 
121 	if (ut->uu_mtx && !dropmutex) {
122 		if (spinmutex) {
123 			lck_mtx_lock_spin(ut->uu_mtx);
124 		} else {
125 			lck_mtx_lock(ut->uu_mtx);
126 		}
127 	}
128 	ut->uu_wchan = NULL;
129 	ut->uu_wmesg = NULL;
130 
131 	unix_syscall_return((*ut->uu_continuation)(error));
132 }
133 
134 /*
135  * Give up the processor till a wakeup occurs
136  * on chan, at which time the process
137  * enters the scheduling queue at priority pri.
138  * The most important effect of pri is that when
139  * pri<=PZERO a signal cannot disturb the sleep;
140  * if pri>PZERO signals will be processed.
141  * If pri&PCATCH is set, signals will cause sleep
142  * to return 1, rather than longjmp.
143  * Callers of this routine must be prepared for
144  * premature return, and check that the reason for
145  * sleeping has gone away.
146  *
147  * if msleep was the entry point, than we have a mutex to deal with
148  *
149  * The mutex is unlocked before the caller is blocked, and
150  * relocked before msleep returns unless the priority includes the PDROP
151  * flag... if PDROP is specified, _sleep returns with the mutex unlocked
152  * regardless of whether it actually blocked or not.
153  */
154 
155 static int
_sleep(caddr_t chan,int pri,const char * wmsg,u_int64_t abstime,int (* continuation)(int),lck_mtx_t * mtx)156 _sleep(
157 	caddr_t         chan,
158 	int             pri,
159 	const char      *wmsg,
160 	u_int64_t       abstime,
161 	int             (*continuation)(int),
162 	lck_mtx_t       *mtx)
163 {
164 	struct proc *p;
165 	thread_t self = current_thread();
166 	struct uthread * ut;
167 	int sig, catch;
168 	int dropmutex  = pri & PDROP;
169 	int spinmutex  = pri & PSPIN;
170 	int wait_result;
171 	int error = 0;
172 
173 	ut = get_bsdthread_info(self);
174 
175 	p = current_proc();
176 	p->p_priority = pri & PRIMASK;
177 	/* It can still block in proc_exit() after the teardown. */
178 	if (p->p_stats != NULL) {
179 		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw);
180 	}
181 
	/* PCATCH selects an abort-safe (signal-interruptible) wait. */
182 	if (pri & PCATCH) {
183 		catch = THREAD_ABORTSAFE;
184 	} else {
185 		catch = THREAD_UNINT;
186 	}
187 
188 	/* set wait message & channel */
189 	ut->uu_wchan = chan;
190 	ut->uu_wmesg = wmsg ? wmsg : "unknown";
191 
	/*
	 * Fast path: a mutex-protected, non-continuation sleep is handed
	 * directly to lck_mtx_sleep{,_deadline}(), which handles dropping
	 * and (unless PDROP) reacquiring the mutex around the block.
	 */
192 	if (mtx != NULL && chan != NULL && continuation == NULL) {
193 		int     flags;
194 
195 		if (dropmutex) {
196 			flags = LCK_SLEEP_UNLOCK;
197 		} else {
198 			flags = LCK_SLEEP_DEFAULT;
199 		}
200 
201 		if (spinmutex) {
202 			flags |= LCK_SLEEP_SPIN;
203 		}
204 
205 		if (abstime) {
206 			wait_result = lck_mtx_sleep_deadline(mtx, flags, chan, catch, abstime);
207 		} else {
208 			wait_result = lck_mtx_sleep(mtx, flags, chan, catch);
209 		}
210 	} else {
		/*
		 * Slow path: assert the wait first, then drop the mutex, so
		 * a wakeup arriving between the unlock and the block is not
		 * missed.
		 */
211 		if (chan != NULL) {
212 			assert_wait_deadline(chan, catch, abstime);
213 		}
214 		if (mtx) {
215 			lck_mtx_unlock(mtx);
216 		}
217 
		/*
		 * For abort-safe sleeps, check for already-pending signals or
		 * a thread abort before blocking.  If clear_wait() fails, the
		 * wait has already been resolved by someone else; fall through
		 * to block: and let the wakeup path report the result.
		 */
218 		if (catch == THREAD_ABORTSAFE) {
219 			if (SHOULDissignal(p, ut)) {
220 				if ((sig = CURSIG(p)) != 0) {
221 					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) {
222 						goto block;
223 					}
224 					if (p->p_sigacts.ps_sigintr & sigmask(sig)) {
225 						error = EINTR;
226 					} else {
227 						error = ERESTART;
228 					}
229 					if (mtx && !dropmutex) {
230 						if (spinmutex) {
231 							lck_mtx_lock_spin(mtx);
232 						} else {
233 							lck_mtx_lock(mtx);
234 						}
235 					}
236 					goto out;
237 				}
238 			}
239 			if (thread_should_abort(self)) {
240 				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) {
241 					goto block;
242 				}
243 				error = EINTR;
244 
245 				if (mtx && !dropmutex) {
246 					if (spinmutex) {
247 						lck_mtx_lock_spin(mtx);
248 					} else {
249 						lck_mtx_lock(mtx);
250 					}
251 				}
252 				goto out;
253 			}
254 		}
255 
256 
257 block:
		/*
		 * Continuation-style sleep: stash everything _sleep_continue()
		 * needs in the uthread and block with the continuation; control
		 * does not return here (uu_pri is truncated to 16 bits, which
		 * must hold all the P* flag bits used above).
		 */
258 		if (continuation != NULL) {
259 			ut->uu_continuation = continuation;
260 			ut->uu_pri  = (uint16_t)pri;
261 			ut->uu_mtx  = mtx;
262 			(void) thread_block(_sleep_continue);
263 			/* NOTREACHED */
264 		}
265 
266 		wait_result = thread_block(THREAD_CONTINUE_NULL);
267 
268 		if (mtx && !dropmutex) {
269 			if (spinmutex) {
270 				lck_mtx_lock_spin(mtx);
271 			} else {
272 				lck_mtx_lock(mtx);
273 			}
274 		}
275 	}
276 
	/* Translate the Mach wait result into a BSD errno. */
277 	switch (wait_result) {
278 	case THREAD_TIMED_OUT:
279 		error = EWOULDBLOCK;
280 		break;
281 	case THREAD_AWAKENED:
282 	case THREAD_RESTART:
283 		/*
284 		 * Posix implies any signal should be delivered
285 		 * first, regardless of whether awakened due
286 		 * to receiving event.
287 		 */
288 		if (catch != THREAD_ABORTSAFE) {
289 			break;
290 		}
291 		OS_FALLTHROUGH;
292 	case THREAD_INTERRUPTED:
293 		if (catch == THREAD_ABORTSAFE) {
294 			if (thread_should_abort(self)) {
295 				error = EINTR;
296 			} else if (SHOULDissignal(p, ut)) {
297 				if ((sig = CURSIG(p)) != 0) {
298 					if (p->p_sigacts.ps_sigintr & sigmask(sig)) {
299 						error = EINTR;
300 					} else {
301 						error = ERESTART;
302 					}
303 				}
304 				if (thread_should_abort(self)) {
305 					error = EINTR;
306 				}
307 			} else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
308 				/* due to thread cancel */
309 				error = EINTR;
310 			}
311 		} else {
312 			error = EINTR;
313 		}
314 		break;
315 	}
316 out:
	/* Post an AST so a pending signal is handled on kernel exit. */
317 	if (error == EINTR || error == ERESTART) {
318 		act_set_astbsd(self);
319 	}
320 	ut->uu_wchan = NULL;
321 	ut->uu_wmesg = NULL;
322 
323 	return error;
324 }
325 
326 int
sleep(void * chan,int pri)327 sleep(
328 	void    *chan,
329 	int             pri)
330 {
331 	return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
332 }
333 
334 int
msleep0(void * chan,lck_mtx_t * mtx,int pri,const char * wmsg,int timo,int (* continuation)(int))335 msleep0(
336 	void            *chan,
337 	lck_mtx_t       *mtx,
338 	int             pri,
339 	const char      *wmsg,
340 	int             timo,
341 	int             (*continuation)(int))
342 {
343 	u_int64_t       abstime = 0;
344 
345 	if (timo) {
346 		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
347 	}
348 
349 	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
350 }
351 
352 int
msleep(void * chan,lck_mtx_t * mtx,int pri,const char * wmsg,struct timespec * ts)353 msleep(
354 	void            *chan,
355 	lck_mtx_t       *mtx,
356 	int             pri,
357 	const char      *wmsg,
358 	struct timespec         *ts)
359 {
360 	u_int64_t       abstime = 0;
361 
362 	if (ts && (ts->tv_sec || ts->tv_nsec)) {
363 		nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
364 		clock_absolutetime_interval_to_deadline( abstime, &abstime );
365 	}
366 
367 	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
368 }
369 
370 int
msleep1(void * chan,lck_mtx_t * mtx,int pri,const char * wmsg,u_int64_t abstime)371 msleep1(
372 	void            *chan,
373 	lck_mtx_t       *mtx,
374 	int             pri,
375 	const char      *wmsg,
376 	u_int64_t       abstime)
377 {
378 	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
379 }
380 
381 int
tsleep(void * chan,int pri,const char * wmsg,int timo)382 tsleep(
383 	void            *chan,
384 	int             pri,
385 	const char      *wmsg,
386 	int             timo)
387 {
388 	u_int64_t       abstime = 0;
389 
390 	if (timo) {
391 		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
392 	}
393 	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
394 }
395 
396 int
tsleep0(void * chan,int pri,const char * wmsg,int timo,int (* continuation)(int))397 tsleep0(
398 	void            *chan,
399 	int             pri,
400 	const char      *wmsg,
401 	int             timo,
402 	int             (*continuation)(int))
403 {
404 	u_int64_t       abstime = 0;
405 
406 	if (timo) {
407 		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
408 	}
409 	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
410 }
411 
412 int
tsleep1(void * chan,int pri,const char * wmsg,u_int64_t abstime,int (* continuation)(int))413 tsleep1(
414 	void            *chan,
415 	int             pri,
416 	const char      *wmsg,
417 	u_int64_t       abstime,
418 	int             (*continuation)(int))
419 {
420 	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
421 }
422 
423 /*
424  * Wake up all processes sleeping on chan.
425  */
426 void
wakeup(void * chan)427 wakeup(void *chan)
428 {
429 	thread_wakeup((caddr_t)chan);
430 }
431 
432 /*
433  * Wake up the first process sleeping on chan.
434  *
435  * Be very sure that the first process is really
436  * the right one to wakeup.
437  */
438 void
wakeup_one(caddr_t chan)439 wakeup_one(caddr_t chan)
440 {
441 	thread_wakeup_one((caddr_t)chan);
442 }
443 
444 /*
445  * Compute the priority of a process when running in user mode.
446  * Arrange to reschedule if the resulting priority is better
447  * than that of the current process.
448  */
449 void
resetpriority(struct proc * p)450 resetpriority(struct proc *p)
451 {
452 	(void)task_importance(proc_task(p), -p->p_nice);
453 }
454 
/*
 * System load average, maintained by compute_averunnable().  The three
 * slots are the 1-, 5- and 15-minute fixed-point averages, initialized
 * to zero with scale factor FSCALE.
 */
455 struct loadavg averunnable =
456 { {0, 0, 0}, FSCALE };                  /* load average, of runnable procs */
457 /*
458  * Constants for averages over 1, 5, and 15 minutes
459  * when sampling at 5 second intervals.
460  */
/*
 * Fixed-point exponential decay factors: at one sample every 5 seconds,
 * 1 min = 12 samples, 5 min = 60, 15 min = 180 — hence exp(-1/12),
 * exp(-1/60) and exp(-1/180).
 */
461 static fixpt_t cexp[3] = {
462 	(fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */
463 	(fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */
464 	(fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */
465 };
466 
467 void
compute_averunnable(void * arg)468 compute_averunnable(void *arg)
469 {
470 	unsigned int            nrun = *(unsigned int *)arg;
471 	struct loadavg          *avg = &averunnable;
472 	int             i;
473 
474 	for (i = 0; i < 3; i++) {
475 		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
476 		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
477 	}
478 }
479