xref: /xnu-8792.61.2/osfmk/kern/sync_sema.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  *
31  */
32 /*
33  *	File:	kern/sync_sema.c
34  *	Author:	Joseph CaraDonna
35  *
36  *	Contains RT distributed semaphore synchronization services.
37  */
38 
39 #include <mach/mach_types.h>
40 #include <mach/mach_traps.h>
41 #include <mach/kern_return.h>
42 #include <mach/semaphore.h>
43 #include <mach/sync_policy.h>
44 #include <mach/task.h>
45 
46 #include <kern/misc_protos.h>
47 #include <kern/sync_sema.h>
48 #include <kern/spl.h>
49 #include <kern/ipc_kobject.h>
50 #include <kern/ipc_tt.h>
51 #include <kern/thread.h>
52 #include <kern/clock.h>
53 #include <ipc/ipc_port.h>
54 #include <ipc/ipc_space.h>
55 #include <kern/host.h>
56 #include <kern/waitq.h>
57 #include <kern/zalloc.h>
58 #include <kern/mach_param.h>
59 
/*
 * Kernel-unique address used as the 64-bit wait event for every
 * semaphore wait/wakeup; only its address matters, never its value.
 */
static const uint8_t semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

/* Zone backing all struct semaphore allocations; memory is zeroed on free. */
ZONE_DEFINE_ID(ZONE_ID_SEMAPHORE, "semaphores", struct semaphore,
    ZC_ZFREE_CLEARMEM);

/* Reference-count group for semaphore refcount accounting/debugging. */
os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);
67 
68 /* Forward declarations */
69 
70 static inline bool
semaphore_active(semaphore_t semaphore)71 semaphore_active(semaphore_t semaphore)
72 {
73 	return semaphore->owner != TASK_NULL;
74 }
75 
76 static __inline__ uint64_t
semaphore_deadline(unsigned int sec,clock_res_t nsec)77 semaphore_deadline(
78 	unsigned int            sec,
79 	clock_res_t             nsec)
80 {
81 	uint64_t abstime;
82 
83 	nanotime_to_absolutetime(sec, nsec, &abstime);
84 	clock_absolutetime_interval_to_deadline(abstime, &abstime);
85 
86 	return abstime;
87 }
88 
89 /*
90  *	Routine:	semaphore_create
91  *
92  *	Creates a semaphore.
93  *	The port representing the semaphore is returned as a parameter.
94  */
kern_return_t
semaphore_create(
	task_t                  task,
	semaphore_t             *new_semaphore,
	int                     policy,
	int                     value)
{
	semaphore_t s = SEMAPHORE_NULL;

	*new_semaphore = SEMAPHORE_NULL;
	/* reject negative initial counts and non user-visible waitq policies */
	if (task == TASK_NULL || value < 0 || (policy & ~SYNC_POLICY_USER_MASK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Z_NOFAIL: allocation cannot return NULL, no need to check */
	s = zalloc_id(ZONE_ID_SEMAPHORE, Z_ZERO | Z_WAITOK | Z_NOFAIL);

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	/* Check for race with task_terminate */
	if (!task->active) {
		task_unlock(task);
		zfree_id(ZONE_ID_SEMAPHORE, s);
		return KERN_INVALID_TASK;
	}

	/* SYNC_POLICY_INIT_LOCKED: the waitq (and semaphore) comes back locked */
	waitq_init(&s->waitq, WQT_QUEUE, policy | SYNC_POLICY_INIT_LOCKED);

	/* init everything under both the task and semaphore locks */
	os_ref_init_raw(&s->ref_count, &sema_refgrp);
	s->count = value;
	s->owner = task;
	enqueue_head(&task->semaphore_list, &s->task_link);
	task->semaphores_owned++;

	semaphore_unlock(s);

	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
140 
141 /*
142  *	Routine:	semaphore_destroy_internal
143  *
144  *	Disassociate a semaphore from its owning task, mark it inactive,
145  *	and set any waiting threads running with THREAD_RESTART.
146  *
147  *	Conditions:
148  *			task is locked
149  *			semaphore is owned by the specified task
150  *			disabling interrupts (splsched) is the responsibility of the caller.
151  *	Returns:
152  *			with semaphore unlocked
153  */
static void
semaphore_destroy_internal(
	task_t                  task,
	semaphore_t             semaphore,
	bool                    semaphore_locked)
{
	int old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue(&semaphore->task_link);
	task->semaphores_owned--;

	/*
	 * deactivate semaphore under both locks
	 * and then wake up all waiters.
	 */
	if (!semaphore_locked) {
		semaphore_lock(semaphore);
	}

	/* owner == TASK_NULL is what marks the semaphore inactive */
	semaphore->owner = TASK_NULL;
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		/* negative count means threads are blocked: restart them all */
		waitq_wakeup64_all_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT, THREAD_RESTART, WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		assert(circle_queue_empty(&semaphore->waitq.waitq_queue));
		semaphore_unlock(semaphore);
	}
}
188 
189 /*
190  *	Routine:	semaphore_free
191  *
192  *	Free a semaphore that hit a 0 refcount.
193  *
194  *	Conditions:
195  *			Nothing is locked.
196  */
__attribute__((noinline))
static void
semaphore_free(
	semaphore_t             semaphore)
{
	ipc_port_t port;
	task_t task;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	port = semaphore->port;
	if (IP_VALID(port)) {
		/* no send rights can remain once the refcount hit zero */
		assert(!port->ip_srights);
		ipc_kobject_dealloc_port(port, 0, IKOT_SEMAPHORE);
	}

	/*
	 * If the semaphore owned by the current task,
	 * we know the current task can't go away,
	 * so we can take locks in the right order.
	 *
	 * Else we try to take locks in the "wrong" order
	 * but if we fail to, we take a task ref and do it "right".
	 */
	task = current_task();
	if (semaphore->owner == task) {
		task_lock(task);
		/* re-check owner under the task lock: destruction may have raced */
		if (semaphore->owner == task) {
			spl_t s = splsched();
			semaphore_destroy_internal(task, semaphore, false);
			splx(s);
		} else {
			assert(semaphore->owner == TASK_NULL);
		}
		task_unlock(task);
	} else {
		spl_t s = splsched();

		semaphore_lock(semaphore);

		task = semaphore->owner;
		if (task == TASK_NULL) {
			/* already deactivated: nothing left to unlink */
			semaphore_unlock(semaphore);
			splx(s);
		} else if (task_lock_try(task)) {
			/* out-of-order lock attempt succeeded: destroy in place */
			semaphore_destroy_internal(task, semaphore, true);
			splx(s);
			/* semaphore unlocked */
			task_unlock(task);
		} else {
			/*
			 * Out-of-order attempt failed: pin the owner with a
			 * task reference, drop the semaphore lock, and retry
			 * in the canonical task-then-semaphore order.
			 */
			task_reference(task);
			semaphore_unlock(semaphore);
			splx(s);

			task_lock(task);
			/* owner may have changed while we were unlocked */
			if (semaphore->owner == task) {
				s = splsched();
				semaphore_destroy_internal(task, semaphore, false);
				splx(s);
			}
			task_unlock(task);

			task_deallocate(task);
		}
	}

	waitq_deinit(&semaphore->waitq);
	zfree_id(ZONE_ID_SEMAPHORE, semaphore);
}
270 
271 /*
272  *	Routine:	semaphore_destroy
273  *
274  *	Destroys a semaphore and consume the caller's reference on the
275  *	semaphore.
276  */
kern_return_t
semaphore_destroy(
	task_t                  task,
	semaphore_t             semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task == TASK_NULL) {
		/* still consume the caller's reference before failing */
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	/* unlocked peek first; confirm ownership under the task lock */
	if (semaphore->owner == task) {
		task_lock(task);
		if (semaphore->owner == task) {
			spl_t spl_level = splsched();
			semaphore_destroy_internal(task, semaphore, false);
			splx(spl_level);
		}
		task_unlock(task);
	}

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
304 
305 /*
306  *	Routine:	semaphore_destroy_all
307  *
308  *	Destroy all the semaphores associated with a given task.
309  */
310 #define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */
311 
void
semaphore_destroy_all(
	task_t                  task)
{
	semaphore_t semaphore;
	uint32_t count;
	spl_t spl_level;

	count = 0;
	task_lock(task);

	qe_foreach_element_safe(semaphore, &task->semaphore_list, task_link) {
		/* disable interrupts lazily, only at the start of a batch */
		if (count == 0) {
			spl_level = splsched();
		}

		semaphore_destroy_internal(task, semaphore, false);

		/* throttle number of semaphores per interrupt disablement */
		if (++count == SEMASPERSPL) {
			count = 0;
			splx(spl_level);
		}
	}
	/* close out a partial batch, if any */
	if (count != 0) {
		splx(spl_level);
	}

	task_unlock(task);
}
342 
343 /*
344  *	Routine:	semaphore_signal_internal
345  *
346  *		Signals the semaphore as direct.
347  *	Assumptions:
348  *		Semaphore is locked.
349  */
static kern_return_t
semaphore_signal_internal(
	semaphore_t             semaphore,
	thread_t                thread,
	int                     options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore_active(semaphore)) {
		/* semaphore was destroyed while we were acquiring the lock */
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	/* targeted signal: wake only the specified thread, never prepost */
	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_and_unlock(
				&semaphore->waitq, SEMAPHORE_EVENT,
				thread, THREAD_AWAKENED);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	/* broadcast: wake everyone and zero the count */
	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(&semaphore->waitq,
			    SEMAPHORE_EVENT, THREAD_AWAKENED, WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST) {
				semaphore->count++;
			}
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	/* single signal: wake one waiter if any are blocked */
	if (semaphore->count < 0) {
		waitq_wakeup_flags_t flags = WAITQ_KEEP_LOCKED;

		if (options & SEMAPHORE_THREAD_HANDOFF) {
			flags |= WAITQ_HANDOFF;
		}
		kr = waitq_wakeup64_one_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT, THREAD_AWAKENED, flags);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	/* nobody was woken: remember the signal only if preposting is allowed */
	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
427 
428 /*
429  *	Routine:	semaphore_signal_thread
430  *
431  *	If the specified thread is blocked on the semaphore, it is
432  *	woken up.  If a NULL thread was supplied, then any one
433  *	thread is woken up.  Otherwise the caller gets KERN_NOT_WAITING
434  *	and the	semaphore is unchanged.
435  */
436 kern_return_t
semaphore_signal_thread(semaphore_t semaphore,thread_t thread)437 semaphore_signal_thread(
438 	semaphore_t     semaphore,
439 	thread_t        thread)
440 {
441 	if (semaphore == SEMAPHORE_NULL) {
442 		return KERN_INVALID_ARGUMENT;
443 	}
444 
445 	return semaphore_signal_internal(semaphore, thread,
446 	           SEMAPHORE_OPTION_NONE);
447 }
448 
449 /*
450  *	Routine:	semaphore_signal_thread_trap
451  *
452  *	Trap interface to the semaphore_signal_thread function.
453  */
454 kern_return_t
semaphore_signal_thread_trap(struct semaphore_signal_thread_trap_args * args)455 semaphore_signal_thread_trap(
456 	struct semaphore_signal_thread_trap_args *args)
457 {
458 	mach_port_name_t sema_name = args->signal_name;
459 	mach_port_name_t thread_name = args->thread_name;
460 	semaphore_t      semaphore;
461 	thread_t         thread;
462 	kern_return_t    kr;
463 
464 	/*
465 	 * MACH_PORT_NULL is not an error. It means that we want to
466 	 * select any one thread that is already waiting, but not to
467 	 * pre-post the semaphore.
468 	 */
469 	if (thread_name != MACH_PORT_NULL) {
470 		thread = port_name_to_thread(thread_name, PORT_INTRANS_OPTIONS_NONE);
471 		if (thread == THREAD_NULL) {
472 			return KERN_INVALID_ARGUMENT;
473 		}
474 	} else {
475 		thread = THREAD_NULL;
476 	}
477 
478 	kr = port_name_to_semaphore(sema_name, &semaphore);
479 	if (kr == KERN_SUCCESS) {
480 		kr = semaphore_signal_internal(semaphore,
481 		    thread,
482 		    SEMAPHORE_OPTION_NONE);
483 		semaphore_dereference(semaphore);
484 	}
485 	if (thread != THREAD_NULL) {
486 		thread_deallocate(thread);
487 	}
488 	return kr;
489 }
490 
491 
492 
493 /*
494  *	Routine:	semaphore_signal
495  *
496  *		Traditional (in-kernel client and MIG interface) semaphore
497  *		signal routine.  Most users will access the trap version.
498  *
499  *		This interface in not defined to return info about whether
500  *		this call found a thread waiting or not.  The internal
501  *		routines (and future external routines) do.  We have to
502  *		convert those into plain KERN_SUCCESS returns.
503  */
504 kern_return_t
semaphore_signal(semaphore_t semaphore)505 semaphore_signal(
506 	semaphore_t             semaphore)
507 {
508 	kern_return_t           kr;
509 
510 	if (semaphore == SEMAPHORE_NULL) {
511 		return KERN_INVALID_ARGUMENT;
512 	}
513 
514 	kr = semaphore_signal_internal(semaphore,
515 	    THREAD_NULL,
516 	    SEMAPHORE_SIGNAL_PREPOST);
517 	if (kr == KERN_NOT_WAITING) {
518 		return KERN_SUCCESS;
519 	}
520 	return kr;
521 }
522 
523 /*
524  *	Routine:	semaphore_signal_trap
525  *
526  *	Trap interface to the semaphore_signal function.
527  */
528 kern_return_t
semaphore_signal_trap(struct semaphore_signal_trap_args * args)529 semaphore_signal_trap(
530 	struct semaphore_signal_trap_args *args)
531 {
532 	mach_port_name_t sema_name = args->signal_name;
533 
534 	return semaphore_signal_internal_trap(sema_name);
535 }
536 
537 kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)538 semaphore_signal_internal_trap(mach_port_name_t sema_name)
539 {
540 	semaphore_t   semaphore;
541 	kern_return_t kr;
542 
543 	kr = port_name_to_semaphore(sema_name, &semaphore);
544 	if (kr == KERN_SUCCESS) {
545 		kr = semaphore_signal_internal(semaphore,
546 		    THREAD_NULL,
547 		    SEMAPHORE_SIGNAL_PREPOST);
548 		semaphore_dereference(semaphore);
549 		if (kr == KERN_NOT_WAITING) {
550 			kr = KERN_SUCCESS;
551 		}
552 	}
553 	return kr;
554 }
555 
556 /*
557  *	Routine:	semaphore_signal_all
558  *
559  *	Awakens ALL threads currently blocked on the semaphore.
560  *	The semaphore count returns to zero.
561  */
562 kern_return_t
semaphore_signal_all(semaphore_t semaphore)563 semaphore_signal_all(
564 	semaphore_t             semaphore)
565 {
566 	kern_return_t kr;
567 
568 	if (semaphore == SEMAPHORE_NULL) {
569 		return KERN_INVALID_ARGUMENT;
570 	}
571 
572 	kr = semaphore_signal_internal(semaphore,
573 	    THREAD_NULL,
574 	    SEMAPHORE_SIGNAL_ALL);
575 	if (kr == KERN_NOT_WAITING) {
576 		return KERN_SUCCESS;
577 	}
578 	return kr;
579 }
580 
581 /*
582  *	Routine:	semaphore_signal_all_trap
583  *
584  *	Trap interface to the semaphore_signal_all function.
585  */
586 kern_return_t
semaphore_signal_all_trap(struct semaphore_signal_all_trap_args * args)587 semaphore_signal_all_trap(
588 	struct semaphore_signal_all_trap_args *args)
589 {
590 	mach_port_name_t sema_name = args->signal_name;
591 	semaphore_t     semaphore;
592 	kern_return_t kr;
593 
594 	kr = port_name_to_semaphore(sema_name, &semaphore);
595 	if (kr == KERN_SUCCESS) {
596 		kr = semaphore_signal_internal(semaphore,
597 		    THREAD_NULL,
598 		    SEMAPHORE_SIGNAL_ALL);
599 		semaphore_dereference(semaphore);
600 		if (kr == KERN_NOT_WAITING) {
601 			kr = KERN_SUCCESS;
602 		}
603 	}
604 	return kr;
605 }
606 
607 /*
608  *	Routine:	semaphore_convert_wait_result
609  *
610  *	Generate the return code after a semaphore wait/block.  It
611  *	takes the wait result as an input and coverts that to an
612  *	appropriate result.
613  */
614 static kern_return_t
semaphore_convert_wait_result(int wait_result)615 semaphore_convert_wait_result(int wait_result)
616 {
617 	switch (wait_result) {
618 	case THREAD_AWAKENED:
619 		return KERN_SUCCESS;
620 
621 	case THREAD_TIMED_OUT:
622 		return KERN_OPERATION_TIMED_OUT;
623 
624 	case THREAD_INTERRUPTED:
625 		return KERN_ABORTED;
626 
627 	case THREAD_RESTART:
628 		return KERN_TERMINATED;
629 
630 	default:
631 		panic("semaphore_block");
632 		return KERN_FAILURE;
633 	}
634 }
635 
636 /*
637  *	Routine:	semaphore_wait_continue
638  *
639  *	Common continuation routine after waiting on a semphore.
640  *	It returns directly to user space.
641  */
static void
semaphore_wait_continue(void *arg __unused, wait_result_t wr)
{
	thread_t self = current_thread();
	semaphore_cont_t caller_cont = self->sth_continuation;

	/* drop the reference(s) stashed on the thread before blocking */
	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL) {
		semaphore_dereference(self->sth_signalsemaphore);
	}

	assert(self->handoff_thread == THREAD_NULL);
	assert(caller_cont != NULL);
	/* tail-call into the caller's continuation; does not return */
	(*caller_cont)(semaphore_convert_wait_result(wr));
}
658 
659 /*
660  *	Routine:	semaphore_wait_internal
661  *
662  *		Decrements the semaphore count by one.  If the count is
663  *		negative after the decrement, the calling thread blocks
664  *		(possibly at a continuation and/or with a timeout).
665  *
666  *	Assumptions:
 *		A reference is held on the wait semaphore.
 *		A reference is held on the signal semaphore.
669  */
static kern_return_t
semaphore_wait_internal(
	semaphore_t             wait_semaphore,
	semaphore_t             signal_semaphore,
	uint64_t                deadline,
	int                     option,
	semaphore_cont_t        caller_cont)
{
	int           wait_result;
	spl_t         spl_level;
	kern_return_t kr = KERN_ALREADY_WAITING;
	thread_t      self = current_thread();
	thread_t      handoff_thread = THREAD_NULL;
	int           semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!semaphore_active(wait_semaphore)) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		/* fast path: consume a pending prepost, no blocking needed */
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		wait_semaphore->count = -1;  /* we don't keep an actual count */

		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
		(void)waitq_assert_wait64_locked(
			&wait_semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		/*
		 * Since we are committed to waiting, ask the paired signal
		 * below to hand its wakeup target directly to us if it can.
		 */
		semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF;
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
		    THREAD_NULL, semaphore_signal_options);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
			signal_kr = KERN_SUCCESS;
		} else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			assert(self->handoff_thread == THREAD_NULL);
			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED) {
				kr = KERN_TERMINATED;
			}
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING) {
		assert(self->handoff_thread == THREAD_NULL);
		return kr;
	}

	/* the signal above may have designated a thread to hand off to */
	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;

		/* does not return: resumes in semaphore_wait_continue() */
		thread_handoff_parameter(handoff_thread, semaphore_wait_continue,
		    NULL, handoff_option);
	} else {
		wait_result = thread_handoff_deallocate(handoff_thread, handoff_option);
	}

	assert(self->handoff_thread == THREAD_NULL);
	return semaphore_convert_wait_result(wait_result);
}
790 
791 
792 /*
793  *	Routine:	semaphore_wait
794  *
795  *	Traditional (non-continuation) interface presented to
796  *      in-kernel clients to wait on a semaphore.
797  */
798 kern_return_t
semaphore_wait(semaphore_t semaphore)799 semaphore_wait(
800 	semaphore_t             semaphore)
801 {
802 	if (semaphore == SEMAPHORE_NULL) {
803 		return KERN_INVALID_ARGUMENT;
804 	}
805 
806 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
807 	           0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
808 }
809 
810 kern_return_t
semaphore_wait_noblock(semaphore_t semaphore)811 semaphore_wait_noblock(
812 	semaphore_t             semaphore)
813 {
814 	if (semaphore == SEMAPHORE_NULL) {
815 		return KERN_INVALID_ARGUMENT;
816 	}
817 
818 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
819 	           0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, SEMAPHORE_CONT_NULL);
820 }
821 
822 kern_return_t
semaphore_wait_deadline(semaphore_t semaphore,uint64_t deadline)823 semaphore_wait_deadline(
824 	semaphore_t             semaphore,
825 	uint64_t                deadline)
826 {
827 	if (semaphore == SEMAPHORE_NULL) {
828 		return KERN_INVALID_ARGUMENT;
829 	}
830 
831 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
832 	           deadline, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
833 }
834 
835 /*
836  *	Trap:	semaphore_wait_trap
837  *
838  *	Trap version of semaphore wait.  Called on behalf of user-level
839  *	clients.
840  */
841 
842 kern_return_t
semaphore_wait_trap(struct semaphore_wait_trap_args * args)843 semaphore_wait_trap(
844 	struct semaphore_wait_trap_args *args)
845 {
846 	return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return);
847 }
848 
849 kern_return_t
semaphore_wait_trap_internal(mach_port_name_t name,semaphore_cont_t caller_cont)850 semaphore_wait_trap_internal(
851 	mach_port_name_t name,
852 	semaphore_cont_t caller_cont)
853 {
854 	semaphore_t   semaphore;
855 	kern_return_t kr;
856 
857 	kr = port_name_to_semaphore(name, &semaphore);
858 	if (kr == KERN_SUCCESS) {
859 		kr = semaphore_wait_internal(semaphore,
860 		    SEMAPHORE_NULL,
861 		    0ULL, SEMAPHORE_OPTION_NONE,
862 		    caller_cont);
863 		semaphore_dereference(semaphore);
864 	}
865 	return kr;
866 }
867 
868 /*
869  *	Routine:	semaphore_timedwait
870  *
871  *	Traditional (non-continuation) interface presented to
872  *      in-kernel clients to wait on a semaphore with a timeout.
873  *
874  *	A timeout of {0,0} is considered non-blocking.
875  */
876 kern_return_t
semaphore_timedwait(semaphore_t semaphore,mach_timespec_t wait_time)877 semaphore_timedwait(
878 	semaphore_t             semaphore,
879 	mach_timespec_t         wait_time)
880 {
881 	int      option = SEMAPHORE_OPTION_NONE;
882 	uint64_t deadline = 0;
883 
884 	if (semaphore == SEMAPHORE_NULL) {
885 		return KERN_INVALID_ARGUMENT;
886 	}
887 
888 	if (BAD_MACH_TIMESPEC(&wait_time)) {
889 		return KERN_INVALID_VALUE;
890 	}
891 
892 	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
893 		option = SEMAPHORE_TIMEOUT_NOBLOCK;
894 	} else {
895 		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
896 	}
897 
898 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
899 	           deadline, option, SEMAPHORE_CONT_NULL);
900 }
901 
902 /*
903  *	Trap:	semaphore_timedwait_trap
904  *
905  *	Trap version of a semaphore_timedwait.  The timeout parameter
906  *	is passed in two distinct parts and re-assembled on this side
907  *	of the trap interface (to accomodate calling conventions that
908  *	pass structures as pointers instead of inline in registers without
909  *	having to add a copyin).
910  *
911  *	A timeout of {0,0} is considered non-blocking.
912  */
913 kern_return_t
semaphore_timedwait_trap(struct semaphore_timedwait_trap_args * args)914 semaphore_timedwait_trap(
915 	struct semaphore_timedwait_trap_args *args)
916 {
917 	return semaphore_timedwait_trap_internal(args->wait_name,
918 	           args->sec, args->nsec, thread_syscall_return);
919 }
920 
921 
922 kern_return_t
semaphore_timedwait_trap_internal(mach_port_name_t name,unsigned int sec,clock_res_t nsec,semaphore_cont_t caller_cont)923 semaphore_timedwait_trap_internal(
924 	mach_port_name_t name,
925 	unsigned int     sec,
926 	clock_res_t      nsec,
927 	semaphore_cont_t caller_cont)
928 {
929 	semaphore_t semaphore;
930 	mach_timespec_t wait_time;
931 	kern_return_t kr;
932 
933 	wait_time.tv_sec = sec;
934 	wait_time.tv_nsec = nsec;
935 	if (BAD_MACH_TIMESPEC(&wait_time)) {
936 		return KERN_INVALID_VALUE;
937 	}
938 
939 	kr = port_name_to_semaphore(name, &semaphore);
940 	if (kr == KERN_SUCCESS) {
941 		int      option = SEMAPHORE_OPTION_NONE;
942 		uint64_t deadline = 0;
943 
944 		if (sec == 0 && nsec == 0) {
945 			option = SEMAPHORE_TIMEOUT_NOBLOCK;
946 		} else {
947 			deadline = semaphore_deadline(sec, nsec);
948 		}
949 
950 		kr = semaphore_wait_internal(semaphore,
951 		    SEMAPHORE_NULL,
952 		    deadline, option,
953 		    caller_cont);
954 		semaphore_dereference(semaphore);
955 	}
956 	return kr;
957 }
958 
959 /*
960  *	Routine:	semaphore_wait_signal
961  *
962  *	Atomically register a wait on a semaphore and THEN signal
963  *	another.  This is the in-kernel entry point that does not
964  *	block at a continuation and does not free a signal_semaphore
965  *      reference.
966  */
967 kern_return_t
semaphore_wait_signal(semaphore_t wait_semaphore,semaphore_t signal_semaphore)968 semaphore_wait_signal(
969 	semaphore_t             wait_semaphore,
970 	semaphore_t             signal_semaphore)
971 {
972 	if (wait_semaphore == SEMAPHORE_NULL) {
973 		return KERN_INVALID_ARGUMENT;
974 	}
975 
976 	return semaphore_wait_internal(wait_semaphore, signal_semaphore,
977 	           0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
978 }
979 
980 /*
981  *	Trap:	semaphore_wait_signal_trap
982  *
983  *	Atomically register a wait on a semaphore and THEN signal
984  *	another.  This is the trap version from user space.
985  */
986 kern_return_t
semaphore_wait_signal_trap(struct semaphore_wait_signal_trap_args * args)987 semaphore_wait_signal_trap(
988 	struct semaphore_wait_signal_trap_args *args)
989 {
990 	return semaphore_wait_signal_trap_internal(args->wait_name,
991 	           args->signal_name, thread_syscall_return);
992 }
993 
994 kern_return_t
semaphore_wait_signal_trap_internal(mach_port_name_t wait_name,mach_port_name_t signal_name,semaphore_cont_t caller_cont)995 semaphore_wait_signal_trap_internal(
996 	mach_port_name_t wait_name,
997 	mach_port_name_t signal_name,
998 	semaphore_cont_t caller_cont)
999 {
1000 	semaphore_t wait_semaphore;
1001 	semaphore_t signal_semaphore;
1002 	kern_return_t kr;
1003 
1004 	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
1005 	if (kr == KERN_SUCCESS) {
1006 		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1007 		if (kr == KERN_SUCCESS) {
1008 			kr = semaphore_wait_internal(wait_semaphore,
1009 			    signal_semaphore,
1010 			    0ULL, SEMAPHORE_OPTION_NONE,
1011 			    caller_cont);
1012 			semaphore_dereference(wait_semaphore);
1013 		}
1014 		semaphore_dereference(signal_semaphore);
1015 	}
1016 	return kr;
1017 }
1018 
1019 
1020 /*
1021  *	Routine:	semaphore_timedwait_signal
1022  *
1023  *	Atomically register a wait on a semaphore and THEN signal
1024  *	another.  This is the in-kernel entry point that does not
1025  *	block at a continuation.
1026  *
1027  *	A timeout of {0,0} is considered non-blocking.
1028  */
1029 kern_return_t
semaphore_timedwait_signal(semaphore_t wait_semaphore,semaphore_t signal_semaphore,mach_timespec_t wait_time)1030 semaphore_timedwait_signal(
1031 	semaphore_t             wait_semaphore,
1032 	semaphore_t             signal_semaphore,
1033 	mach_timespec_t         wait_time)
1034 {
1035 	int      option = SEMAPHORE_OPTION_NONE;
1036 	uint64_t deadline = 0;
1037 
1038 	if (wait_semaphore == SEMAPHORE_NULL) {
1039 		return KERN_INVALID_ARGUMENT;
1040 	}
1041 
1042 	if (BAD_MACH_TIMESPEC(&wait_time)) {
1043 		return KERN_INVALID_VALUE;
1044 	}
1045 
1046 	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
1047 		option = SEMAPHORE_TIMEOUT_NOBLOCK;
1048 	} else {
1049 		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
1050 	}
1051 
1052 	return semaphore_wait_internal(wait_semaphore, signal_semaphore,
1053 	           deadline, option, SEMAPHORE_CONT_NULL);
1054 }
1055 
1056 /*
1057  *	Trap:	semaphore_timedwait_signal_trap
1058  *
1059  *	Atomically register a timed wait on a semaphore and THEN signal
1060  *	another.  This is the trap version from user space.
1061  */
kern_return_t
semaphore_timedwait_signal_trap(
	struct semaphore_timedwait_signal_trap_args *args)
{
	/*
	 * Unpack the user trap arguments and forward to the shared worker.
	 * thread_syscall_return is passed as the continuation so a thread
	 * that blocks can resume without unwinding its kernel stack.
	 */
	return semaphore_timedwait_signal_trap_internal(args->wait_name,
	           args->signal_name, args->sec, args->nsec, thread_syscall_return);
}
1069 
1070 kern_return_t
semaphore_timedwait_signal_trap_internal(mach_port_name_t wait_name,mach_port_name_t signal_name,unsigned int sec,clock_res_t nsec,semaphore_cont_t caller_cont)1071 semaphore_timedwait_signal_trap_internal(
1072 	mach_port_name_t wait_name,
1073 	mach_port_name_t signal_name,
1074 	unsigned int sec,
1075 	clock_res_t nsec,
1076 	semaphore_cont_t caller_cont)
1077 {
1078 	semaphore_t wait_semaphore;
1079 	semaphore_t signal_semaphore;
1080 	mach_timespec_t wait_time;
1081 	kern_return_t kr;
1082 
1083 	wait_time.tv_sec = sec;
1084 	wait_time.tv_nsec = nsec;
1085 	if (BAD_MACH_TIMESPEC(&wait_time)) {
1086 		return KERN_INVALID_VALUE;
1087 	}
1088 
1089 	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
1090 	if (kr == KERN_SUCCESS) {
1091 		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1092 		if (kr == KERN_SUCCESS) {
1093 			int      option = SEMAPHORE_OPTION_NONE;
1094 			uint64_t deadline = 0;
1095 
1096 			if (sec == 0 && nsec == 0) {
1097 				option = SEMAPHORE_TIMEOUT_NOBLOCK;
1098 			} else {
1099 				deadline = semaphore_deadline(sec, nsec);
1100 			}
1101 
1102 			kr = semaphore_wait_internal(wait_semaphore,
1103 			    signal_semaphore,
1104 			    deadline, option,
1105 			    caller_cont);
1106 			semaphore_dereference(wait_semaphore);
1107 		}
1108 		semaphore_dereference(signal_semaphore);
1109 	}
1110 	return kr;
1111 }
1112 
1113 
1114 /*
1115  *	Routine:	semaphore_reference
1116  *
1117  *	Take out a reference on a semaphore.  This keeps the data structure
1118  *	in existence (but the semaphore may be deactivated).
1119  */
void
semaphore_reference(
	semaphore_t             semaphore)
{
	/* Assert the pointer really came from the semaphore zone before use. */
	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*semaphore), semaphore);
	os_ref_retain_raw(&semaphore->ref_count, &sema_refgrp);
}
1127 
1128 /*
1129  *	Routine:	semaphore_dereference
1130  *
1131  *	Release a reference on a semaphore.  If this is the last reference,
1132  *	the semaphore data structure is deallocated.
1133  */
1134 void
semaphore_dereference(semaphore_t semaphore)1135 semaphore_dereference(
1136 	semaphore_t             semaphore)
1137 {
1138 	if (semaphore == NULL) {
1139 		return;
1140 	}
1141 
1142 	if (os_ref_release_raw(&semaphore->ref_count, &sema_refgrp) == 0) {
1143 		return semaphore_free(semaphore);
1144 	}
1145 }
1146 
void
kdp_sema_find_owner(struct waitq *waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	/*
	 * Debugger helper: given the waitq a thread is blocked on, report
	 * the semaphore's port (as the wait context) and, when an owner
	 * task is recorded, its pid.  No locks are taken here — the kdp_
	 * prefix suggests this runs while the system is halted for the
	 * debugger (NOTE(review): confirm against kdp callers).
	 */
	semaphore_t sem = __container_of(waitq, struct semaphore, waitq);
	assert(event == SEMAPHORE_EVENT);

	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*sem), sem);

	/* Export a permuted/unslid pointer so no kernel address leaks. */
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
	if (sem->owner) {
		waitinfo->owner = pid_from_task(sem->owner);
	}
}
1160 
1161 /*
1162  *	Routine:	port_name_to_semaphore
1163  *	Purpose:
1164  *		Convert from a port name in the current space to a semaphore.
1165  *		Produces a semaphore ref, which may be null.
1166  *	Conditions:
1167  *		Nothing locked.
1168  */
1169 kern_return_t
port_name_to_semaphore(mach_port_name_t name,semaphore_t * semaphorep)1170 port_name_to_semaphore(
1171 	mach_port_name_t        name,
1172 	semaphore_t             *semaphorep)
1173 {
1174 	ipc_port_t port;
1175 	kern_return_t kr;
1176 
1177 	if (!MACH_PORT_VALID(name)) {
1178 		*semaphorep = SEMAPHORE_NULL;
1179 		return KERN_INVALID_NAME;
1180 	}
1181 
1182 	kr = ipc_port_translate_send(current_space(), name, &port);
1183 	if (kr != KERN_SUCCESS) {
1184 		*semaphorep = SEMAPHORE_NULL;
1185 		return kr;
1186 	}
1187 	/* have the port locked */
1188 
1189 	*semaphorep = convert_port_to_semaphore(port);
1190 	if (*semaphorep == SEMAPHORE_NULL) {
1191 		/* the port is valid, but doesn't denote a semaphore */
1192 		kr = KERN_INVALID_CAPABILITY;
1193 	} else {
1194 		kr = KERN_SUCCESS;
1195 	}
1196 	ip_mq_unlock(port);
1197 
1198 	return kr;
1199 }
1200 
1201 /*
1202  *	Routine:	convert_port_to_semaphore
1203  *	Purpose:
1204  *		Convert from a port to a semaphore.
1205  *		Doesn't consume the port [send-right] ref;
1206  *		produces a semaphore ref, which may be null.
1207  *	Conditions:
1208  *		Caller has a send-right reference to port.
1209  *		Port may or may not be locked.
1210  */
1211 semaphore_t
convert_port_to_semaphore(ipc_port_t port)1212 convert_port_to_semaphore(ipc_port_t port)
1213 {
1214 	semaphore_t semaphore = SEMAPHORE_NULL;
1215 
1216 	if (IP_VALID(port)) {
1217 		semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);
1218 		if (semaphore != SEMAPHORE_NULL) {
1219 			semaphore_reference(semaphore);
1220 		}
1221 	}
1222 
1223 	return semaphore;
1224 }
1225 
1226 
1227 /*
1228  *	Routine:	convert_semaphore_to_port
1229  *	Purpose:
1230  *		Convert a semaphore reference to a send right to a
1231  *		semaphore port.
1232  *
1233  *		Consumes the semaphore reference.  If the semaphore
1234  *		port currently has no send rights (or doesn't exist
1235  *		yet), the reference is donated to the port to represent
1236  *		all extant send rights collectively.
1237  */
ipc_port_t
convert_semaphore_to_port(semaphore_t semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return IP_NULL;
	}

	/*
	 * make a send right and donate our reference for
	 * semaphore_no_senders if this is the first send right
	 */
	if (!ipc_kobject_make_send_lazy_alloc_port(&semaphore->port,
	    semaphore, IKOT_SEMAPHORE, IPC_KOBJECT_ALLOC_NONE)) {
		/*
		 * The port already had outstanding send rights, so the
		 * collective donation is already in place; consume the
		 * caller's reference ourselves instead.
		 */
		semaphore_dereference(semaphore);
	}
	/*
	 * Reading semaphore->port after a possible dereference relies on
	 * the extant send rights keeping the semaphore (and its port
	 * field) alive — NOTE(review): guaranteed by the donation scheme
	 * above, not visible in this function alone.
	 */
	return semaphore->port;
}
1255 
1256 /*
1257  * Routine:	semaphore_no_senders
1258  * Purpose:
1259  *	Called whenever the Mach port system detects no-senders
1260  *	on the semaphore port.
1261  *
1262  *	When a send-right is first created, a no-senders
1263  *	notification is armed (and a semaphore reference is donated).
1264  *
1265  *	A no-senders notification will be posted when no one else holds a
1266  *	send-right (reference) to the semaphore's port. This notification function
1267  *	will consume the semaphore reference donated to the extant collection of
1268  *	send-rights.
1269  */
static void
semaphore_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
{
	/* Recover the semaphore backing this kobject port. */
	semaphore_t semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);

	assert(semaphore != SEMAPHORE_NULL);
	assert(semaphore->port == port);

	/*
	 * Consume the reference that convert_semaphore_to_port() donated
	 * on behalf of all (now gone) send rights.
	 */
	semaphore_dereference(semaphore);
}
1280 
/*
 * Register the IKOT_SEMAPHORE kobject type: the kobject pointer is
 * stable for the port's lifetime, and no-senders notifications are
 * delivered to semaphore_no_senders().
 */
IPC_KOBJECT_DEFINE(IKOT_SEMAPHORE,
    .iko_op_stable     = true,
    .iko_op_no_senders = semaphore_no_senders);
1284