xref: /xnu-12377.1.9/osfmk/kern/sync_sema.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  *
31  */
32 /*
33  *	File:	kern/sync_sema.c
34  *	Author:	Joseph CaraDonna
35  *
36  *	Contains RT distributed semaphore synchronization services.
37  */
38 
39 #include <mach/mach_types.h>
40 #include <mach/mach_traps.h>
41 #include <mach/kern_return.h>
42 #include <mach/semaphore.h>
43 #include <mach/sync_policy.h>
44 #include <mach/task.h>
45 
46 #include <kern/misc_protos.h>
47 #include <kern/sync_sema.h>
48 #include <kern/spl.h>
49 #include <kern/ipc_kobject.h>
50 #include <kern/ipc_tt.h>
51 #include <kern/thread.h>
52 #include <kern/clock.h>
53 #include <kern/host.h>
54 #include <kern/waitq.h>
55 #include <kern/zalloc.h>
56 #include <kern/mach_param.h>
57 
58 static const uint8_t semaphore_event;
59 #define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)
60 
61 ZONE_DEFINE_ID(ZONE_ID_SEMAPHORE, "semaphores", struct semaphore,
62     ZC_ZFREE_CLEARMEM);
63 
64 os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);
65 
66 /* Forward declarations */
67 
68 static inline bool
semaphore_active(semaphore_t semaphore)69 semaphore_active(semaphore_t semaphore)
70 {
71 	return semaphore->owner != TASK_NULL;
72 }
73 
74 static __inline__ uint64_t
semaphore_deadline(unsigned int sec,clock_res_t nsec)75 semaphore_deadline(
76 	unsigned int            sec,
77 	clock_res_t             nsec)
78 {
79 	uint64_t abstime;
80 
81 	nanotime_to_absolutetime(sec, nsec, &abstime);
82 	clock_absolutetime_interval_to_deadline(abstime, &abstime);
83 
84 	return abstime;
85 }
86 
87 /*
88  *	Routine:	semaphore_create
89  *
90  *	Creates a semaphore.
91  *	The port representing the semaphore is returned as a parameter.
92  */
kern_return_t
semaphore_create(
	task_t                  task,
	semaphore_t             *new_semaphore,
	int                     policy,
	int                     value)
{
	semaphore_t s = SEMAPHORE_NULL;

	*new_semaphore = SEMAPHORE_NULL;
	/* Reject negative initial counts and any non-user policy bits. */
	if (task == TASK_NULL || value < 0 || (policy & ~SYNC_POLICY_USER_MASK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Z_NOFAIL: allocation cannot return NULL, so no check is needed. */
	s = zalloc_id(ZONE_ID_SEMAPHORE, Z_ZERO | Z_WAITOK | Z_NOFAIL);

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	/* Check for race with task_terminate */
	if (!task->active) {
		task_unlock(task);
		zfree_id(ZONE_ID_SEMAPHORE, s);
		return KERN_INVALID_TASK;
	}

	/*
	 * SYNC_POLICY_INIT_LOCKED: the waitq (and hence the semaphore)
	 * comes back locked from waitq_init, so the fields below are
	 * initialized while holding both the task and semaphore locks.
	 */
	waitq_init(&s->waitq, WQT_QUEUE, policy | SYNC_POLICY_INIT_LOCKED);

	/* init everything under both the task and semaphore locks */
	os_ref_init_raw(&s->ref_count, &sema_refgrp);
	s->count = value;
	s->owner = task;
	enqueue_head(&task->semaphore_list, &s->task_link);
	task->semaphores_owned++;

	semaphore_unlock(s);

	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
138 
139 /*
140  *	Routine:	semaphore_destroy_internal
141  *
142  *	Disassociate a semaphore from its owning task, mark it inactive,
143  *	and set any waiting threads running with THREAD_RESTART.
144  *
145  *	Conditions:
146  *			task is locked
147  *			semaphore is owned by the specified task
148  *			if semaphore is locked, interrupts are disabled
149  *	Returns:
150  *			with semaphore unlocked, interrupts enabled
151  */
static void
semaphore_destroy_internal(
	task_t                  task,
	semaphore_t             semaphore,
	bool                    semaphore_locked)
{
	int old_count;

	/* unlink semaphore from owning task (task lock held by caller) */
	assert(semaphore->owner == task);
	remqueue(&semaphore->task_link);
	task->semaphores_owned--;

	spl_t spl_level = 0;

	if (semaphore_locked) {
		/*
		 * Caller already holds the semaphore lock with interrupts
		 * disabled; record a non-zero spl so the splx /
		 * waitq_flags_splx paths below restore interrupts on exit.
		 */
		spl_level = 1;
	} else {
		spl_level = splsched();
		semaphore_lock(semaphore);
	}

	/*
	 * deactivate semaphore under both locks
	 * and then wake up all waiters.
	 */

	/* owner == TASK_NULL is what marks the semaphore inactive */
	semaphore->owner = TASK_NULL;
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		/* A negative count means threads are blocked: restart them all. */
		waitq_wakeup64_all_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT, THREAD_RESTART,
		    waitq_flags_splx(spl_level) | WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked, splx handled */
		assert(ml_get_interrupts_enabled());
	} else {
		/* Non-negative count implies nobody is queued on the waitq. */
		assert(circle_queue_empty(&semaphore->waitq.waitq_queue));
		semaphore_unlock(semaphore);
		splx(spl_level);
		assert(ml_get_interrupts_enabled());
	}
}
196 
197 /*
198  *	Routine:	semaphore_free
199  *
200  *	Free a semaphore that hit a 0 refcount.
201  *
202  *	Conditions:
203  *			Nothing is locked.
204  */
__attribute__((noinline))
static void
semaphore_free(
	semaphore_t             semaphore)
{
	ipc_port_t port;
	task_t task;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	port = semaphore->port;
	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_kobject_dealloc_port(port, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_SEMAPHORE);
	}

	/*
	 * If the semaphore owned by the current task,
	 * we know the current task can't go away,
	 * so we can take locks in the right order.
	 *
	 * Else we try to take locks in the "wrong" order
	 * but if we fail to, we take a task ref and do it "right".
	 */
	task = current_task();
	if (semaphore->owner == task) {
		task_lock(task);
		/* re-check under the task lock: owner may have been cleared */
		if (semaphore->owner == task) {
			semaphore_destroy_internal(task, semaphore, false);
		} else {
			/* owner can only transition to TASK_NULL (destroyed) */
			assert(semaphore->owner == TASK_NULL);
		}
		task_unlock(task);
	} else {
		spl_t spl = splsched();

		/* semaphore_destroy_internal will always enable, can't nest */
		assert(spl);

		semaphore_lock(semaphore);

		task = semaphore->owner;
		if (task == TASK_NULL) {
			/* already destroyed: nothing left to unlink */
			semaphore_unlock(semaphore);
			splx(spl);
		} else if (task_lock_try(task)) {
			/* got the task lock out of order via try-lock (no deadlock) */
			semaphore_destroy_internal(task, semaphore, true);
			/* semaphore unlocked, interrupts enabled */
			task_unlock(task);
		} else {
			/*
			 * Couldn't get the task lock out of order: pin the
			 * task with a reference, drop the semaphore lock,
			 * and retry in the correct task-then-semaphore order.
			 */
			task_reference(task);
			semaphore_unlock(semaphore);
			splx(spl);

			task_lock(task);
			/* owner may have changed while we dropped the lock */
			if (semaphore->owner == task) {
				semaphore_destroy_internal(task, semaphore, false);
			}
			task_unlock(task);

			task_deallocate(task);
		}
	}

	waitq_deinit(&semaphore->waitq);
	zfree_id(ZONE_ID_SEMAPHORE, semaphore);
}
277 
278 /*
279  *	Routine:	semaphore_destroy
280  *
281  *	Destroys a semaphore and consume the caller's reference on the
282  *	semaphore.
283  */
kern_return_t
semaphore_destroy(
	task_t                  task,
	semaphore_t             semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task == TASK_NULL) {
		/* still consume the caller's reference on error */
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	/* unlocked peek, then re-check under the task lock */
	if (semaphore->owner == task) {
		task_lock(task);
		if (semaphore->owner == task) {
			semaphore_destroy_internal(task, semaphore, false);
		}
		task_unlock(task);
	}

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
309 
310 /*
311  *	Routine:	semaphore_destroy_all
312  *
313  *	Destroy all the semaphores associated with a given task.
314  */
315 
void
semaphore_destroy_all(
	task_t                  task)
{
	semaphore_t semaphore;

	task_lock(task);

	/*
	 * _safe iteration is required: semaphore_destroy_internal
	 * unlinks each element from the list as it goes.
	 */
	qe_foreach_element_safe(semaphore, &task->semaphore_list, task_link) {
		semaphore_destroy_internal(task, semaphore, false);
	}

	task_unlock(task);
}
330 
331 /*
332  *	Routine:	semaphore_signal_internal
333  *
 *		Signals the semaphore (a specific thread, one waiter, or
 *		all waiters, depending on the arguments/options).
 *	Conditions:
 *		Nothing locked; this routine takes and drops the
 *		semaphore lock (and spl) itself.
337  */
static kern_return_t
semaphore_signal_internal(
	semaphore_t             semaphore,
	thread_t                thread,
	int                     options)
{
	kern_return_t kr;

	spl_t spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore_active(semaphore)) {
		/* semaphore was destroyed out from under us */
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	/* Case 1: signal a specific thread (no pre-post on miss). */
	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_and_unlock(
				&semaphore->waitq, SEMAPHORE_EVENT,
				thread, THREAD_AWAKENED);
			/* waitq/semaphore is unlocked */
			splx(spl_level);
		} else {
			/* count >= 0 means nobody (including thread) is waiting */
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
			splx(spl_level);
		}
		return kr;
	}

	/* Case 2: wake every waiter and reset the count to zero. */
	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */

			kr = waitq_wakeup64_all_locked(&semaphore->waitq,
			    SEMAPHORE_EVENT, THREAD_AWAKENED,
			    WAITQ_UNLOCK | waitq_flags_splx(spl_level));
			/* waitq / semaphore is unlocked, splx handled */
		} else {
			/* no waiters; optionally pre-post for a future wait */
			if (options & SEMAPHORE_SIGNAL_PREPOST) {
				semaphore->count++;
			}
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
			splx(spl_level);
		}
		return kr;
	}

	/* Case 3: wake (or hand off to) a single waiter. */
	if (semaphore->count < 0) {
		waitq_wakeup_flags_t flags = WAITQ_KEEP_LOCKED;

		if (options & SEMAPHORE_THREAD_HANDOFF) {
			flags |= WAITQ_HANDOFF;
		}
		kr = waitq_wakeup64_one_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT, THREAD_AWAKENED, flags);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	/* Nobody woken: optionally pre-post so a later wait won't block. */
	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
417 
418 /*
419  *	Routine:	semaphore_signal_thread
420  *
421  *	If the specified thread is blocked on the semaphore, it is
422  *	woken up.  If a NULL thread was supplied, then any one
423  *	thread is woken up.  Otherwise the caller gets KERN_NOT_WAITING
424  *	and the	semaphore is unchanged.
425  */
426 kern_return_t
semaphore_signal_thread(semaphore_t semaphore,thread_t thread)427 semaphore_signal_thread(
428 	semaphore_t     semaphore,
429 	thread_t        thread)
430 {
431 	if (semaphore == SEMAPHORE_NULL) {
432 		return KERN_INVALID_ARGUMENT;
433 	}
434 
435 	return semaphore_signal_internal(semaphore, thread,
436 	           SEMAPHORE_OPTION_NONE);
437 }
438 
439 /*
440  *	Routine:	semaphore_signal_thread_trap
441  *
442  *	Trap interface to the semaphore_signal_thread function.
443  */
444 kern_return_t
semaphore_signal_thread_trap(struct semaphore_signal_thread_trap_args * args)445 semaphore_signal_thread_trap(
446 	struct semaphore_signal_thread_trap_args *args)
447 {
448 	mach_port_name_t sema_name = args->signal_name;
449 	mach_port_name_t thread_name = args->thread_name;
450 	semaphore_t      semaphore;
451 	thread_t         thread;
452 	kern_return_t    kr;
453 
454 	/*
455 	 * MACH_PORT_NULL is not an error. It means that we want to
456 	 * select any one thread that is already waiting, but not to
457 	 * pre-post the semaphore.
458 	 */
459 	if (thread_name != MACH_PORT_NULL) {
460 		thread = port_name_to_thread(thread_name, PORT_INTRANS_OPTIONS_NONE);
461 		if (thread == THREAD_NULL) {
462 			return KERN_INVALID_ARGUMENT;
463 		}
464 	} else {
465 		thread = THREAD_NULL;
466 	}
467 
468 	kr = port_name_to_semaphore(sema_name, &semaphore);
469 	if (kr == KERN_SUCCESS) {
470 		kr = semaphore_signal_internal(semaphore,
471 		    thread,
472 		    SEMAPHORE_OPTION_NONE);
473 		semaphore_dereference(semaphore);
474 	}
475 	if (thread != THREAD_NULL) {
476 		thread_deallocate(thread);
477 	}
478 	return kr;
479 }
480 
481 
482 
483 /*
484  *	Routine:	semaphore_signal
485  *
486  *		Traditional (in-kernel client and MIG interface) semaphore
487  *		signal routine.  Most users will access the trap version.
488  *
489  *		This interface in not defined to return info about whether
490  *		this call found a thread waiting or not.  The internal
491  *		routines (and future external routines) do.  We have to
492  *		convert those into plain KERN_SUCCESS returns.
493  */
494 kern_return_t
semaphore_signal(semaphore_t semaphore)495 semaphore_signal(
496 	semaphore_t             semaphore)
497 {
498 	kern_return_t           kr;
499 
500 	if (semaphore == SEMAPHORE_NULL) {
501 		return KERN_INVALID_ARGUMENT;
502 	}
503 
504 	kr = semaphore_signal_internal(semaphore,
505 	    THREAD_NULL,
506 	    SEMAPHORE_SIGNAL_PREPOST);
507 	if (kr == KERN_NOT_WAITING) {
508 		return KERN_SUCCESS;
509 	}
510 	return kr;
511 }
512 
513 /*
514  *	Routine:	semaphore_signal_trap
515  *
516  *	Trap interface to the semaphore_signal function.
517  */
518 kern_return_t
semaphore_signal_trap(struct semaphore_signal_trap_args * args)519 semaphore_signal_trap(
520 	struct semaphore_signal_trap_args *args)
521 {
522 	mach_port_name_t sema_name = args->signal_name;
523 
524 	return semaphore_signal_internal_trap(sema_name);
525 }
526 
527 kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)528 semaphore_signal_internal_trap(mach_port_name_t sema_name)
529 {
530 	semaphore_t   semaphore;
531 	kern_return_t kr;
532 
533 	kr = port_name_to_semaphore(sema_name, &semaphore);
534 	if (kr == KERN_SUCCESS) {
535 		kr = semaphore_signal_internal(semaphore,
536 		    THREAD_NULL,
537 		    SEMAPHORE_SIGNAL_PREPOST);
538 		semaphore_dereference(semaphore);
539 		if (kr == KERN_NOT_WAITING) {
540 			kr = KERN_SUCCESS;
541 		}
542 	}
543 	return kr;
544 }
545 
546 /*
547  *	Routine:	semaphore_signal_all
548  *
549  *	Awakens ALL threads currently blocked on the semaphore.
550  *	The semaphore count returns to zero.
551  */
552 kern_return_t
semaphore_signal_all(semaphore_t semaphore)553 semaphore_signal_all(
554 	semaphore_t             semaphore)
555 {
556 	kern_return_t kr;
557 
558 	if (semaphore == SEMAPHORE_NULL) {
559 		return KERN_INVALID_ARGUMENT;
560 	}
561 
562 	kr = semaphore_signal_internal(semaphore,
563 	    THREAD_NULL,
564 	    SEMAPHORE_SIGNAL_ALL);
565 	if (kr == KERN_NOT_WAITING) {
566 		return KERN_SUCCESS;
567 	}
568 	return kr;
569 }
570 
571 /*
572  *	Routine:	semaphore_signal_all_trap
573  *
574  *	Trap interface to the semaphore_signal_all function.
575  */
576 kern_return_t
semaphore_signal_all_trap(struct semaphore_signal_all_trap_args * args)577 semaphore_signal_all_trap(
578 	struct semaphore_signal_all_trap_args *args)
579 {
580 	mach_port_name_t sema_name = args->signal_name;
581 	semaphore_t     semaphore;
582 	kern_return_t kr;
583 
584 	kr = port_name_to_semaphore(sema_name, &semaphore);
585 	if (kr == KERN_SUCCESS) {
586 		kr = semaphore_signal_internal(semaphore,
587 		    THREAD_NULL,
588 		    SEMAPHORE_SIGNAL_ALL);
589 		semaphore_dereference(semaphore);
590 		if (kr == KERN_NOT_WAITING) {
591 			kr = KERN_SUCCESS;
592 		}
593 	}
594 	return kr;
595 }
596 
597 /*
598  *	Routine:	semaphore_convert_wait_result
599  *
600  *	Generate the return code after a semaphore wait/block.  It
601  *	takes the wait result as an input and coverts that to an
602  *	appropriate result.
603  */
604 static kern_return_t
semaphore_convert_wait_result(int wait_result)605 semaphore_convert_wait_result(int wait_result)
606 {
607 	switch (wait_result) {
608 	case THREAD_AWAKENED:
609 		return KERN_SUCCESS;
610 
611 	case THREAD_TIMED_OUT:
612 		return KERN_OPERATION_TIMED_OUT;
613 
614 	case THREAD_INTERRUPTED:
615 		return KERN_ABORTED;
616 
617 	case THREAD_RESTART:
618 		return KERN_TERMINATED;
619 
620 	default:
621 		panic("semaphore_block");
622 		return KERN_FAILURE;
623 	}
624 }
625 
626 /*
627  *	Routine:	semaphore_wait_continue
628  *
629  *	Common continuation routine after waiting on a semphore.
630  *	It returns directly to user space.
631  */
static void
semaphore_wait_continue(void *arg __unused, wait_result_t wr)
{
	thread_t self = current_thread();
	semaphore_cont_t caller_cont = self->sth_continuation;

	/* drop the semaphore reference(s) stashed by semaphore_wait_internal */
	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL) {
		semaphore_dereference(self->sth_signalsemaphore);
	}

	assert(self->handoff_thread == THREAD_NULL);
	assert(caller_cont != NULL);
	/* tail-call the caller's continuation; this does not return */
	(*caller_cont)(semaphore_convert_wait_result(wr));
}
648 
649 /*
650  *	Routine:	semaphore_wait_internal
651  *
652  *		Decrements the semaphore count by one.  If the count is
653  *		negative after the decrement, the calling thread blocks
654  *		(possibly at a continuation and/or with a timeout).
655  *
656  *	Assumptions:
 *		A reference is held on the signal semaphore.
659  */
static kern_return_t
semaphore_wait_internal(
	semaphore_t             wait_semaphore,
	semaphore_t             signal_semaphore,
	uint64_t                deadline,
	int                     option,
	semaphore_cont_t        caller_cont)
{
	int           wait_result;
	spl_t         spl_level;
	kern_return_t kr = KERN_ALREADY_WAITING;
	thread_t      self = current_thread();
	thread_t      handoff_thread = THREAD_NULL;
	int           semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!semaphore_active(wait_semaphore)) {
		/* semaphore already destroyed */
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		/* a pre-posted signal is available: consume it, no blocking */
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		/* caller asked to poll only */
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		wait_semaphore->count = -1;  /* we don't keep an actual count */

		/* register the wait now; the block itself happens further down */
		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
		(void)waitq_assert_wait64_locked(
			&wait_semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		/* since we will block anyway, ask the signal below to hand off */
		semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF;
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
		    THREAD_NULL, semaphore_signal_options);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
			signal_kr = KERN_SUCCESS;
		} else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			assert(self->handoff_thread == THREAD_NULL);
			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED) {
				kr = KERN_TERMINATED;
			}
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING) {
		assert(self->handoff_thread == THREAD_NULL);
		return kr;
	}

	/* the signal above may have picked a thread to hand the CPU to */
	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		/* stash state for semaphore_wait_continue; does not return */
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;

		thread_handoff_parameter(handoff_thread, semaphore_wait_continue,
		    NULL, handoff_option);
	} else {
		wait_result = thread_handoff_deallocate(handoff_thread, handoff_option);
	}

	assert(self->handoff_thread == THREAD_NULL);
	return semaphore_convert_wait_result(wait_result);
}
780 
781 
782 /*
783  *	Routine:	semaphore_wait
784  *
785  *	Traditional (non-continuation) interface presented to
786  *      in-kernel clients to wait on a semaphore.
787  */
788 kern_return_t
semaphore_wait(semaphore_t semaphore)789 semaphore_wait(
790 	semaphore_t             semaphore)
791 {
792 	if (semaphore == SEMAPHORE_NULL) {
793 		return KERN_INVALID_ARGUMENT;
794 	}
795 
796 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
797 	           0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
798 }
799 
800 kern_return_t
semaphore_wait_noblock(semaphore_t semaphore)801 semaphore_wait_noblock(
802 	semaphore_t             semaphore)
803 {
804 	if (semaphore == SEMAPHORE_NULL) {
805 		return KERN_INVALID_ARGUMENT;
806 	}
807 
808 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
809 	           0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, SEMAPHORE_CONT_NULL);
810 }
811 
812 kern_return_t
semaphore_wait_deadline(semaphore_t semaphore,uint64_t deadline)813 semaphore_wait_deadline(
814 	semaphore_t             semaphore,
815 	uint64_t                deadline)
816 {
817 	if (semaphore == SEMAPHORE_NULL) {
818 		return KERN_INVALID_ARGUMENT;
819 	}
820 
821 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
822 	           deadline, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
823 }
824 
825 /*
826  *	Trap:	semaphore_wait_trap
827  *
828  *	Trap version of semaphore wait.  Called on behalf of user-level
829  *	clients.
830  */
831 
832 kern_return_t
semaphore_wait_trap(struct semaphore_wait_trap_args * args)833 semaphore_wait_trap(
834 	struct semaphore_wait_trap_args *args)
835 {
836 	return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return);
837 }
838 
839 kern_return_t
semaphore_wait_trap_internal(mach_port_name_t name,semaphore_cont_t caller_cont)840 semaphore_wait_trap_internal(
841 	mach_port_name_t name,
842 	semaphore_cont_t caller_cont)
843 {
844 	semaphore_t   semaphore;
845 	kern_return_t kr;
846 
847 	kr = port_name_to_semaphore(name, &semaphore);
848 	if (kr == KERN_SUCCESS) {
849 		kr = semaphore_wait_internal(semaphore,
850 		    SEMAPHORE_NULL,
851 		    0ULL, SEMAPHORE_OPTION_NONE,
852 		    caller_cont);
853 		semaphore_dereference(semaphore);
854 	}
855 	return kr;
856 }
857 
858 /*
859  *	Routine:	semaphore_timedwait
860  *
861  *	Traditional (non-continuation) interface presented to
862  *      in-kernel clients to wait on a semaphore with a timeout.
863  *
864  *	A timeout of {0,0} is considered non-blocking.
865  */
866 kern_return_t
semaphore_timedwait(semaphore_t semaphore,mach_timespec_t wait_time)867 semaphore_timedwait(
868 	semaphore_t             semaphore,
869 	mach_timespec_t         wait_time)
870 {
871 	int      option = SEMAPHORE_OPTION_NONE;
872 	uint64_t deadline = 0;
873 
874 	if (semaphore == SEMAPHORE_NULL) {
875 		return KERN_INVALID_ARGUMENT;
876 	}
877 
878 	if (BAD_MACH_TIMESPEC(&wait_time)) {
879 		return KERN_INVALID_VALUE;
880 	}
881 
882 	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
883 		option = SEMAPHORE_TIMEOUT_NOBLOCK;
884 	} else {
885 		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
886 	}
887 
888 	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
889 	           deadline, option, SEMAPHORE_CONT_NULL);
890 }
891 
892 /*
893  *	Trap:	semaphore_timedwait_trap
894  *
895  *	Trap version of a semaphore_timedwait.  The timeout parameter
896  *	is passed in two distinct parts and re-assembled on this side
897  *	of the trap interface (to accomodate calling conventions that
898  *	pass structures as pointers instead of inline in registers without
899  *	having to add a copyin).
900  *
901  *	A timeout of {0,0} is considered non-blocking.
902  */
903 kern_return_t
semaphore_timedwait_trap(struct semaphore_timedwait_trap_args * args)904 semaphore_timedwait_trap(
905 	struct semaphore_timedwait_trap_args *args)
906 {
907 	return semaphore_timedwait_trap_internal(args->wait_name,
908 	           args->sec, args->nsec, thread_syscall_return);
909 }
910 
911 
912 kern_return_t
semaphore_timedwait_trap_internal(mach_port_name_t name,unsigned int sec,clock_res_t nsec,semaphore_cont_t caller_cont)913 semaphore_timedwait_trap_internal(
914 	mach_port_name_t name,
915 	unsigned int     sec,
916 	clock_res_t      nsec,
917 	semaphore_cont_t caller_cont)
918 {
919 	semaphore_t semaphore;
920 	mach_timespec_t wait_time;
921 	kern_return_t kr;
922 
923 	wait_time.tv_sec = sec;
924 	wait_time.tv_nsec = nsec;
925 	if (BAD_MACH_TIMESPEC(&wait_time)) {
926 		return KERN_INVALID_VALUE;
927 	}
928 
929 	kr = port_name_to_semaphore(name, &semaphore);
930 	if (kr == KERN_SUCCESS) {
931 		int      option = SEMAPHORE_OPTION_NONE;
932 		uint64_t deadline = 0;
933 
934 		if (sec == 0 && nsec == 0) {
935 			option = SEMAPHORE_TIMEOUT_NOBLOCK;
936 		} else {
937 			deadline = semaphore_deadline(sec, nsec);
938 		}
939 
940 		kr = semaphore_wait_internal(semaphore,
941 		    SEMAPHORE_NULL,
942 		    deadline, option,
943 		    caller_cont);
944 		semaphore_dereference(semaphore);
945 	}
946 	return kr;
947 }
948 
949 /*
950  *	Routine:	semaphore_wait_signal
951  *
952  *	Atomically register a wait on a semaphore and THEN signal
953  *	another.  This is the in-kernel entry point that does not
954  *	block at a continuation and does not free a signal_semaphore
955  *      reference.
956  */
957 kern_return_t
semaphore_wait_signal(semaphore_t wait_semaphore,semaphore_t signal_semaphore)958 semaphore_wait_signal(
959 	semaphore_t             wait_semaphore,
960 	semaphore_t             signal_semaphore)
961 {
962 	if (wait_semaphore == SEMAPHORE_NULL) {
963 		return KERN_INVALID_ARGUMENT;
964 	}
965 
966 	return semaphore_wait_internal(wait_semaphore, signal_semaphore,
967 	           0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
968 }
969 
970 /*
971  *	Trap:	semaphore_wait_signal_trap
972  *
973  *	Atomically register a wait on a semaphore and THEN signal
974  *	another.  This is the trap version from user space.
975  */
976 kern_return_t
semaphore_wait_signal_trap(struct semaphore_wait_signal_trap_args * args)977 semaphore_wait_signal_trap(
978 	struct semaphore_wait_signal_trap_args *args)
979 {
980 	return semaphore_wait_signal_trap_internal(args->wait_name,
981 	           args->signal_name, thread_syscall_return);
982 }
983 
984 kern_return_t
semaphore_wait_signal_trap_internal(mach_port_name_t wait_name,mach_port_name_t signal_name,semaphore_cont_t caller_cont)985 semaphore_wait_signal_trap_internal(
986 	mach_port_name_t wait_name,
987 	mach_port_name_t signal_name,
988 	semaphore_cont_t caller_cont)
989 {
990 	semaphore_t wait_semaphore;
991 	semaphore_t signal_semaphore;
992 	kern_return_t kr;
993 
994 	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
995 	if (kr == KERN_SUCCESS) {
996 		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
997 		if (kr == KERN_SUCCESS) {
998 			kr = semaphore_wait_internal(wait_semaphore,
999 			    signal_semaphore,
1000 			    0ULL, SEMAPHORE_OPTION_NONE,
1001 			    caller_cont);
1002 			semaphore_dereference(wait_semaphore);
1003 		}
1004 		semaphore_dereference(signal_semaphore);
1005 	}
1006 	return kr;
1007 }
1008 
1009 
1010 /*
1011  *	Routine:	semaphore_timedwait_signal
1012  *
1013  *	Atomically register a wait on a semaphore and THEN signal
1014  *	another.  This is the in-kernel entry point that does not
1015  *	block at a continuation.
1016  *
1017  *	A timeout of {0,0} is considered non-blocking.
1018  */
1019 kern_return_t
semaphore_timedwait_signal(semaphore_t wait_semaphore,semaphore_t signal_semaphore,mach_timespec_t wait_time)1020 semaphore_timedwait_signal(
1021 	semaphore_t             wait_semaphore,
1022 	semaphore_t             signal_semaphore,
1023 	mach_timespec_t         wait_time)
1024 {
1025 	int      option = SEMAPHORE_OPTION_NONE;
1026 	uint64_t deadline = 0;
1027 
1028 	if (wait_semaphore == SEMAPHORE_NULL) {
1029 		return KERN_INVALID_ARGUMENT;
1030 	}
1031 
1032 	if (BAD_MACH_TIMESPEC(&wait_time)) {
1033 		return KERN_INVALID_VALUE;
1034 	}
1035 
1036 	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
1037 		option = SEMAPHORE_TIMEOUT_NOBLOCK;
1038 	} else {
1039 		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
1040 	}
1041 
1042 	return semaphore_wait_internal(wait_semaphore, signal_semaphore,
1043 	           deadline, option, SEMAPHORE_CONT_NULL);
1044 }
1045 
1046 /*
1047  *	Trap:	semaphore_timedwait_signal_trap
1048  *
1049  *	Atomically register a timed wait on a semaphore and THEN signal
1050  *	another.  This is the trap version from user space.
1051  */
1052 kern_return_t
semaphore_timedwait_signal_trap(struct semaphore_timedwait_signal_trap_args * args)1053 semaphore_timedwait_signal_trap(
1054 	struct semaphore_timedwait_signal_trap_args *args)
1055 {
1056 	return semaphore_timedwait_signal_trap_internal(args->wait_name,
1057 	           args->signal_name, args->sec, args->nsec, thread_syscall_return);
1058 }
1059 
1060 kern_return_t
semaphore_timedwait_signal_trap_internal(mach_port_name_t wait_name,mach_port_name_t signal_name,unsigned int sec,clock_res_t nsec,semaphore_cont_t caller_cont)1061 semaphore_timedwait_signal_trap_internal(
1062 	mach_port_name_t wait_name,
1063 	mach_port_name_t signal_name,
1064 	unsigned int sec,
1065 	clock_res_t nsec,
1066 	semaphore_cont_t caller_cont)
1067 {
1068 	semaphore_t wait_semaphore;
1069 	semaphore_t signal_semaphore;
1070 	mach_timespec_t wait_time;
1071 	kern_return_t kr;
1072 
1073 	wait_time.tv_sec = sec;
1074 	wait_time.tv_nsec = nsec;
1075 	if (BAD_MACH_TIMESPEC(&wait_time)) {
1076 		return KERN_INVALID_VALUE;
1077 	}
1078 
1079 	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
1080 	if (kr == KERN_SUCCESS) {
1081 		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1082 		if (kr == KERN_SUCCESS) {
1083 			int      option = SEMAPHORE_OPTION_NONE;
1084 			uint64_t deadline = 0;
1085 
1086 			if (sec == 0 && nsec == 0) {
1087 				option = SEMAPHORE_TIMEOUT_NOBLOCK;
1088 			} else {
1089 				deadline = semaphore_deadline(sec, nsec);
1090 			}
1091 
1092 			kr = semaphore_wait_internal(wait_semaphore,
1093 			    signal_semaphore,
1094 			    deadline, option,
1095 			    caller_cont);
1096 			semaphore_dereference(wait_semaphore);
1097 		}
1098 		semaphore_dereference(signal_semaphore);
1099 	}
1100 	return kr;
1101 }
1102 
1103 
1104 /*
1105  *	Routine:	semaphore_reference
1106  *
1107  *	Take out a reference on a semaphore.  This keeps the data structure
1108  *	in existence (but the semaphore may be deactivated).
1109  */
void
semaphore_reference(
	semaphore_t             semaphore)
{
	/*
	 * Validate that the pointer really is an element of the semaphore
	 * zone before touching it (defends against type confusion), then
	 * take out an additional reference.
	 */
	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*semaphore), semaphore);
	os_ref_retain_raw(&semaphore->ref_count, &sema_refgrp);
}
1117 
1118 /*
1119  *	Routine:	semaphore_dereference
1120  *
1121  *	Release a reference on a semaphore.  If this is the last reference,
1122  *	the semaphore data structure is deallocated.
1123  */
1124 void
semaphore_dereference(semaphore_t semaphore)1125 semaphore_dereference(
1126 	semaphore_t             semaphore)
1127 {
1128 	if (semaphore == NULL) {
1129 		return;
1130 	}
1131 
1132 	if (os_ref_release_raw(&semaphore->ref_count, &sema_refgrp) == 0) {
1133 		return semaphore_free(semaphore);
1134 	}
1135 }
1136 
void
kdp_sema_find_owner(struct waitq *waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	/*
	 * Kernel-debugger (kdp) callback: given the waitq a thread is
	 * blocked on, fill in wait info identifying the semaphore and,
	 * when one is recorded, the task that "owns" it.
	 */
	semaphore_t sem = __container_of(waitq, struct semaphore, waitq);
	assert(event == SEMAPHORE_EVENT);

	/* Guard against a stale/forged waitq pointer before dereferencing. */
	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*sem), sem);

	/* Export the semaphore's port, unslid, as the wait context. */
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
	if (sem->owner) {
		/*
		 * Only written when an owner task is recorded; otherwise the
		 * field is left untouched.  NOTE(review): assumes the caller
		 * pre-initializes waitinfo->owner — confirm at call site.
		 */
		waitinfo->owner = pid_from_task(sem->owner);
	}
}
1150 
1151 /*
1152  *	Routine:	port_name_to_semaphore
1153  *	Purpose:
1154  *		Convert from a port name in the current space to a semaphore.
1155  *		Produces a semaphore ref, which may be null.
1156  *	Conditions:
1157  *		Nothing locked.
1158  */
1159 kern_return_t
port_name_to_semaphore(mach_port_name_t name,semaphore_t * semaphorep)1160 port_name_to_semaphore(
1161 	mach_port_name_t        name,
1162 	semaphore_t             *semaphorep)
1163 {
1164 	ipc_port_t port;
1165 	kern_return_t kr;
1166 
1167 	if (!MACH_PORT_VALID(name)) {
1168 		*semaphorep = SEMAPHORE_NULL;
1169 		return KERN_INVALID_NAME;
1170 	}
1171 
1172 	kr = ipc_port_translate_send(current_space(), name, &port);
1173 	if (kr != KERN_SUCCESS) {
1174 		*semaphorep = SEMAPHORE_NULL;
1175 		return kr;
1176 	}
1177 	/* have the port locked */
1178 
1179 	*semaphorep = convert_port_to_semaphore(port);
1180 	if (*semaphorep == SEMAPHORE_NULL) {
1181 		/* the port is valid, but doesn't denote a semaphore */
1182 		kr = KERN_INVALID_CAPABILITY;
1183 	} else {
1184 		kr = KERN_SUCCESS;
1185 	}
1186 	ip_mq_unlock(port);
1187 
1188 	return kr;
1189 }
1190 
1191 /*
1192  *	Routine:	convert_port_to_semaphore
1193  *	Purpose:
1194  *		Convert from a port to a semaphore.
1195  *		Doesn't consume the port [send-right] ref;
1196  *		produces a semaphore ref, which may be null.
1197  *	Conditions:
1198  *		Caller has a send-right reference to port.
1199  *		Port may or may not be locked.
1200  */
1201 semaphore_t
convert_port_to_semaphore(ipc_port_t port)1202 convert_port_to_semaphore(ipc_port_t port)
1203 {
1204 	semaphore_t semaphore = SEMAPHORE_NULL;
1205 
1206 	if (IP_VALID(port)) {
1207 		semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);
1208 		if (semaphore != SEMAPHORE_NULL) {
1209 			zone_id_require(ZONE_ID_SEMAPHORE,
1210 			    sizeof(struct semaphore), semaphore);
1211 			semaphore_reference(semaphore);
1212 		}
1213 	}
1214 
1215 	return semaphore;
1216 }
1217 
1218 
1219 /*
1220  *	Routine:	convert_semaphore_to_port
1221  *	Purpose:
1222  *		Convert a semaphore reference to a send right to a
1223  *		semaphore port.
1224  *
1225  *		Consumes the semaphore reference.  If the semaphore
1226  *		port currently has no send rights (or doesn't exist
1227  *		yet), the reference is donated to the port to represent
1228  *		all extant send rights collectively.
1229  */
1230 ipc_port_t
convert_semaphore_to_port(semaphore_t semaphore)1231 convert_semaphore_to_port(semaphore_t semaphore)
1232 {
1233 	if (semaphore == SEMAPHORE_NULL) {
1234 		return IP_NULL;
1235 	}
1236 
1237 	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(struct semaphore), semaphore);
1238 
1239 	/*
1240 	 * make a send right and donate our reference for
1241 	 * semaphore_no_senders if this is the first send right
1242 	 */
1243 	if (!ipc_kobject_make_send_lazy_alloc_port(&semaphore->port,
1244 	    semaphore, IKOT_SEMAPHORE)) {
1245 		semaphore_dereference(semaphore);
1246 	}
1247 	return semaphore->port;
1248 }
1249 
1250 /*
1251  * Routine:	semaphore_no_senders
1252  * Purpose:
1253  *	Called whenever the Mach port system detects no-senders
1254  *	on the semaphore port.
1255  *
1256  *	When a send-right is first created, a no-senders
1257  *	notification is armed (and a semaphore reference is donated).
1258  *
1259  *	A no-senders notification will be posted when no one else holds a
1260  *	send-right (reference) to the semaphore's port. This notification function
1261  *	will consume the semaphore reference donated to the extant collection of
1262  *	send-rights.
1263  */
static void
semaphore_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
{
	semaphore_t semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);

	assert(semaphore != SEMAPHORE_NULL);
	assert(semaphore->port == port);

	/*
	 * Drop the reference that was donated to the port (on behalf of
	 * all extant send rights) when the first send right was made in
	 * convert_semaphore_to_port().
	 */
	semaphore_dereference(semaphore);
}
1274 
/*
 * Register the semaphore kobject type with the IPC layer: send rights
 * are movable, the kobject pointer stays stable for the port's lifetime,
 * and no-senders notifications are delivered to semaphore_no_senders().
 */
IPC_KOBJECT_DEFINE(IKOT_SEMAPHORE,
    .iko_op_movable_send = true,
    .iko_op_stable     = true,
    .iko_op_no_senders = semaphore_no_senders);
1279