1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 *
31 */
32 /*
33 * File: kern/sync_sema.c
34 * Author: Joseph CaraDonna
35 *
36 * Contains RT distributed semaphore synchronization services.
37 */
38
39 #include <mach/mach_types.h>
40 #include <mach/mach_traps.h>
41 #include <mach/kern_return.h>
42 #include <mach/semaphore.h>
43 #include <mach/sync_policy.h>
44 #include <mach/task.h>
45
46 #include <kern/misc_protos.h>
47 #include <kern/sync_sema.h>
48 #include <kern/spl.h>
49 #include <kern/ipc_kobject.h>
50 #include <kern/ipc_tt.h>
51 #include <kern/thread.h>
52 #include <kern/clock.h>
53 #include <ipc/ipc_port.h>
54 #include <ipc/ipc_space.h>
55 #include <kern/host.h>
56 #include <kern/waitq.h>
57 #include <kern/zalloc.h>
58 #include <kern/mach_param.h>
59
/*
 * All semaphores share one wait event: the address of this dummy byte,
 * widened via CAST_EVENT64_T (each semaphore has its own waitq, so the
 * shared event value is unambiguous).
 */
static const uint8_t semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

/* Zone backing all struct semaphore allocations; fixed up early in boot. */
static SECURITY_READ_ONLY_LATE(zone_t) semaphore_zone;
ZONE_INIT(&semaphore_zone, "semaphores", sizeof(struct semaphore),
#if CONFIG_WAITQ_IRQSAFE_ALLOW_INVALID
    ZC_NOGZALLOC | ZC_KASAN_NOQUARANTINE | ZC_SEQUESTER |
#endif
    ZC_ZFREE_CLEARMEM, ZONE_ID_SEMAPHORE, NULL);

/* Reference-count group used for semaphore refcount accounting/debugging. */
os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);
71
72 /* Forward declarations */
73
74 static inline bool
semaphore_active(semaphore_t semaphore)75 semaphore_active(semaphore_t semaphore)
76 {
77 return semaphore->owner != TASK_NULL;
78 }
79
80 static __inline__ uint64_t
semaphore_deadline(unsigned int sec,clock_res_t nsec)81 semaphore_deadline(
82 unsigned int sec,
83 clock_res_t nsec)
84 {
85 uint64_t abstime;
86
87 nanotime_to_absolutetime(sec, nsec, &abstime);
88 clock_absolutetime_interval_to_deadline(abstime, &abstime);
89
90 return abstime;
91 }
92
93 /*
94 * Routine: semaphore_create
95 *
96 * Creates a semaphore.
97 * The port representing the semaphore is returned as a parameter.
98 */
kern_return_t
semaphore_create(
	task_t          task,
	semaphore_t     *new_semaphore,
	int             policy,
	int             value)
{
	semaphore_t s = SEMAPHORE_NULL;

	/* ensure the out-parameter is sane on every early-return path */
	*new_semaphore = SEMAPHORE_NULL;
	if (task == TASK_NULL || value < 0 || (policy & ~SYNC_POLICY_USER_MASK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Z_NOFAIL: this allocation cannot fail, so no NULL check follows */
	s = zalloc_flags(semaphore_zone, Z_ZERO | Z_WAITOK | Z_NOFAIL);

	/*
	 * Associate the new semaphore with the task by adding
	 * the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	/* Check for race with task_terminate */
	if (!task->active) {
		task_unlock(task);
		zfree(semaphore_zone, s);
		return KERN_INVALID_TASK;
	}

	/*
	 * SYNC_POLICY_INIT_LOCKED returns with the waitq (semaphore)
	 * lock held, so the initialization below happens under both
	 * the task lock and the semaphore lock.
	 */
	waitq_init(&s->waitq, policy |
	    SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_INIT_LOCKED);

	/* init everything under both the task and semaphore locks */
	os_ref_init_raw(&s->ref_count, &sema_refgrp);
	s->count = value;
	s->owner = task;
	enqueue_head(&task->semaphore_list, &s->task_link);
	task->semaphores_owned++;

	semaphore_unlock(s);

	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
145
146 /*
147 * Routine: semaphore_destroy_internal
148 *
149 * Disassociate a semaphore from its owning task, mark it inactive,
150 * and set any waiting threads running with THREAD_RESTART.
151 *
152 * Conditions:
153 * task is locked
154 * semaphore is owned by the specified task
155 * disabling interrupts (splsched) is the responsibility of the caller.
156 * Returns:
157 * with semaphore unlocked
158 */
static void
semaphore_destroy_internal(
	task_t          task,
	semaphore_t     semaphore,
	bool            semaphore_locked)
{
	int old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue(&semaphore->task_link);
	task->semaphores_owned--;

	/*
	 * deactivate semaphore under both locks
	 * and then wake up all waiters.
	 */
	if (!semaphore_locked) {
		semaphore_lock(semaphore);
	}

	/* owner == TASK_NULL is what marks the semaphore inactive */
	semaphore->owner = TASK_NULL;
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		/* a negative count means threads are blocked: restart them all */
		waitq_wakeup64_all_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT,
		    THREAD_RESTART, NULL,
		    WAITQ_ALL_PRIORITIES,
		    WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		/* non-negative count implies nobody can be queued */
		assert(queue_empty(&semaphore->waitq.waitq_queue));
		semaphore_unlock(semaphore);
	}
}
196
197 /*
198 * Routine: semaphore_free
199 *
200 * Free a semaphore that hit a 0 refcount.
201 *
202 * Conditions:
203 * Nothing is locked.
204 */
__attribute__((noinline))
static void
semaphore_free(
	semaphore_t     semaphore)
{
	ipc_port_t port;
	task_t task;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	port = semaphore->port;
	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_kobject_dealloc_port(port, 0, IKOT_SEMAPHORE);
	}

	/*
	 * If the semaphore owned by the current task,
	 * we know the current task can't go away,
	 * so we can take locks in the right order.
	 *
	 * Else we try to take locks in the "wrong" order
	 * but if we fail to, we take a task ref and do it "right".
	 */
	task = current_task();
	if (semaphore->owner == task) {
		task_lock(task);
		/* re-check under the task lock: destroy may have raced us */
		if (semaphore->owner == task) {
			spl_t s = splsched();
			semaphore_destroy_internal(task, semaphore, false);
			splx(s);
		} else {
			/* lost the race: someone already cleared the owner */
			assert(semaphore->owner == TASK_NULL);
		}
		task_unlock(task);
	} else {
		spl_t s = splsched();

		semaphore_lock(semaphore);

		task = semaphore->owner;
		if (task == TASK_NULL) {
			/* already destroyed: nothing left to unlink */
			semaphore_unlock(semaphore);
			splx(s);
		} else if (task_lock_try(task)) {
			/* got both locks despite the inverted order */
			semaphore_destroy_internal(task, semaphore, true);
			splx(s);
			/* semaphore unlocked */
			task_unlock(task);
		} else {
			/*
			 * Could deadlock: back off, pin the owning task with
			 * a reference, then retry taking the task lock first.
			 */
			task_reference(task);
			semaphore_unlock(semaphore);
			splx(s);

			task_lock(task);
			/* only destroy if the owner didn't change meanwhile */
			if (semaphore->owner == task) {
				s = splsched();
				semaphore_destroy_internal(task, semaphore, false);
				splx(s);
			}
			task_unlock(task);

			task_deallocate(task);
		}
	}

	waitq_deinit(&semaphore->waitq);
	zfree(semaphore_zone, semaphore);
}
278
279 /*
280 * Routine: semaphore_destroy
281 *
282 * Destroys a semaphore and consume the caller's reference on the
283 * semaphore.
284 */
kern_return_t
semaphore_destroy(
	task_t          task,
	semaphore_t     semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task == TASK_NULL) {
		/* still consume the caller's reference before failing */
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	/* unlocked peek avoids taking the task lock when we don't own it */
	if (semaphore->owner == task) {
		task_lock(task);
		/* re-check under the lock: destruction may have raced us */
		if (semaphore->owner == task) {
			spl_t spl_level = splsched();
			semaphore_destroy_internal(task, semaphore, false);
			splx(spl_level);
		}
		task_unlock(task);
	}

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
312
313 /*
314 * Routine: semaphore_destroy_all
315 *
316 * Destroy all the semaphores associated with a given task.
317 */
318 #define SEMASPERSPL 20 /* max number of semaphores to destroy per spl hold */
319
void
semaphore_destroy_all(
	task_t          task)
{
	semaphore_t semaphore;
	uint32_t count;
	spl_t spl_level;

	count = 0;
	task_lock(task);

	qe_foreach_element_safe(semaphore, &task->semaphore_list, task_link) {
		/* count == 0 <=> interrupts currently enabled: disable for a batch */
		if (count == 0) {
			spl_level = splsched();
		}

		semaphore_destroy_internal(task, semaphore, false);

		/* throttle number of semaphores per interrupt disablement */
		if (++count == SEMASPERSPL) {
			count = 0;
			splx(spl_level);
		}
	}
	/* re-enable interrupts if the final batch was partial */
	if (count != 0) {
		splx(spl_level);
	}

	task_unlock(task);
}
350
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore directly, waking waiters as appropriate
 *		for the supplied options and target thread.
 *	Assumptions:
 *		Semaphore is unlocked on entry; this routine acquires and
 *		releases the semaphore lock itself.
 */
static kern_return_t
semaphore_signal_internal(
	semaphore_t     semaphore,
	thread_t        thread,
	int             options)
{
	kern_return_t kr;
	spl_t spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore_active(semaphore)) {
		/* owning task destroyed the semaphore out from under us */
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		/* targeted signal: wake this specific thread, never pre-post */
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				thread,
				THREAD_AWAKENED,
				WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				THREAD_AWAKENED, NULL,
				WAITQ_ALL_PRIORITIES,
				WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			/* nobody waiting: optionally remember one signal */
			if (options & SEMAPHORE_SIGNAL_PREPOST) {
				semaphore->count++;
			}
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (semaphore->count < 0) {
		/* wake exactly one waiter, possibly handing off to it */
		waitq_options_t wq_option = (options & SEMAPHORE_THREAD_HANDOFF) ?
		    WQ_OPTION_HANDOFF : WQ_OPTION_NONE;
		kr = waitq_wakeup64_one_locked(
			&semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_AWAKENED, NULL,
			WAITQ_ALL_PRIORITIES,
			WAITQ_KEEP_LOCKED,
			wq_option);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		/* no waiter to wake: bank the signal for a future waiter */
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
444
445 /*
446 * Routine: semaphore_signal_thread
447 *
448 * If the specified thread is blocked on the semaphore, it is
449 * woken up. If a NULL thread was supplied, then any one
450 * thread is woken up. Otherwise the caller gets KERN_NOT_WAITING
451 * and the semaphore is unchanged.
452 */
453 kern_return_t
semaphore_signal_thread(semaphore_t semaphore,thread_t thread)454 semaphore_signal_thread(
455 semaphore_t semaphore,
456 thread_t thread)
457 {
458 if (semaphore == SEMAPHORE_NULL) {
459 return KERN_INVALID_ARGUMENT;
460 }
461
462 return semaphore_signal_internal(semaphore, thread,
463 SEMAPHORE_OPTION_NONE);
464 }
465
466 /*
467 * Routine: semaphore_signal_thread_trap
468 *
469 * Trap interface to the semaphore_signal_thread function.
470 */
471 kern_return_t
semaphore_signal_thread_trap(struct semaphore_signal_thread_trap_args * args)472 semaphore_signal_thread_trap(
473 struct semaphore_signal_thread_trap_args *args)
474 {
475 mach_port_name_t sema_name = args->signal_name;
476 mach_port_name_t thread_name = args->thread_name;
477 semaphore_t semaphore;
478 thread_t thread;
479 kern_return_t kr;
480
481 /*
482 * MACH_PORT_NULL is not an error. It means that we want to
483 * select any one thread that is already waiting, but not to
484 * pre-post the semaphore.
485 */
486 if (thread_name != MACH_PORT_NULL) {
487 thread = port_name_to_thread(thread_name, PORT_INTRANS_OPTIONS_NONE);
488 if (thread == THREAD_NULL) {
489 return KERN_INVALID_ARGUMENT;
490 }
491 } else {
492 thread = THREAD_NULL;
493 }
494
495 kr = port_name_to_semaphore(sema_name, &semaphore);
496 if (kr == KERN_SUCCESS) {
497 kr = semaphore_signal_internal(semaphore,
498 thread,
499 SEMAPHORE_OPTION_NONE);
500 semaphore_dereference(semaphore);
501 }
502 if (thread != THREAD_NULL) {
503 thread_deallocate(thread);
504 }
505 return kr;
506 }
507
508
509
510 /*
511 * Routine: semaphore_signal
512 *
513 * Traditional (in-kernel client and MIG interface) semaphore
514 * signal routine. Most users will access the trap version.
515 *
516 * This interface in not defined to return info about whether
517 * this call found a thread waiting or not. The internal
518 * routines (and future external routines) do. We have to
519 * convert those into plain KERN_SUCCESS returns.
520 */
521 kern_return_t
semaphore_signal(semaphore_t semaphore)522 semaphore_signal(
523 semaphore_t semaphore)
524 {
525 kern_return_t kr;
526
527 if (semaphore == SEMAPHORE_NULL) {
528 return KERN_INVALID_ARGUMENT;
529 }
530
531 kr = semaphore_signal_internal(semaphore,
532 THREAD_NULL,
533 SEMAPHORE_SIGNAL_PREPOST);
534 if (kr == KERN_NOT_WAITING) {
535 return KERN_SUCCESS;
536 }
537 return kr;
538 }
539
540 /*
541 * Routine: semaphore_signal_trap
542 *
543 * Trap interface to the semaphore_signal function.
544 */
545 kern_return_t
semaphore_signal_trap(struct semaphore_signal_trap_args * args)546 semaphore_signal_trap(
547 struct semaphore_signal_trap_args *args)
548 {
549 mach_port_name_t sema_name = args->signal_name;
550
551 return semaphore_signal_internal_trap(sema_name);
552 }
553
554 kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)555 semaphore_signal_internal_trap(mach_port_name_t sema_name)
556 {
557 semaphore_t semaphore;
558 kern_return_t kr;
559
560 kr = port_name_to_semaphore(sema_name, &semaphore);
561 if (kr == KERN_SUCCESS) {
562 kr = semaphore_signal_internal(semaphore,
563 THREAD_NULL,
564 SEMAPHORE_SIGNAL_PREPOST);
565 semaphore_dereference(semaphore);
566 if (kr == KERN_NOT_WAITING) {
567 kr = KERN_SUCCESS;
568 }
569 }
570 return kr;
571 }
572
573 /*
574 * Routine: semaphore_signal_all
575 *
576 * Awakens ALL threads currently blocked on the semaphore.
577 * The semaphore count returns to zero.
578 */
579 kern_return_t
semaphore_signal_all(semaphore_t semaphore)580 semaphore_signal_all(
581 semaphore_t semaphore)
582 {
583 kern_return_t kr;
584
585 if (semaphore == SEMAPHORE_NULL) {
586 return KERN_INVALID_ARGUMENT;
587 }
588
589 kr = semaphore_signal_internal(semaphore,
590 THREAD_NULL,
591 SEMAPHORE_SIGNAL_ALL);
592 if (kr == KERN_NOT_WAITING) {
593 return KERN_SUCCESS;
594 }
595 return kr;
596 }
597
598 /*
599 * Routine: semaphore_signal_all_trap
600 *
601 * Trap interface to the semaphore_signal_all function.
602 */
603 kern_return_t
semaphore_signal_all_trap(struct semaphore_signal_all_trap_args * args)604 semaphore_signal_all_trap(
605 struct semaphore_signal_all_trap_args *args)
606 {
607 mach_port_name_t sema_name = args->signal_name;
608 semaphore_t semaphore;
609 kern_return_t kr;
610
611 kr = port_name_to_semaphore(sema_name, &semaphore);
612 if (kr == KERN_SUCCESS) {
613 kr = semaphore_signal_internal(semaphore,
614 THREAD_NULL,
615 SEMAPHORE_SIGNAL_ALL);
616 semaphore_dereference(semaphore);
617 if (kr == KERN_NOT_WAITING) {
618 kr = KERN_SUCCESS;
619 }
620 }
621 return kr;
622 }
623
/*
 *	Routine:	semaphore_convert_wait_result
 *
 *		Generate the return code after a semaphore wait/block.  It
 *		takes the wait result as an input and converts that to an
 *		appropriate result.
 */
631 static kern_return_t
semaphore_convert_wait_result(int wait_result)632 semaphore_convert_wait_result(int wait_result)
633 {
634 switch (wait_result) {
635 case THREAD_AWAKENED:
636 return KERN_SUCCESS;
637
638 case THREAD_TIMED_OUT:
639 return KERN_OPERATION_TIMED_OUT;
640
641 case THREAD_INTERRUPTED:
642 return KERN_ABORTED;
643
644 case THREAD_RESTART:
645 return KERN_TERMINATED;
646
647 default:
648 panic("semaphore_block");
649 return KERN_FAILURE;
650 }
651 }
652
/*
 *	Routine:	semaphore_wait_continue
 *
 *		Common continuation routine after waiting on a semaphore.
 *		It returns directly to user space.
 */
static void
semaphore_wait_continue(void *arg __unused, wait_result_t wr)
{
	thread_t self = current_thread();
	semaphore_cont_t caller_cont = self->sth_continuation;

	/* drop the semaphore references stashed on the thread before blocking */
	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL) {
		semaphore_dereference(self->sth_signalsemaphore);
	}

	assert(self->handoff_thread == THREAD_NULL);
	assert(caller_cont != NULL);
	/* translate the wait result and resume via the caller's continuation */
	(*caller_cont)(semaphore_convert_wait_result(wr));
}
675
676 /*
677 * Routine: semaphore_wait_internal
678 *
679 * Decrements the semaphore count by one. If the count is
680 * negative after the decrement, the calling thread blocks
681 * (possibly at a continuation and/or with a timeout).
682 *
 *	Assumptions:
 *		A reference is held on the signal semaphore.
686 */
static kern_return_t
semaphore_wait_internal(
	semaphore_t     wait_semaphore,
	semaphore_t     signal_semaphore,
	uint64_t        deadline,
	int             option,
	semaphore_cont_t caller_cont)
{
	int wait_result;
	spl_t spl_level;
	kern_return_t kr = KERN_ALREADY_WAITING;
	thread_t self = current_thread();
	thread_t handoff_thread = THREAD_NULL;
	int semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!semaphore_active(wait_semaphore)) {
		/* semaphore was destroyed before we could wait on it */
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		/* fast path: a pre-posted signal is pending, consume it */
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		/* poll mode: we would have to block, report timeout instead */
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		wait_semaphore->count = -1;  /* we don't keep an actual count */

		/* register our intent to wait; we have not blocked yet */
		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
		(void)waitq_assert_wait64_locked(
			&wait_semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		/* ask the signal below to hand its woken thread to us */
		semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF;
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
		    THREAD_NULL, semaphore_signal_options);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
			signal_kr = KERN_SUCCESS;
		} else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			assert(self->handoff_thread == THREAD_NULL);
			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED) {
				kr = KERN_TERMINATED;
			}
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING) {
		assert(self->handoff_thread == THREAD_NULL);
		return kr;
	}

	/* the signal above may have left us a thread to hand off to */
	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		/* stash state for semaphore_wait_continue; does not return here */
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;

		thread_handoff_parameter(handoff_thread, semaphore_wait_continue,
		    NULL, handoff_option);
	} else {
		wait_result = thread_handoff_deallocate(handoff_thread, handoff_option);
	}

	assert(self->handoff_thread == THREAD_NULL);
	return semaphore_convert_wait_result(wait_result);
}
807
808
809 /*
810 * Routine: semaphore_wait
811 *
812 * Traditional (non-continuation) interface presented to
813 * in-kernel clients to wait on a semaphore.
814 */
815 kern_return_t
semaphore_wait(semaphore_t semaphore)816 semaphore_wait(
817 semaphore_t semaphore)
818 {
819 if (semaphore == SEMAPHORE_NULL) {
820 return KERN_INVALID_ARGUMENT;
821 }
822
823 return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
824 0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
825 }
826
827 kern_return_t
semaphore_wait_noblock(semaphore_t semaphore)828 semaphore_wait_noblock(
829 semaphore_t semaphore)
830 {
831 if (semaphore == SEMAPHORE_NULL) {
832 return KERN_INVALID_ARGUMENT;
833 }
834
835 return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
836 0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, SEMAPHORE_CONT_NULL);
837 }
838
839 kern_return_t
semaphore_wait_deadline(semaphore_t semaphore,uint64_t deadline)840 semaphore_wait_deadline(
841 semaphore_t semaphore,
842 uint64_t deadline)
843 {
844 if (semaphore == SEMAPHORE_NULL) {
845 return KERN_INVALID_ARGUMENT;
846 }
847
848 return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
849 deadline, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
850 }
851
852 /*
853 * Trap: semaphore_wait_trap
854 *
855 * Trap version of semaphore wait. Called on behalf of user-level
856 * clients.
857 */
858
859 kern_return_t
semaphore_wait_trap(struct semaphore_wait_trap_args * args)860 semaphore_wait_trap(
861 struct semaphore_wait_trap_args *args)
862 {
863 return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return);
864 }
865
866 kern_return_t
semaphore_wait_trap_internal(mach_port_name_t name,semaphore_cont_t caller_cont)867 semaphore_wait_trap_internal(
868 mach_port_name_t name,
869 semaphore_cont_t caller_cont)
870 {
871 semaphore_t semaphore;
872 kern_return_t kr;
873
874 kr = port_name_to_semaphore(name, &semaphore);
875 if (kr == KERN_SUCCESS) {
876 kr = semaphore_wait_internal(semaphore,
877 SEMAPHORE_NULL,
878 0ULL, SEMAPHORE_OPTION_NONE,
879 caller_cont);
880 semaphore_dereference(semaphore);
881 }
882 return kr;
883 }
884
885 /*
886 * Routine: semaphore_timedwait
887 *
888 * Traditional (non-continuation) interface presented to
889 * in-kernel clients to wait on a semaphore with a timeout.
890 *
891 * A timeout of {0,0} is considered non-blocking.
892 */
893 kern_return_t
semaphore_timedwait(semaphore_t semaphore,mach_timespec_t wait_time)894 semaphore_timedwait(
895 semaphore_t semaphore,
896 mach_timespec_t wait_time)
897 {
898 int option = SEMAPHORE_OPTION_NONE;
899 uint64_t deadline = 0;
900
901 if (semaphore == SEMAPHORE_NULL) {
902 return KERN_INVALID_ARGUMENT;
903 }
904
905 if (BAD_MACH_TIMESPEC(&wait_time)) {
906 return KERN_INVALID_VALUE;
907 }
908
909 if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
910 option = SEMAPHORE_TIMEOUT_NOBLOCK;
911 } else {
912 deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
913 }
914
915 return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
916 deadline, option, SEMAPHORE_CONT_NULL);
917 }
918
919 /*
920 * Trap: semaphore_timedwait_trap
921 *
922 * Trap version of a semaphore_timedwait. The timeout parameter
923 * is passed in two distinct parts and re-assembled on this side
 *	of the trap interface (to accommodate calling conventions that
925 * pass structures as pointers instead of inline in registers without
926 * having to add a copyin).
927 *
928 * A timeout of {0,0} is considered non-blocking.
929 */
930 kern_return_t
semaphore_timedwait_trap(struct semaphore_timedwait_trap_args * args)931 semaphore_timedwait_trap(
932 struct semaphore_timedwait_trap_args *args)
933 {
934 return semaphore_timedwait_trap_internal(args->wait_name,
935 args->sec, args->nsec, thread_syscall_return);
936 }
937
938
939 kern_return_t
semaphore_timedwait_trap_internal(mach_port_name_t name,unsigned int sec,clock_res_t nsec,semaphore_cont_t caller_cont)940 semaphore_timedwait_trap_internal(
941 mach_port_name_t name,
942 unsigned int sec,
943 clock_res_t nsec,
944 semaphore_cont_t caller_cont)
945 {
946 semaphore_t semaphore;
947 mach_timespec_t wait_time;
948 kern_return_t kr;
949
950 wait_time.tv_sec = sec;
951 wait_time.tv_nsec = nsec;
952 if (BAD_MACH_TIMESPEC(&wait_time)) {
953 return KERN_INVALID_VALUE;
954 }
955
956 kr = port_name_to_semaphore(name, &semaphore);
957 if (kr == KERN_SUCCESS) {
958 int option = SEMAPHORE_OPTION_NONE;
959 uint64_t deadline = 0;
960
961 if (sec == 0 && nsec == 0) {
962 option = SEMAPHORE_TIMEOUT_NOBLOCK;
963 } else {
964 deadline = semaphore_deadline(sec, nsec);
965 }
966
967 kr = semaphore_wait_internal(semaphore,
968 SEMAPHORE_NULL,
969 deadline, option,
970 caller_cont);
971 semaphore_dereference(semaphore);
972 }
973 return kr;
974 }
975
976 /*
977 * Routine: semaphore_wait_signal
978 *
979 * Atomically register a wait on a semaphore and THEN signal
980 * another. This is the in-kernel entry point that does not
981 * block at a continuation and does not free a signal_semaphore
982 * reference.
983 */
984 kern_return_t
semaphore_wait_signal(semaphore_t wait_semaphore,semaphore_t signal_semaphore)985 semaphore_wait_signal(
986 semaphore_t wait_semaphore,
987 semaphore_t signal_semaphore)
988 {
989 if (wait_semaphore == SEMAPHORE_NULL) {
990 return KERN_INVALID_ARGUMENT;
991 }
992
993 return semaphore_wait_internal(wait_semaphore, signal_semaphore,
994 0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
995 }
996
997 /*
998 * Trap: semaphore_wait_signal_trap
999 *
1000 * Atomically register a wait on a semaphore and THEN signal
1001 * another. This is the trap version from user space.
1002 */
1003 kern_return_t
semaphore_wait_signal_trap(struct semaphore_wait_signal_trap_args * args)1004 semaphore_wait_signal_trap(
1005 struct semaphore_wait_signal_trap_args *args)
1006 {
1007 return semaphore_wait_signal_trap_internal(args->wait_name,
1008 args->signal_name, thread_syscall_return);
1009 }
1010
1011 kern_return_t
semaphore_wait_signal_trap_internal(mach_port_name_t wait_name,mach_port_name_t signal_name,semaphore_cont_t caller_cont)1012 semaphore_wait_signal_trap_internal(
1013 mach_port_name_t wait_name,
1014 mach_port_name_t signal_name,
1015 semaphore_cont_t caller_cont)
1016 {
1017 semaphore_t wait_semaphore;
1018 semaphore_t signal_semaphore;
1019 kern_return_t kr;
1020
1021 kr = port_name_to_semaphore(signal_name, &signal_semaphore);
1022 if (kr == KERN_SUCCESS) {
1023 kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1024 if (kr == KERN_SUCCESS) {
1025 kr = semaphore_wait_internal(wait_semaphore,
1026 signal_semaphore,
1027 0ULL, SEMAPHORE_OPTION_NONE,
1028 caller_cont);
1029 semaphore_dereference(wait_semaphore);
1030 }
1031 semaphore_dereference(signal_semaphore);
1032 }
1033 return kr;
1034 }
1035
1036
1037 /*
1038 * Routine: semaphore_timedwait_signal
1039 *
1040 * Atomically register a wait on a semaphore and THEN signal
1041 * another. This is the in-kernel entry point that does not
1042 * block at a continuation.
1043 *
1044 * A timeout of {0,0} is considered non-blocking.
1045 */
1046 kern_return_t
semaphore_timedwait_signal(semaphore_t wait_semaphore,semaphore_t signal_semaphore,mach_timespec_t wait_time)1047 semaphore_timedwait_signal(
1048 semaphore_t wait_semaphore,
1049 semaphore_t signal_semaphore,
1050 mach_timespec_t wait_time)
1051 {
1052 int option = SEMAPHORE_OPTION_NONE;
1053 uint64_t deadline = 0;
1054
1055 if (wait_semaphore == SEMAPHORE_NULL) {
1056 return KERN_INVALID_ARGUMENT;
1057 }
1058
1059 if (BAD_MACH_TIMESPEC(&wait_time)) {
1060 return KERN_INVALID_VALUE;
1061 }
1062
1063 if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
1064 option = SEMAPHORE_TIMEOUT_NOBLOCK;
1065 } else {
1066 deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
1067 }
1068
1069 return semaphore_wait_internal(wait_semaphore, signal_semaphore,
1070 deadline, option, SEMAPHORE_CONT_NULL);
1071 }
1072
1073 /*
1074 * Trap: semaphore_timedwait_signal_trap
1075 *
1076 * Atomically register a timed wait on a semaphore and THEN signal
1077 * another. This is the trap version from user space.
1078 */
1079 kern_return_t
semaphore_timedwait_signal_trap(struct semaphore_timedwait_signal_trap_args * args)1080 semaphore_timedwait_signal_trap(
1081 struct semaphore_timedwait_signal_trap_args *args)
1082 {
1083 return semaphore_timedwait_signal_trap_internal(args->wait_name,
1084 args->signal_name, args->sec, args->nsec, thread_syscall_return);
1085 }
1086
1087 kern_return_t
semaphore_timedwait_signal_trap_internal(mach_port_name_t wait_name,mach_port_name_t signal_name,unsigned int sec,clock_res_t nsec,semaphore_cont_t caller_cont)1088 semaphore_timedwait_signal_trap_internal(
1089 mach_port_name_t wait_name,
1090 mach_port_name_t signal_name,
1091 unsigned int sec,
1092 clock_res_t nsec,
1093 semaphore_cont_t caller_cont)
1094 {
1095 semaphore_t wait_semaphore;
1096 semaphore_t signal_semaphore;
1097 mach_timespec_t wait_time;
1098 kern_return_t kr;
1099
1100 wait_time.tv_sec = sec;
1101 wait_time.tv_nsec = nsec;
1102 if (BAD_MACH_TIMESPEC(&wait_time)) {
1103 return KERN_INVALID_VALUE;
1104 }
1105
1106 kr = port_name_to_semaphore(signal_name, &signal_semaphore);
1107 if (kr == KERN_SUCCESS) {
1108 kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1109 if (kr == KERN_SUCCESS) {
1110 int option = SEMAPHORE_OPTION_NONE;
1111 uint64_t deadline = 0;
1112
1113 if (sec == 0 && nsec == 0) {
1114 option = SEMAPHORE_TIMEOUT_NOBLOCK;
1115 } else {
1116 deadline = semaphore_deadline(sec, nsec);
1117 }
1118
1119 kr = semaphore_wait_internal(wait_semaphore,
1120 signal_semaphore,
1121 deadline, option,
1122 caller_cont);
1123 semaphore_dereference(wait_semaphore);
1124 }
1125 semaphore_dereference(signal_semaphore);
1126 }
1127 return kr;
1128 }
1129
1130
1131 /*
1132 * Routine: semaphore_reference
1133 *
1134 * Take out a reference on a semaphore. This keeps the data structure
1135 * in existence (but the semaphore may be deactivated).
1136 */
void
semaphore_reference(
	semaphore_t semaphore)
{
	/*
	 * Verify the pointer really designates an object from the
	 * semaphore zone before touching its reference count.
	 */
	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*semaphore), semaphore);
	os_ref_retain_raw(&semaphore->ref_count, &sema_refgrp);
}
1144
1145 /*
1146 * Routine: semaphore_dereference
1147 *
1148 * Release a reference on a semaphore. If this is the last reference,
1149 * the semaphore data structure is deallocated.
1150 */
1151 void
semaphore_dereference(semaphore_t semaphore)1152 semaphore_dereference(
1153 semaphore_t semaphore)
1154 {
1155 if (semaphore == NULL) {
1156 return;
1157 }
1158
1159 if (os_ref_release_raw(&semaphore->ref_count, &sema_refgrp) == 0) {
1160 return semaphore_free(semaphore);
1161 }
1162 }
1163
/*
 * Debugger (kdp) callback: given the waitq a thread is blocked on,
 * report the semaphore's port (unslid) as the wait context and, when the
 * semaphore records an owning task, that task's pid as the owner.
 */
void
kdp_sema_find_owner(struct waitq *waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	/* The waitq is embedded in the semaphore; recover the container. */
	semaphore_t sem = __container_of(waitq, struct semaphore, waitq);
	assert(event == SEMAPHORE_EVENT);

	/* Guard against a stray pointer before dereferencing fields. */
	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*sem), sem);

	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
	if (sem->owner) {
		waitinfo->owner = pid_from_task(sem->owner);
	}
}
1177
1178 /*
1179 * Routine: port_name_to_semaphore
1180 * Purpose:
1181 * Convert from a port name in the current space to a semaphore.
1182 * Produces a semaphore ref, which may be null.
1183 * Conditions:
1184 * Nothing locked.
1185 */
1186 kern_return_t
port_name_to_semaphore(mach_port_name_t name,semaphore_t * semaphorep)1187 port_name_to_semaphore(
1188 mach_port_name_t name,
1189 semaphore_t *semaphorep)
1190 {
1191 ipc_port_t port;
1192 kern_return_t kr;
1193
1194 if (!MACH_PORT_VALID(name)) {
1195 *semaphorep = SEMAPHORE_NULL;
1196 return KERN_INVALID_NAME;
1197 }
1198
1199 kr = ipc_port_translate_send(current_space(), name, &port);
1200 if (kr != KERN_SUCCESS) {
1201 *semaphorep = SEMAPHORE_NULL;
1202 return kr;
1203 }
1204 /* have the port locked */
1205
1206 *semaphorep = convert_port_to_semaphore(port);
1207 if (*semaphorep == SEMAPHORE_NULL) {
1208 /* the port is valid, but doesn't denote a semaphore */
1209 kr = KERN_INVALID_CAPABILITY;
1210 } else {
1211 kr = KERN_SUCCESS;
1212 }
1213 ip_mq_unlock(port);
1214
1215 return kr;
1216 }
1217
1218 /*
1219 * Routine: convert_port_to_semaphore
1220 * Purpose:
1221 * Convert from a port to a semaphore.
1222 * Doesn't consume the port [send-right] ref;
1223 * produces a semaphore ref, which may be null.
1224 * Conditions:
1225 * Caller has a send-right reference to port.
1226 * Port may or may not be locked.
1227 */
1228 semaphore_t
convert_port_to_semaphore(ipc_port_t port)1229 convert_port_to_semaphore(ipc_port_t port)
1230 {
1231 semaphore_t semaphore = SEMAPHORE_NULL;
1232
1233 if (IP_VALID(port)) {
1234 semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);
1235 if (semaphore != SEMAPHORE_NULL) {
1236 semaphore_reference(semaphore);
1237 }
1238 }
1239
1240 return semaphore;
1241 }
1242
1243
1244 /*
1245 * Routine: convert_semaphore_to_port
1246 * Purpose:
1247 * Convert a semaphore reference to a send right to a
1248 * semaphore port.
1249 *
1250 * Consumes the semaphore reference. If the semaphore
1251 * port currently has no send rights (or doesn't exist
1252 * yet), the reference is donated to the port to represent
1253 * all extant send rights collectively.
1254 */
ipc_port_t
convert_semaphore_to_port(semaphore_t semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return IP_NULL;
	}

	/*
	 * make a send right and donate our reference for
	 * semaphore_no_senders if this is the first send right
	 */
	if (!ipc_kobject_make_send_lazy_alloc_port(&semaphore->port,
	    semaphore, IKOT_SEMAPHORE, IPC_KOBJECT_ALLOC_NONE, 0)) {
		/*
		 * The port already held send rights, so our reference was
		 * not donated; drop the one the caller passed in.
		 */
		semaphore_dereference(semaphore);
	}
	/*
	 * Safe to read semaphore->port here: the reference previously
	 * donated to the port's send-right collection keeps the
	 * semaphore alive until no-senders fires.
	 */
	return semaphore->port;
}
1272
1273 /*
1274 * Routine: semaphore_no_senders
1275 * Purpose:
1276 * Called whenever the Mach port system detects no-senders
1277 * on the semaphore port.
1278 *
1279 * When a send-right is first created, a no-senders
1280 * notification is armed (and a semaphore reference is donated).
1281 *
1282 * A no-senders notification will be posted when no one else holds a
1283 * send-right (reference) to the semaphore's port. This notification function
1284 * will consume the semaphore reference donated to the extant collection of
1285 * send-rights.
1286 */
1287 static void
semaphore_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)1288 semaphore_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
1289 {
1290 semaphore_t semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);
1291
1292 assert(semaphore != SEMAPHORE_NULL);
1293 assert(semaphore->port == port);
1294
1295 semaphore_dereference(semaphore);
1296 }
1297
/*
 * Register the semaphore kobject type with the IPC layer: the kobject
 * pointer is stable (allowing ipc_kobject_get_stable lookups) and
 * no-senders events are delivered to semaphore_no_senders.
 */
IPC_KOBJECT_DEFINE(IKOT_SEMAPHORE,
    .iko_op_stable = true,
    .iko_op_no_senders = semaphore_no_senders);
1301