/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_eventlink.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>
#include <mach/mach_traps.h>
#include <mach/mach_eventlink_server.h>

#include <libkern/OSAtomic.h>

static ZONE_DEFINE_TYPE(ipc_eventlink_zone, "ipc_eventlink",
    struct ipc_eventlink_base, ZC_ZFREE_CLEARMEM);

os_refgrp_decl(static, ipc_eventlink_refgrp, "eventlink", NULL);

#if DEVELOPMENT || DEBUG
static queue_head_t ipc_eventlink_list = QUEUE_HEAD_INITIALIZER(ipc_eventlink_list);
static LCK_GRP_DECLARE(ipc_eventlink_dev_lock_grp, "ipc_eventlink_dev_lock");
static LCK_SPIN_DECLARE(global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp);

#define global_ipc_eventlink_lock() \
	lck_spin_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_lock_try() \
	lck_spin_try_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_unlock() \
	lck_spin_unlock(&global_ipc_eventlink_lock)

#endif /* DEVELOPMENT || DEBUG */

/* Forward declarations */
static void
ipc_eventlink_no_senders(
	ipc_port_t port,
	mach_port_mscount_t mscount);

static struct ipc_eventlink_base *
ipc_eventlink_alloc(void);

static void
ipc_eventlink_initialize(
	struct ipc_eventlink_base *ipc_eventlink_base);

static kern_return_t
ipc_eventlink_destroy_internal(
	struct ipc_eventlink *ipc_eventlink);

static kern_return_t
ipc_eventlink_signal(
	struct ipc_eventlink *ipc_eventlink);

static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
	mach_port_name_t wait_port,
	mach_port_name_t signal_port,
	uint64_t count,
	mach_eventlink_signal_wait_option_t el_option,
	kern_clock_id_t clock_id,
	uint64_t deadline);

static kern_return_t
ipc_eventlink_signal_wait_internal(
	struct ipc_eventlink *wait_eventlink,
	struct ipc_eventlink *signal_eventlink,
	uint64_t deadline,
	uint64_t *count,
	ipc_eventlink_option_t eventlink_option);

static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result);

static kern_return_t
ipc_eventlink_signal_internal_locked(
	struct ipc_eventlink *signal_eventlink,
	ipc_eventlink_option_t eventlink_option);

static kern_return_t
convert_port_to_eventlink_locked(
	ipc_port_t port,
	struct ipc_eventlink **ipc_eventlink_ptr);

static kern_return_t
port_name_to_eventlink(
	mach_port_name_t name,
	struct ipc_eventlink **ipc_eventlink_ptr);

IPC_KOBJECT_DEFINE(IKOT_EVENTLINK,
    .iko_op_no_senders = ipc_eventlink_no_senders);

/*
 * Name: ipc_eventlink_alloc
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args: None.
 *
 * Returns:
 * ipc_eventlink_base on Success.
 */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void)
{
	struct ipc_eventlink_base *ipc_eventlink_base = IPC_EVENTLINK_BASE_NULL;
	ipc_eventlink_base = zalloc(ipc_eventlink_zone);

	ipc_eventlink_initialize(ipc_eventlink_base);

#if DEVELOPMENT || DEBUG
	/* Add ipc_eventlink to global list */
	global_ipc_eventlink_lock();
	queue_enter(&ipc_eventlink_list, ipc_eventlink_base,
	    struct ipc_eventlink_base *, elb_global_elm);
	global_ipc_eventlink_unlock();
#endif
	return ipc_eventlink_base;
}

/*
 * Name: ipc_eventlink_initialize
 *
 * Description: Initializes ipc eventlink struct.
 *
 * Args: ipc eventlink base.
 *
 * Returns:
 * None.
 */
static void
ipc_eventlink_initialize(
	struct ipc_eventlink_base *ipc_eventlink_base)
{
	/* Initialize the count to 2, refs for each ipc eventlink port */
	os_ref_init_count(&ipc_eventlink_base->elb_ref_count, &ipc_eventlink_refgrp, 2);
	ipc_eventlink_base->elb_type = IPC_EVENTLINK_TYPE_NO_COPYIN;

	for (int i = 0; i < 2; i++) {
		struct ipc_eventlink *ipc_eventlink = &(ipc_eventlink_base->elb_eventlink[i]);

		ipc_eventlink->el_port = ipc_kobject_alloc_port((ipc_kobject_t)ipc_eventlink,
		    IKOT_EVENTLINK, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		/* ipc_kobject_alloc_port never fails */
		ipc_eventlink->el_thread = THREAD_NULL;
		ipc_eventlink->el_sync_counter = 0;
		ipc_eventlink->el_wait_counter = UINT64_MAX;
		ipc_eventlink->el_base = ipc_eventlink_base;
	}

	/* Must be done last */
	waitq_init(&ipc_eventlink_base->elb_waitq, WQT_QUEUE, SYNC_POLICY_FIFO);
}

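/*
 * Layout note (summary of the structures used above, for reference):
 *
 * A single ipc_eventlink_base carries the shared state for a pair of
 * eventlink endpoints:
 *
 *   struct ipc_eventlink_base
 *     +-- elb_ref_count      one refcount shared by both endpoints (starts at 2)
 *     +-- elb_waitq          one wait queue shared by both endpoints
 *     +-- elb_eventlink[0]   one half: el_port, el_thread, el_sync_counter, el_wait_counter
 *     +-- elb_eventlink[1]   other half: el_port, el_thread, el_sync_counter, el_wait_counter
 *
 * Each half owns its own kobject port and counters; eventlink_remote_side()
 * (defined alongside these structures, not in this file) maps one half to
 * the other. The sketch above only reflects the fields touched in this file.
 */
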
/*
 * Name: mach_eventlink_create
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args:
 * task: task port of the process
 * mach_eventlink_create_option_t: option
 * eventlink_port_pair: eventlink port array
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_create(
	task_t task,
	mach_eventlink_create_option_t elc_option,
	eventlink_port_pair_t eventlink_port_pair)
{
	int i;
	struct ipc_eventlink_base *ipc_eventlink_base;

	if (task == TASK_NULL || task != current_task() ||
	    elc_option != MELC_OPTION_NO_COPYIN) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_eventlink_base = ipc_eventlink_alloc();

	for (i = 0; i < 2; i++) {
		eventlink_port_pair[i] = ipc_eventlink_base->elb_eventlink[i].el_port;
	}

	return KERN_SUCCESS;
}

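/*
 * Usage sketch (illustrative only; not part of this file's interface):
 *
 * A client drives an eventlink pair roughly as follows. The calls below
 * name the kernel entry points implemented in this file; user space
 * normally reaches them through the MIG-generated mach_eventlink routines
 * and trap wrappers, whose exact signatures are not shown here.
 *
 *   mach_port_t ports[2];
 *   mach_eventlink_create(current_task(), MELC_OPTION_NO_COPYIN, ports);
 *
 *   // Each side may bind a thread to its endpoint:
 *   mach_eventlink_associate(eventlink, thread, 0, 0, 0, 0, MELA_OPTION_NONE);
 *
 *   // Thereafter the sides ping-pong via the traps:
 *   //   mach_eventlink_signal_trap()            signal the other side
 *   //   mach_eventlink_wait_until_trap()        wait for the local count
 *   //   mach_eventlink_signal_wait_until_trap() signal and wait in one call
 *
 *   // Finally each side destroys its endpoint:
 *   mach_eventlink_destroy(eventlink);
 */
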
/*
 * Name: mach_eventlink_destroy
 *
 * Description: Destroy an ipc_eventlink, wakeup all threads.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_destroy(
	struct ipc_eventlink *ipc_eventlink)
{
	ipc_eventlink_destroy_internal(ipc_eventlink);

	/* mach_eventlink_destroy should succeed for terminated eventlink */
	return KERN_SUCCESS;
}

/*
 * Name: ipc_eventlink_destroy_internal
 *
 * Description: Destroy an ipc_eventlink, wakeup all threads.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_destroy_internal(
	struct ipc_eventlink *ipc_eventlink)
{
	spl_t s;
	struct ipc_eventlink_base *ipc_eventlink_base;
	thread_t associated_thread[2] = {};
	ipc_port_t ipc_eventlink_port = IPC_PORT_NULL;
	ipc_port_t ipc_eventlink_port_remote = IPC_PORT_NULL;

	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return KERN_TERMINATED;
	}

	s = splsched();
	ipc_eventlink_lock(ipc_eventlink);

	ipc_eventlink_base = ipc_eventlink->el_base;

	/* Check if the eventlink is active */
	if (!ipc_eventlink_active(ipc_eventlink)) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_TERMINATED;
	}

	for (int i = 0; i < 2; i++) {
		struct ipc_eventlink *temp_ipc_eventlink = &ipc_eventlink_base->elb_eventlink[i];

		/* Wakeup threads sleeping on eventlink */
		if (temp_ipc_eventlink->el_thread) {
			associated_thread[i] = temp_ipc_eventlink->el_thread;
			temp_ipc_eventlink->el_thread = THREAD_NULL;

			ipc_eventlink_signal_internal_locked(temp_ipc_eventlink,
			    IPC_EVENTLINK_FORCE_WAKEUP);
		}

		/* Only destroy the port on which destroy was called */
		if (temp_ipc_eventlink == ipc_eventlink) {
			ipc_eventlink_port = temp_ipc_eventlink->el_port;
			assert(ipc_eventlink_port != IPC_PORT_NULL);
		} else {
			/* Do not destroy the remote port, else eventlink_destroy will fail */
			ipc_eventlink_port_remote = temp_ipc_eventlink->el_port;
			assert(ipc_eventlink_port_remote != IPC_PORT_NULL);
			/*
			 * Take a reference on the remote port, since it could go
			 * away after eventlink lock is dropped.
			 */
			ip_reference(ipc_eventlink_port_remote);
		}
		assert(temp_ipc_eventlink->el_port != IPC_PORT_NULL);
		temp_ipc_eventlink->el_port = IPC_PORT_NULL;
	}

	/* Mark the eventlink as inactive */
	waitq_invalidate(&ipc_eventlink_base->elb_waitq);

	ipc_eventlink_unlock(ipc_eventlink);
	splx(s);

	/* Destroy the local eventlink port */
	ipc_kobject_dealloc_port(ipc_eventlink_port, 0, IKOT_EVENTLINK);
	/* Drops port reference */

	/* Clear the remote eventlink port without destroying it */
	(void)ipc_kobject_disable(ipc_eventlink_port_remote, IKOT_EVENTLINK);
	ip_release(ipc_eventlink_port_remote);

	for (int i = 0; i < 2; i++) {
		if (associated_thread[i] != THREAD_NULL &&
		    associated_thread[i] != THREAD_ASSOCIATE_WILD) {
			thread_deallocate(associated_thread[i]);
		}

		/* Drop the eventlink reference given to port */
		ipc_eventlink_deallocate(ipc_eventlink);
	}
	return KERN_SUCCESS;
}

/*
 * Name: mach_eventlink_associate
 *
 * Description: Associate a thread to eventlink.
 *
 * Args:
 * eventlink: eventlink
 * thread: thread to be associated
 * copyin_addr_wait: copyin addr for wait
 * copyin_mask_wait: copyin mask for wait
 * copyin_addr_signal: copyin addr for signal
 * copyin_mask_signal: copyin mask for signal
 * mach_eventlink_associate_option_t: option for eventlink associate
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_associate(
	struct ipc_eventlink *ipc_eventlink,
	thread_t thread,
	mach_vm_address_t copyin_addr_wait,
	uint64_t copyin_mask_wait,
	mach_vm_address_t copyin_addr_signal,
	uint64_t copyin_mask_signal,
	mach_eventlink_associate_option_t ela_option)
{
	spl_t s;

	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return KERN_TERMINATED;
	}

	if (copyin_addr_wait != 0 || copyin_mask_wait != 0 ||
	    copyin_addr_signal != 0 || copyin_mask_signal != 0) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((thread == NULL && ela_option == MELA_OPTION_NONE) ||
	    (thread != NULL && ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	ipc_eventlink_lock(ipc_eventlink);

	/* Check if eventlink is terminated */
	if (!ipc_eventlink_active(ipc_eventlink)) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_TERMINATED;
	}

	if (ipc_eventlink->el_thread != NULL) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_NAME_EXISTS;
	}

	if (ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT) {
		ipc_eventlink->el_thread = THREAD_ASSOCIATE_WILD;
	} else {
		thread_reference(thread);
		ipc_eventlink->el_thread = thread;
	}

	ipc_eventlink_unlock(ipc_eventlink);
	splx(s);
	return KERN_SUCCESS;
}

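/*
 * Association modes (summary of the checks above, for reference):
 *
 *   mach_eventlink_associate(el, thread, 0, 0, 0, 0, MELA_OPTION_NONE);
 *       binds an explicit thread to the endpoint; only that thread may
 *       wait on it.
 *
 *   mach_eventlink_associate(el, THREAD_NULL, 0, 0, 0, 0,
 *       MELA_OPTION_ASSOCIATE_ON_WAIT);
 *       marks the endpoint THREAD_ASSOCIATE_WILD, so whichever thread
 *       waits first becomes the waiter for that wait, and only one such
 *       waiter is allowed at a time.
 *
 * All copyin addresses and masks must currently be zero, matching the
 * MELC_OPTION_NO_COPYIN-only create path above.
 */
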
/*
 * Name: mach_eventlink_disassociate
 *
 * Description: Disassociate a thread from eventlink.
 * Wake up the associated thread if blocked on eventlink.
 *
 * Args:
 * eventlink: eventlink
 * mach_eventlink_disassociate_option_t: option for eventlink disassociate
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_disassociate(
	struct ipc_eventlink *ipc_eventlink,
	mach_eventlink_disassociate_option_t eld_option)
{
	spl_t s;
	thread_t thread;

	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return KERN_TERMINATED;
	}

	if (eld_option != MELD_OPTION_NONE) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	ipc_eventlink_lock(ipc_eventlink);

	/* Check if eventlink is terminated */
	if (!ipc_eventlink_active(ipc_eventlink)) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_TERMINATED;
	}

	if (ipc_eventlink->el_thread == NULL) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_INVALID_ARGUMENT;
	}

	thread = ipc_eventlink->el_thread;
	ipc_eventlink->el_thread = NULL;

	/* wake up the thread if blocked */
	ipc_eventlink_signal_internal_locked(ipc_eventlink,
	    IPC_EVENTLINK_FORCE_WAKEUP);

	ipc_eventlink_unlock(ipc_eventlink);
	splx(s);

	if (thread != THREAD_ASSOCIATE_WILD) {
		thread_deallocate(thread);
	}
	return KERN_SUCCESS;
}

/*
 * Name: mach_eventlink_signal_trap
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than wake counter.
 *
 * Args:
 * port: eventlink port name
 * signal_count: unused
 *
 * Returns:
 * uint64_t: Contains count and error codes.
 */
uint64_t
mach_eventlink_signal_trap(
	mach_port_name_t port,
	uint64_t signal_count __unused)
{
	struct ipc_eventlink *ipc_eventlink;
	kern_return_t kr;
	uint64_t retval = 0;

	kr = port_name_to_eventlink(port, &ipc_eventlink);
	if (kr == KERN_SUCCESS) {
		/* Signal the remote side of the eventlink */
		kr = ipc_eventlink_signal(eventlink_remote_side(ipc_eventlink));

		/* Deallocate ref returned by port_name_to_eventlink */
		ipc_eventlink_deallocate(ipc_eventlink);
	}

	retval = encode_eventlink_count_and_error(0, kr);
	return retval;
}

/*
 * Name: ipc_eventlink_signal
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than wake counter.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal(
	struct ipc_eventlink *ipc_eventlink)
{
	kern_return_t kr;
	spl_t s;

	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	ipc_eventlink_lock(ipc_eventlink);

	/* Check if eventlink is terminated */
	if (!ipc_eventlink_active(ipc_eventlink)) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_TERMINATED;
	}

	kr = ipc_eventlink_signal_internal_locked(ipc_eventlink,
	    IPC_EVENTLINK_NONE);

	ipc_eventlink_unlock(ipc_eventlink);
	splx(s);

	if (kr == KERN_NOT_WAITING) {
		kr = KERN_SUCCESS;
	}

	return kr;
}

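/*
 * Note on the KERN_NOT_WAITING conversion above: a signal that finds no
 * waiter is not an error for callers of the signal trap. The sync counter
 * has already been incremented under the lock, so a later wait that passes
 * a count below the new counter value returns immediately.
 *
 * Illustrative ordering (counters are per endpoint, not shared):
 *
 *   signaler: el_sync_counter 0 -> 1, no waiter -> KERN_NOT_WAITING -> KERN_SUCCESS
 *   waiter:   waits with count = 0; 0 < el_sync_counter (1) -> returns at once
 */
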
/*
 * Name: mach_eventlink_wait_until_trap
 *
 * Description: Wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 * eventlink_port: eventlink port for wait
 * wait_count: signal count to wait on
 * option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_wait_until_trap(
	mach_port_name_t eventlink_port,
	uint64_t wait_count,
	mach_eventlink_signal_wait_option_t option,
	kern_clock_id_t clock_id,
	uint64_t deadline)
{
	return ipc_eventlink_signal_wait_until_trap_internal(
		eventlink_port,
		MACH_PORT_NULL,
		wait_count,
		option,
		clock_id,
		deadline);
}

/*
 * Name: mach_eventlink_signal_wait_until_trap
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 * eventlink_port: eventlink port for wait
 * wait_count: signal count to wait on
 * signal_count: unused
 * option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_signal_wait_until_trap(
	mach_port_name_t eventlink_port,
	uint64_t wait_count,
	uint64_t signal_count __unused,
	mach_eventlink_signal_wait_option_t option,
	kern_clock_id_t clock_id,
	uint64_t deadline)
{
	return ipc_eventlink_signal_wait_until_trap_internal(
		eventlink_port,
		eventlink_port,
		wait_count,
		option,
		clock_id,
		deadline);
}

/*
 * Name: ipc_eventlink_signal_wait_until_trap_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 * wait_port: eventlink port for wait
 * signal_port: eventlink port for signal
 * count: signal count to wait on
 * el_option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains signal count and error codes
 */
static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
	mach_port_name_t wait_port,
	mach_port_name_t signal_port,
	uint64_t count,
	mach_eventlink_signal_wait_option_t el_option,
	kern_clock_id_t clock_id,
	uint64_t deadline)
{
	struct ipc_eventlink *wait_ipc_eventlink = IPC_EVENTLINK_NULL;
	struct ipc_eventlink *signal_ipc_eventlink = IPC_EVENTLINK_NULL;
	kern_return_t kr;
	ipc_eventlink_option_t ipc_eventlink_option = IPC_EVENTLINK_NONE;

	if (clock_id != KERN_CLOCK_MACH_ABSOLUTE_TIME) {
		return encode_eventlink_count_and_error(count, KERN_INVALID_ARGUMENT);
	}

	kr = port_name_to_eventlink(wait_port, &wait_ipc_eventlink);
	if (kr == KERN_SUCCESS) {
		assert(wait_ipc_eventlink != IPC_EVENTLINK_NULL);

		/* Get the remote side of eventlink for signal */
		if (signal_port != MACH_PORT_NULL) {
			signal_ipc_eventlink = eventlink_remote_side(wait_ipc_eventlink);
		}

		if (el_option & MELSW_OPTION_NO_WAIT) {
			ipc_eventlink_option |= IPC_EVENTLINK_NO_WAIT;
		}

		kr = ipc_eventlink_signal_wait_internal(wait_ipc_eventlink,
		    signal_ipc_eventlink, deadline,
		    &count, ipc_eventlink_option);

		/* release ref returned by port_name_to_eventlink */
		ipc_eventlink_deallocate(wait_ipc_eventlink);
	}
	return encode_eventlink_count_and_error(count, kr);
}

/*
 * Name: ipc_eventlink_signal_wait_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 * wait_eventlink: eventlink for wait
 * signal_eventlink: eventlink for signal
 * deadline: deadline in mach_absolute_time
 * count: pointer to signal count to wait on, updated with the count on return
 * eventlink_option: eventlink option
 *
 * Returns:
 * KERN_SUCCESS on Success.
 * signal count is returned implicitly in count arg.
 */
static kern_return_t
ipc_eventlink_signal_wait_internal(
	struct ipc_eventlink *wait_eventlink,
	struct ipc_eventlink *signal_eventlink,
	uint64_t deadline,
	uint64_t *count,
	ipc_eventlink_option_t eventlink_option)
{
	spl_t s;
	kern_return_t kr = KERN_ALREADY_WAITING;
	thread_t self = current_thread();
	struct ipc_eventlink_base *ipc_eventlink_base = wait_eventlink->el_base;
	thread_t handoff_thread = THREAD_NULL;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
	uint64_t old_signal_count;
	wait_result_t wr;

	s = splsched();
	ipc_eventlink_lock(wait_eventlink);

	/* Check if eventlink is terminated */
	if (!ipc_eventlink_active(wait_eventlink)) {
		kr = KERN_TERMINATED;
		goto unlock;
	}

	/* Check if waiting thread is associated to eventlink */
	if (wait_eventlink->el_thread != THREAD_ASSOCIATE_WILD &&
	    wait_eventlink->el_thread != self) {
		kr = KERN_INVALID_ARGUMENT;
		goto unlock;
	}

	/* Check if thread already waiting for associate on wait case */
	if (wait_eventlink->el_thread == THREAD_ASSOCIATE_WILD &&
	    wait_eventlink->el_wait_counter != UINT64_MAX) {
		kr = KERN_INVALID_ARGUMENT;
		goto unlock;
	}

	/* Check if the signal count exceeds the count provided */
	if (*count < wait_eventlink->el_sync_counter) {
		*count = wait_eventlink->el_sync_counter;
		kr = KERN_SUCCESS;
	} else if (eventlink_option & IPC_EVENTLINK_NO_WAIT) {
		/* Check if no block was passed */
		*count = wait_eventlink->el_sync_counter;
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		/* Update the wait counter and add thread to waitq */
		wait_eventlink->el_wait_counter = *count;
		old_signal_count = wait_eventlink->el_sync_counter;

		thread_set_pending_block_hint(self, kThreadWaitEventlink);
		(void)waitq_assert_wait64_locked(
			&ipc_eventlink_base->elb_waitq,
			CAST_EVENT64_T(wait_eventlink),
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		eventlink_option |= IPC_EVENTLINK_HANDOFF;
	}

	/* Check if we need to signal the other side of eventlink */
	if (signal_eventlink != IPC_EVENTLINK_NULL) {
		kern_return_t signal_kr;
		signal_kr = ipc_eventlink_signal_internal_locked(signal_eventlink,
		    eventlink_option);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
		}
	}

	if (kr != KERN_ALREADY_WAITING) {
		goto unlock;
	}

	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}

	ipc_eventlink_unlock(wait_eventlink);
	splx(s);

	wr = thread_handoff_deallocate(handoff_thread, handoff_option);
	kr = ipc_eventlink_convert_wait_result(wr);

	assert(self->handoff_thread == THREAD_NULL);

	/* Increment the count value if eventlink_signal was called */
	if (kr == KERN_SUCCESS) {
		*count += 1;
	} else {
		*count = old_signal_count;
	}

	return kr;

unlock:
	ipc_eventlink_unlock(wait_eventlink);
	splx(s);
	assert(self->handoff_thread == THREAD_NULL);

	return kr;
}

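/*
 * Counter protocol (summary of the logic above, for reference):
 *
 * Each endpoint tracks how many times it has been signalled in
 * el_sync_counter, and publishes the count it is currently waiting on in
 * el_wait_counter (UINT64_MAX means "nobody is waiting").
 *
 *   wait(count):
 *     if (count < el_sync_counter)              return immediately with the counter
 *     else                                      el_wait_counter = count; block
 *
 *   signal():
 *     el_sync_counter++;
 *     if (el_sync_counter > el_wait_counter)    wake the waiter
 *
 * Example: a waiter blocks with count = 3 while el_sync_counter is 3; the
 * fourth signal makes el_sync_counter (4) exceed el_wait_counter (3), the
 * waiter is woken with THREAD_AWAKENED, and the trap reports count = 4.
 */
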
/*
 * Name: ipc_eventlink_convert_wait_result
 *
 * Description: Convert wait result to return value
 * for wait trap.
 *
 * Args:
 * wait_result: result from thread handoff
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result)
{
	switch (wait_result) {
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_TIMED_OUT:
		return KERN_OPERATION_TIMED_OUT;

	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_RESTART:
		return KERN_TERMINATED;

	default:
		panic("ipc_eventlink_wait_block");
		return KERN_FAILURE;
	}
}

/*
 * Name: ipc_eventlink_signal_internal_locked
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than wake counter.
 *
 * Args:
 * eventlink: eventlink
 * ipc_eventlink_option_t: options
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal_internal_locked(
	struct ipc_eventlink *signal_eventlink,
	ipc_eventlink_option_t eventlink_option)
{
	kern_return_t kr = KERN_NOT_WAITING;
	struct ipc_eventlink_base *ipc_eventlink_base = signal_eventlink->el_base;

	if (eventlink_option & IPC_EVENTLINK_FORCE_WAKEUP) {
		/* Adjust the wait counter */
		signal_eventlink->el_wait_counter = UINT64_MAX;

		kr = waitq_wakeup64_all_locked(
			&ipc_eventlink_base->elb_waitq,
			CAST_EVENT64_T(signal_eventlink),
			THREAD_RESTART, WAITQ_ALL_PRIORITIES,
			WAITQ_KEEP_LOCKED);
		return kr;
	}

	/* Increment the eventlink sync count */
	signal_eventlink->el_sync_counter++;

	/* Check if thread needs to be woken up */
	if (signal_eventlink->el_sync_counter > signal_eventlink->el_wait_counter) {
		waitq_options_t wq_option = (eventlink_option & IPC_EVENTLINK_HANDOFF) ?
		    WQ_OPTION_HANDOFF : WQ_OPTION_NONE;

		/* Adjust the wait counter */
		signal_eventlink->el_wait_counter = UINT64_MAX;

		kr = waitq_wakeup64_one_locked(
			&ipc_eventlink_base->elb_waitq,
			CAST_EVENT64_T(signal_eventlink),
			THREAD_AWAKENED, WAITQ_ALL_PRIORITIES,
			WAITQ_KEEP_LOCKED, wq_option);
	}

	return kr;
}

/*
 * Name: ipc_eventlink_reference
 *
 * Description: Increment ref on ipc eventlink struct
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_reference(
	struct ipc_eventlink *ipc_eventlink)
{
	os_ref_retain(&ipc_eventlink->el_base->elb_ref_count);
}

/*
 * Name: ipc_eventlink_deallocate
 *
 * Description: Decrement ref on ipc eventlink struct
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_deallocate(
	struct ipc_eventlink *ipc_eventlink)
{
	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return;
	}

	struct ipc_eventlink_base *ipc_eventlink_base = ipc_eventlink->el_base;

	if (os_ref_release(&ipc_eventlink_base->elb_ref_count) > 0) {
		return;
	}

	waitq_deinit(&ipc_eventlink_base->elb_waitq);

	assert(!ipc_eventlink_active(ipc_eventlink));

#if DEVELOPMENT || DEBUG
	/* Remove ipc_eventlink from the global list */
	global_ipc_eventlink_lock();
	queue_remove(&ipc_eventlink_list, ipc_eventlink_base,
	    struct ipc_eventlink_base *, elb_global_elm);
	global_ipc_eventlink_unlock();
#endif
	zfree(ipc_eventlink_zone, ipc_eventlink_base);
}

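/*
 * Reference-counting discipline (summary, for reference):
 *
 * Both endpoints share the single elb_ref_count on the base, which starts
 * at 2 (one reference donated to each kobject port, see
 * ipc_eventlink_initialize). Lookups such as convert_port_to_eventlink()
 * and port_name_to_eventlink() take an extra reference that the caller is
 * expected to drop:
 *
 *   kr = port_name_to_eventlink(name, &el);
 *   if (kr == KERN_SUCCESS) {
 *           ... use el ...
 *           ipc_eventlink_deallocate(el);   // drop the lookup reference
 *   }
 *
 * The base is freed only when the last reference is released above.
 */
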
/*
 * Name: convert_port_to_eventlink
 *
 * Description: Convert from a port to an ipc eventlink.
 * Produces an ipc eventlink ref, which may be null.
 *
 * Args:
 * port: eventlink port
 *
 * Returns:
 * ipc_eventlink on Success.
 */
struct ipc_eventlink *
convert_port_to_eventlink(
	mach_port_t port)
{
	struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		convert_port_to_eventlink_locked(port, &ipc_eventlink);
		ip_mq_unlock(port);
	}

	return ipc_eventlink;
}

/*
 * Name: convert_port_to_eventlink_locked
 *
 * Description: Convert from a locked port to an ipc eventlink.
 * Produces an ipc eventlink ref, which may be null.
 *
 * Args:
 * port: eventlink port
 * ipc_eventlink_ptr: pointer to return ipc_eventlink.
 *
 * Returns:
 * KERN_SUCCESS on Success.
 * KERN_TERMINATED on inactive eventlink.
 */
static kern_return_t
convert_port_to_eventlink_locked(
	ipc_port_t port,
	struct ipc_eventlink **ipc_eventlink_ptr)
{
	kern_return_t kr = KERN_INVALID_CAPABILITY;
	struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

	if (ip_active(port) && ip_kotype(port) == IKOT_EVENTLINK) {
		ipc_eventlink = ipc_kobject_get_raw(port, IKOT_EVENTLINK);
		if (ipc_eventlink) {
			ipc_eventlink_reference(ipc_eventlink);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_TERMINATED;
		}
	}

	*ipc_eventlink_ptr = ipc_eventlink;
	return kr;
}

/*
 * Name: port_name_to_eventlink
 *
 * Description: Convert from a port name in the current
 * space to an ipc eventlink. Produces an ipc eventlink ref,
 * which may be null.
 *
 * Args:
 * name: eventlink port name
 * ipc_eventlink_ptr: pointer to return ipc_eventlink.
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
port_name_to_eventlink(
	mach_port_name_t name,
	struct ipc_eventlink **ipc_eventlink_ptr)
{
	ipc_port_t kern_port;
	kern_return_t kr;

	if (!MACH_PORT_VALID(name)) {
		*ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
		return KERN_INVALID_NAME;
	}

	kr = ipc_port_translate_send(current_space(), name, &kern_port);
	if (kr != KERN_SUCCESS) {
		*ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
		return kr;
	}
	/* have the port locked */
	assert(IP_VALID(kern_port));

	kr = convert_port_to_eventlink_locked(kern_port, ipc_eventlink_ptr);
	ip_mq_unlock(kern_port);

	return kr;
}

/*
 * Name: ipc_eventlink_no_senders
 *
 * Description: Destroy an ipc_eventlink, wakeup all threads.
 *
 * Returns:
 * None.
 */
static void
ipc_eventlink_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
{
	kern_return_t kr;
	struct ipc_eventlink *ipc_eventlink;

	if (!ip_active(port)) {
		return;
	}

	/* Get ipc_eventlink reference */
	ip_mq_lock(port);

	/* Make sure port is still active */
	if (!ip_active(port)) {
		ip_mq_unlock(port);
		return;
	}

	convert_port_to_eventlink_locked(port, &ipc_eventlink);
	ip_mq_unlock(port);

	kr = ipc_eventlink_destroy_internal(ipc_eventlink);
	if (kr == KERN_TERMINATED) {
		/* eventlink is already inactive, destroy the port */
		ipc_kobject_dealloc_port(port, mscount, IKOT_EVENTLINK);
	}

	/* Drop the reference returned by convert_port_to_eventlink_locked */
	ipc_eventlink_deallocate(ipc_eventlink);
}

#define WAITQ_TO_EVENTLINK(wq) ((struct ipc_eventlink_base *) ((uintptr_t)(wq) - offsetof(struct ipc_eventlink_base, elb_waitq)))

/*
 * Name: kdp_eventlink_find_owner
 *
 * Description: Find who will signal the waiting thread.
 *
 * Args:
 * waitq: eventlink waitq
 * wait_event: eventlink wait event
 * waitinfo: waitinfo struct
 *
 * Returns:
 * None.
 */
void
kdp_eventlink_find_owner(
	struct waitq *waitq,
	event64_t event,
	thread_waitinfo_t *waitinfo)
{
	assert(waitinfo->wait_type == kThreadWaitEventlink);
	waitinfo->owner = 0;
	waitinfo->context = 0;

	if (waitq_held(waitq)) {
		return;
	}

	struct ipc_eventlink_base *ipc_eventlink_base = WAITQ_TO_EVENTLINK(waitq);

	if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[0])) {
		/* Use the other end of eventlink for signal thread */
		if (ipc_eventlink_base->elb_eventlink[1].el_thread != THREAD_ASSOCIATE_WILD) {
			waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[1].el_thread);
		} else {
			waitinfo->owner = 0;
		}
	} else if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[1])) {
		/* Use the other end of eventlink for signal thread */
		if (ipc_eventlink_base->elb_eventlink[0].el_thread != THREAD_ASSOCIATE_WILD) {
			waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[0].el_thread);
		} else {
			waitinfo->owner = 0;
		}
	}

	return;
}