xref: /xnu-8019.80.24/osfmk/ipc/ipc_eventlink.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/mach_types.h>
30 #include <mach/mach_traps.h>
31 #include <mach/kern_return.h>
32 #include <mach/sync_policy.h>
33 #include <mach/task.h>
34 
35 #include <kern/misc_protos.h>
36 #include <kern/spl.h>
37 #include <kern/ipc_tt.h>
38 #include <kern/thread.h>
39 #include <kern/clock.h>
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
42 #include <ipc/ipc_eventlink.h>
43 #include <kern/host.h>
44 #include <kern/waitq.h>
45 #include <kern/zalloc.h>
46 #include <kern/mach_param.h>
47 #include <mach/mach_traps.h>
48 #include <mach/mach_eventlink_server.h>
49 
50 #include <libkern/OSAtomic.h>
51 
/*
 * Allocation zone for eventlinks. Each allocation is a whole
 * ipc_eventlink_base (both sides of the pair share one base).
 * ZC_ZFREE_CLEARMEM zeroes elements on free.
 */
static ZONE_DECLARE(ipc_eventlink_zone, "ipc_eventlink",
    sizeof(struct ipc_eventlink_base),
#if CONFIG_WAITQ_IRQSAFE_ALLOW_INVALID
    ZC_NOGZALLOC | ZC_KASAN_NOQUARANTINE | ZC_SEQUESTER |
#endif
    ZC_ZFREE_CLEARMEM);
58 
/* Reference-count group for eventlink base refcounts. */
os_refgrp_decl(static, ipc_eventlink_refgrp, "eventlink", NULL);

#if DEVELOPMENT || DEBUG
/*
 * On DEVELOPMENT/DEBUG kernels every ipc_eventlink_base is linked on a
 * global list (protected by a dedicated spin lock) so all live
 * eventlinks can be enumerated for debugging.
 */
static queue_head_t ipc_eventlink_list = QUEUE_HEAD_INITIALIZER(ipc_eventlink_list);
static LCK_GRP_DECLARE(ipc_eventlink_dev_lock_grp, "ipc_eventlink_dev_lock");
static LCK_SPIN_DECLARE(global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp);

#define global_ipc_eventlink_lock() \
	lck_spin_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_lock_try() \
	lck_spin_try_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_unlock() \
	lck_spin_unlock(&global_ipc_eventlink_lock)

#endif /* DEVELOPMENT || DEBUG */
74 
/* Forward declarations */
static void
ipc_eventlink_no_senders(
	ipc_port_t          port,
	mach_port_mscount_t mscount);

static struct ipc_eventlink_base *
ipc_eventlink_alloc(void);

static void
ipc_eventlink_initialize(
	struct ipc_eventlink_base *ipc_eventlink_base);

static kern_return_t
ipc_eventlink_destroy_internal(
	struct ipc_eventlink *ipc_eventlink);

static kern_return_t
ipc_eventlink_signal(
	struct ipc_eventlink *ipc_eventlink);

static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
	mach_port_name_t                     wait_port,
	mach_port_name_t                     signal_port,
	uint64_t                             count,
	mach_eventlink_signal_wait_option_t  el_option,
	kern_clock_id_t                      clock_id,
	uint64_t                             deadline);

static kern_return_t
ipc_eventlink_signal_wait_internal(
	struct ipc_eventlink        *wait_eventlink,
	struct ipc_eventlink        *signal_eventlink,
	uint64_t                    deadline,
	uint64_t                    *count,
	ipc_eventlink_option_t      eventlink_option);

static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result);

static kern_return_t
ipc_eventlink_signal_internal_locked(
	struct ipc_eventlink         *signal_eventlink,
	ipc_eventlink_option_t       eventlink_option);

static kern_return_t
convert_port_to_eventlink_locked(
	ipc_port_t                      port,
	struct ipc_eventlink            **ipc_eventlink_ptr);

static kern_return_t
port_name_to_eventlink(
	mach_port_name_t              name,
	struct ipc_eventlink          **ipc_eventlink_ptr);

/*
 * Register the kobject operations for IKOT_EVENTLINK ports; the
 * no-senders callback runs when a port loses all of its send rights.
 */
IPC_KOBJECT_DEFINE(IKOT_EVENTLINK,
    .iko_op_no_senders = ipc_eventlink_no_senders);
133 
/*
 * Name: ipc_eventlink_alloc
 *
 * Description: Allocates an ipc_eventlink_base from the eventlink zone
 * and initializes both sides of the pair.
 *
 * Args: None.
 *
 * Returns:
 *   ipc_eventlink_base on Success.
 */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void)
{
	struct ipc_eventlink_base *ipc_eventlink_base = IPC_EVENTLINK_BASE_NULL;
	/* NOTE(review): no NULL check here — presumably zalloc blocks until it succeeds; confirm zone semantics */
	ipc_eventlink_base = zalloc(ipc_eventlink_zone);

	ipc_eventlink_initialize(ipc_eventlink_base);

#if DEVELOPMENT || DEBUG
	/* Add ipc_eventlink to global list */
	global_ipc_eventlink_lock();
	queue_enter(&ipc_eventlink_list, ipc_eventlink_base,
	    struct ipc_eventlink_base *, elb_global_elm);
	global_ipc_eventlink_unlock();
#endif
	return ipc_eventlink_base;
}
161 
/*
 * Name: ipc_eventlink_initialize
 *
 * Description: Initializes ipc eventlink struct: sets up the shared
 * refcount, allocates a kobject port for each side of the pair, and
 * initializes the shared waitq.
 *
 * Args: ipc eventlink base.
 *
 * Returns: None.
 */
static void
ipc_eventlink_initialize(
	struct ipc_eventlink_base *ipc_eventlink_base)
{
	/* Initialize the count to 2, refs for each ipc eventlink port */
	os_ref_init_count(&ipc_eventlink_base->elb_ref_count, &ipc_eventlink_refgrp, 2);
	ipc_eventlink_base->elb_type = IPC_EVENTLINK_TYPE_NO_COPYIN;

	for (int i = 0; i < 2; i++) {
		struct ipc_eventlink *ipc_eventlink = &(ipc_eventlink_base->elb_eventlink[i]);

		/* Each side gets its own port carrying a made send right and a no-senders request */
		ipc_eventlink->el_port = ipc_kobject_alloc_port((ipc_kobject_t)ipc_eventlink,
		    IKOT_EVENTLINK, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		/* ipc_kobject_alloc_port never fails */
		ipc_eventlink->el_thread = THREAD_NULL;
		ipc_eventlink->el_sync_counter = 0;
		/* UINT64_MAX means "no waiter"; signals never exceed it */
		ipc_eventlink->el_wait_counter = UINT64_MAX;
		ipc_eventlink->el_base = ipc_eventlink_base;
	}

	/* Must be done last */
	waitq_init(&ipc_eventlink_base->elb_waitq, SYNC_POLICY_DISABLE_IRQ);
}
195 
196 /*
197  * Name: mach_eventlink_create
198  *
199  * Description: Allocates an ipc_eventlink struct and initializes it.
200  *
201  * Args:
202  *   task : task port of the process
203  *   mach_eventlink_create_option_t: option
204  *   eventlink_port_pair: eventlink port array
205  *
206  * Returns:
207  *   KERN_SUCCESS on Success.
208  */
209 kern_return_t
mach_eventlink_create(task_t task,mach_eventlink_create_option_t elc_option,eventlink_port_pair_t eventlink_port_pair)210 mach_eventlink_create(
211 	task_t                             task,
212 	mach_eventlink_create_option_t     elc_option,
213 	eventlink_port_pair_t              eventlink_port_pair)
214 {
215 	int i;
216 	struct ipc_eventlink_base *ipc_eventlink_base;
217 
218 	if (task == TASK_NULL || task != current_task() ||
219 	    elc_option != MELC_OPTION_NO_COPYIN) {
220 		return KERN_INVALID_ARGUMENT;
221 	}
222 
223 	ipc_eventlink_base = ipc_eventlink_alloc();
224 
225 	for (i = 0; i < 2; i++) {
226 		eventlink_port_pair[i] = ipc_eventlink_base->elb_eventlink[i].el_port;
227 	}
228 
229 	return KERN_SUCCESS;
230 }
231 
232 /*
233  * Name: mach_eventlink_destroy
234  *
235  * Description: Destroy an ipc_eventlink, wakeup all threads.
236  *
237  * Args:
238  *   eventlink: eventlink
239  *
240  * Returns:
241  *   KERN_SUCCESS on Success.
242  */
243 kern_return_t
mach_eventlink_destroy(struct ipc_eventlink * ipc_eventlink)244 mach_eventlink_destroy(
245 	struct ipc_eventlink *ipc_eventlink)
246 {
247 	ipc_eventlink_destroy_internal(ipc_eventlink);
248 
249 	/* mach_eventlink_destroy should succeed for terminated eventlink */
250 	return KERN_SUCCESS;
251 }
252 
/*
 * Name: ipc_eventlink_destroy_internal
 *
 * Description: Destroy an ipc_eventlink: force-wake all waiting
 * threads, destroy the port on which destroy was called, and disable
 * (but keep alive) the remote side's port.
 *
 * Args:
 *   eventlink: eventlink
 *
 * Returns:
 *   KERN_SUCCESS on Success.
 *   KERN_TERMINATED if the eventlink is null or already inactive.
 */
static kern_return_t
ipc_eventlink_destroy_internal(
	struct ipc_eventlink *ipc_eventlink)
{
	spl_t s;
	struct ipc_eventlink_base *ipc_eventlink_base;
	thread_t associated_thread[2] = {};
	ipc_port_t ipc_eventlink_port = IPC_PORT_NULL;
	ipc_port_t ipc_eventlink_port_remote = IPC_PORT_NULL;

	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return KERN_TERMINATED;
	}

	/* Interrupts stay disabled while the eventlink lock is held */
	s = splsched();
	ipc_eventlink_lock(ipc_eventlink);

	ipc_eventlink_base = ipc_eventlink->el_base;

	/* Check if the eventlink is active */
	if (!ipc_eventlink_active(ipc_eventlink)) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_TERMINATED;
	}

	for (int i = 0; i < 2; i++) {
		struct ipc_eventlink *temp_ipc_eventlink = &ipc_eventlink_base->elb_eventlink[i];

		/* Wakeup threads sleeping on eventlink */
		if (temp_ipc_eventlink->el_thread) {
			/* Stash the thread; its ref is dropped after the lock is released */
			associated_thread[i] = temp_ipc_eventlink->el_thread;
			temp_ipc_eventlink->el_thread = THREAD_NULL;

			ipc_eventlink_signal_internal_locked(temp_ipc_eventlink,
			    IPC_EVENTLINK_FORCE_WAKEUP);
		}

		/* Only destroy the port on which destroy was called */
		if (temp_ipc_eventlink == ipc_eventlink) {
			ipc_eventlink_port = temp_ipc_eventlink->el_port;
			assert(ipc_eventlink_port != IPC_PORT_NULL);
		} else {
			/* Do not destroy the remote port, else eventlink_destroy will fail */
			ipc_eventlink_port_remote = temp_ipc_eventlink->el_port;
			assert(ipc_eventlink_port_remote != IPC_PORT_NULL);
			/*
			 * Take a reference on the remote port, since it could go
			 * away after eventlink lock is dropped.
			 */
			ip_reference(ipc_eventlink_port_remote);
		}
		assert(temp_ipc_eventlink->el_port != IPC_PORT_NULL);
		temp_ipc_eventlink->el_port = IPC_PORT_NULL;
	}

	/* Mark the eventlink as inactive */
	waitq_invalidate(&ipc_eventlink_base->elb_waitq);

	ipc_eventlink_unlock(ipc_eventlink);
	splx(s);

	/* Destroy the local eventlink port */
	ipc_kobject_dealloc_port(ipc_eventlink_port, 0, IKOT_EVENTLINK);
	/* Drops port reference */

	/* Clear the remote eventlink port without destroying it */
	(void)ipc_kobject_disable(ipc_eventlink_port_remote, IKOT_EVENTLINK);
	ip_release(ipc_eventlink_port_remote);

	for (int i = 0; i < 2; i++) {
		if (associated_thread[i] != THREAD_NULL &&
		    associated_thread[i] != THREAD_ASSOCIATE_WILD) {
			thread_deallocate(associated_thread[i]);
		}

		/* Drop the eventlink reference given to port */
		ipc_eventlink_deallocate(ipc_eventlink);
	}
	return KERN_SUCCESS;
}
345 
346 /*
347  * Name: mach_eventlink_associate
348  *
349  * Description: Associate a thread to eventlink.
350  *
351  * Args:
352  *   eventlink: eventlink
353  *   thread: thread needs to be associated
354  *   copyin_addr_wait: copyin addr for wait
355  *   copyin_mask_wait: copyin mask for wait
356  *   copyin_addr_signal: copyin addr for signal
357  *   copyin_mask_signal: copyin mask for signal
358  *   mach_eventlink_associate_option_t: option for eventlink associate
359  *
360  * Returns:
361  *   KERN_SUCCESS on Success.
362  */
363 kern_return_t
mach_eventlink_associate(struct ipc_eventlink * ipc_eventlink,thread_t thread,mach_vm_address_t copyin_addr_wait,uint64_t copyin_mask_wait,mach_vm_address_t copyin_addr_signal,uint64_t copyin_mask_signal,mach_eventlink_associate_option_t ela_option)364 mach_eventlink_associate(
365 	struct ipc_eventlink                  *ipc_eventlink,
366 	thread_t                              thread,
367 	mach_vm_address_t                     copyin_addr_wait,
368 	uint64_t                              copyin_mask_wait,
369 	mach_vm_address_t                     copyin_addr_signal,
370 	uint64_t                              copyin_mask_signal,
371 	mach_eventlink_associate_option_t     ela_option)
372 {
373 	spl_t s;
374 
375 	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
376 		return KERN_TERMINATED;
377 	}
378 
379 	if (copyin_addr_wait != 0 || copyin_mask_wait != 0 ||
380 	    copyin_addr_signal != 0 || copyin_mask_signal != 0) {
381 		return KERN_INVALID_ARGUMENT;
382 	}
383 
384 	if ((thread == NULL && ela_option == MELA_OPTION_NONE) ||
385 	    (thread != NULL && ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT)) {
386 		return KERN_INVALID_ARGUMENT;
387 	}
388 
389 	s = splsched();
390 	ipc_eventlink_lock(ipc_eventlink);
391 
392 	/* Check if eventlink is terminated */
393 	if (!ipc_eventlink_active(ipc_eventlink)) {
394 		ipc_eventlink_unlock(ipc_eventlink);
395 		splx(s);
396 		return KERN_TERMINATED;
397 	}
398 
399 	if (ipc_eventlink->el_thread != NULL) {
400 		ipc_eventlink_unlock(ipc_eventlink);
401 		splx(s);
402 		return KERN_NAME_EXISTS;
403 	}
404 
405 	if (ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT) {
406 		ipc_eventlink->el_thread = THREAD_ASSOCIATE_WILD;
407 	} else {
408 		thread_reference(thread);
409 		ipc_eventlink->el_thread = thread;
410 	}
411 
412 	ipc_eventlink_unlock(ipc_eventlink);
413 	splx(s);
414 	return KERN_SUCCESS;
415 }
416 
/*
 * Name: mach_eventlink_disassociate
 *
 * Description: Disassociate a thread from eventlink.
 * Wake up the associated thread if blocked on eventlink.
 *
 * Args:
 *   eventlink: eventlink
 *   mach_eventlink_option_t: option for eventlink disassociate
 *     (only MELD_OPTION_NONE is accepted)
 *
 * Returns:
 *   KERN_SUCCESS on Success.
 *   KERN_TERMINATED if the eventlink is null or inactive.
 *   KERN_INVALID_ARGUMENT if no thread is associated or option is bad.
 */
kern_return_t
mach_eventlink_disassociate(
	struct ipc_eventlink                   *ipc_eventlink,
	mach_eventlink_disassociate_option_t   eld_option)
{
	spl_t s;
	thread_t thread;

	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return KERN_TERMINATED;
	}

	if (eld_option != MELD_OPTION_NONE) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	ipc_eventlink_lock(ipc_eventlink);

	/* Check if eventlink is terminated */
	if (!ipc_eventlink_active(ipc_eventlink)) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_TERMINATED;
	}

	if (ipc_eventlink->el_thread == NULL) {
		ipc_eventlink_unlock(ipc_eventlink);
		splx(s);
		return KERN_INVALID_ARGUMENT;
	}

	thread = ipc_eventlink->el_thread;
	ipc_eventlink->el_thread = NULL;

	/* wake up the thread if blocked (force wakeup delivers THREAD_RESTART) */
	ipc_eventlink_signal_internal_locked(ipc_eventlink,
	    IPC_EVENTLINK_FORCE_WAKEUP);

	ipc_eventlink_unlock(ipc_eventlink);
	splx(s);

	/* The wildcard association carries no thread reference to drop */
	if (thread != THREAD_ASSOCIATE_WILD) {
		thread_deallocate(thread);
	}
	return KERN_SUCCESS;
}
477 
478 /*
479  * Name: mach_eventlink_signal_trap
480  *
481  * Description: Increment the sync count of eventlink and
482  * wake up the thread waiting if sync counter is greater
483  * than wake counter.
484  *
485  * Args:
486  *   eventlink: eventlink
487  *
488  * Returns:
489  *   uint64_t: Contains count and error codes.
490  */
491 uint64_t
mach_eventlink_signal_trap(mach_port_name_t port,uint64_t signal_count __unused)492 mach_eventlink_signal_trap(
493 	mach_port_name_t port,
494 	uint64_t         signal_count __unused)
495 {
496 	struct ipc_eventlink *ipc_eventlink;
497 	kern_return_t kr;
498 	uint64_t retval = 0;
499 
500 	kr = port_name_to_eventlink(port, &ipc_eventlink);
501 	if (kr == KERN_SUCCESS) {
502 		/* Signal the remote side of the eventlink */
503 		kr = ipc_eventlink_signal(eventlink_remote_side(ipc_eventlink));
504 
505 		/* Deallocate ref returned by port_name_to_eventlink */
506 		ipc_eventlink_deallocate(ipc_eventlink);
507 	}
508 
509 	retval = encode_eventlink_count_and_error(0, kr);
510 	return retval;
511 }
512 
513 /*
514  * Name: ipc_eventlink_signal
515  *
516  * Description: Increment the sync count of eventlink and
517  * wake up the thread waiting if sync counter is greater
518  * than wake counter.
519  *
520  * Args:
521  *   eventlink: eventlink
522  *
523  * Returns:
524  *   KERN_SUCCESS on Success.
525  */
526 static kern_return_t
ipc_eventlink_signal(struct ipc_eventlink * ipc_eventlink)527 ipc_eventlink_signal(
528 	struct ipc_eventlink *ipc_eventlink)
529 {
530 	kern_return_t kr;
531 	spl_t s;
532 
533 	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
534 		return KERN_INVALID_ARGUMENT;
535 	}
536 
537 	s = splsched();
538 	ipc_eventlink_lock(ipc_eventlink);
539 
540 	/* Check if eventlink is terminated */
541 	if (!ipc_eventlink_active(ipc_eventlink)) {
542 		ipc_eventlink_unlock(ipc_eventlink);
543 		splx(s);
544 		return KERN_TERMINATED;
545 	}
546 
547 	kr = ipc_eventlink_signal_internal_locked(ipc_eventlink,
548 	    IPC_EVENTLINK_NONE);
549 
550 	ipc_eventlink_unlock(ipc_eventlink);
551 	splx(s);
552 
553 	if (kr == KERN_NOT_WAITING) {
554 		kr = KERN_SUCCESS;
555 	}
556 
557 	return kr;
558 }
559 
/*
 * Name: mach_eventlink_wait_until_trap
 *
 * Description: Wait until local signal count exceeds the
 * specified count or deadline passes. Thin wrapper that waits
 * without signaling (signal port is MACH_PORT_NULL).
 *
 * Args:
 *   eventlink_port: eventlink port for wait
 *   wait_count: signal count to wait on
 *   option: eventlink option
 *   clock_id: clock id
 *   deadline: deadline in mach_absolute_time
 *
 * Returns:
 *   uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_wait_until_trap(
	mach_port_name_t                    eventlink_port,
	uint64_t                            wait_count,
	mach_eventlink_signal_wait_option_t option,
	kern_clock_id_t                     clock_id,
	uint64_t                            deadline)
{
	return ipc_eventlink_signal_wait_until_trap_internal(
		eventlink_port,
		MACH_PORT_NULL,
		wait_count,
		option,
		clock_id,
		deadline);
}
592 
/*
 * Name: mach_eventlink_signal_wait_until_trap
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes. Passing the same port
 * for wait and signal makes the internal routine signal the
 * remote side of this eventlink.
 *
 * Args:
 *   eventlink_port: eventlink port for wait and signal
 *   wait_count: signal count to wait on
 *   signal_count: unused
 *   option: eventlink option
 *   clock_id: clock id
 *   deadline: deadline in mach_absolute_time
 *
 * Returns:
 *   uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_signal_wait_until_trap(
	mach_port_name_t                    eventlink_port,
	uint64_t                            wait_count,
	uint64_t                            signal_count __unused,
	mach_eventlink_signal_wait_option_t option,
	kern_clock_id_t                     clock_id,
	uint64_t                            deadline)
{
	return ipc_eventlink_signal_wait_until_trap_internal(
		eventlink_port,
		eventlink_port,
		wait_count,
		option,
		clock_id,
		deadline);
}
627 
/*
 * Name: ipc_eventlink_signal_wait_until_trap_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 *   wait_port: eventlink port for wait
 *   signal_port: eventlink port for signal (MACH_PORT_NULL for wait-only)
 *   count: signal count to wait on
 *   el_option: eventlink option
 *   clock_id: clock id (only KERN_CLOCK_MACH_ABSOLUTE_TIME supported)
 *   deadline: deadline in mach_absolute_time
 *
 * Returns:
 *   uint64_t: contains signal count and error codes
 */
static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
	mach_port_name_t                     wait_port,
	mach_port_name_t                     signal_port,
	uint64_t                             count,
	mach_eventlink_signal_wait_option_t  el_option,
	kern_clock_id_t                      clock_id,
	uint64_t                             deadline)
{
	struct ipc_eventlink *wait_ipc_eventlink = IPC_EVENTLINK_NULL;
	struct ipc_eventlink *signal_ipc_eventlink = IPC_EVENTLINK_NULL;
	kern_return_t kr;
	ipc_eventlink_option_t ipc_eventlink_option = IPC_EVENTLINK_NONE;

	if (clock_id != KERN_CLOCK_MACH_ABSOLUTE_TIME) {
		return encode_eventlink_count_and_error(count, KERN_INVALID_ARGUMENT);
	}

	kr = port_name_to_eventlink(wait_port, &wait_ipc_eventlink);
	/* On lookup failure, kr is propagated into the encoded return below */
	if (kr == KERN_SUCCESS) {
		assert(wait_ipc_eventlink != IPC_EVENTLINK_NULL);

		/* Get the remote side of eventlink for signal */
		if (signal_port != MACH_PORT_NULL) {
			signal_ipc_eventlink = eventlink_remote_side(wait_ipc_eventlink);
		}

		if (el_option & MELSW_OPTION_NO_WAIT) {
			ipc_eventlink_option |= IPC_EVENTLINK_NO_WAIT;
		}

		kr = ipc_eventlink_signal_wait_internal(wait_ipc_eventlink,
		    signal_ipc_eventlink, deadline,
		    &count, ipc_eventlink_option);

		/* release ref returned by port_name_to_eventlink */
		ipc_eventlink_deallocate(wait_ipc_eventlink);
	}
	return encode_eventlink_count_and_error(count, kr);
}
686 
/*
 * Name: ipc_eventlink_signal_wait_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes. When both a wait is armed
 * and a signal is sent, the wakeup may hand the CPU off directly
 * to the woken thread (IPC_EVENTLINK_HANDOFF).
 *
 * Args:
 *   wait_eventlink: eventlink for wait
 *   signal_eventlink: eventlink for signal (may be IPC_EVENTLINK_NULL)
 *   deadline: deadline in mach_absolute_time
 *   count_ptr: signal count to wait on
 *   el_option: eventlink option
 *
 * Returns:
 *   KERN_SUCCESS on Success.
 *   signal count is returned implicitly in count arg.
 */
static kern_return_t
ipc_eventlink_signal_wait_internal(
	struct ipc_eventlink        *wait_eventlink,
	struct ipc_eventlink        *signal_eventlink,
	uint64_t                    deadline,
	uint64_t                    *count,
	ipc_eventlink_option_t      eventlink_option)
{
	spl_t s;
	/* KERN_ALREADY_WAITING is a sentinel meaning "a wait was armed" */
	kern_return_t kr = KERN_ALREADY_WAITING;
	thread_t self = current_thread();
	struct ipc_eventlink_base *ipc_eventlink_base = wait_eventlink->el_base;
	thread_t handoff_thread = THREAD_NULL;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
	/* Only assigned (and only read) on the armed-wait path below */
	uint64_t old_signal_count;
	wait_result_t wr;

	s = splsched();
	ipc_eventlink_lock(wait_eventlink);

	/* Check if eventlink is terminated */
	if (!ipc_eventlink_active(wait_eventlink)) {
		kr = KERN_TERMINATED;
		goto unlock;
	}

	/* Check if waiting thread is associated to eventlink */
	if (wait_eventlink->el_thread != THREAD_ASSOCIATE_WILD &&
	    wait_eventlink->el_thread != self) {
		kr = KERN_INVALID_ARGUMENT;
		goto unlock;
	}

	/* Check if thread already waiting for associate on wait case */
	if (wait_eventlink->el_thread == THREAD_ASSOCIATE_WILD &&
	    wait_eventlink->el_wait_counter != UINT64_MAX) {
		kr = KERN_INVALID_ARGUMENT;
		goto unlock;
	}

	/* Check if the signal count exceeds the count provided */
	if (*count < wait_eventlink->el_sync_counter) {
		/* Already satisfied: return the current counter without blocking */
		*count = wait_eventlink->el_sync_counter;
		kr = KERN_SUCCESS;
	} else if (eventlink_option & IPC_EVENTLINK_NO_WAIT) {
		/* Check if no block was passed */
		*count =  wait_eventlink->el_sync_counter;
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		/* Update the wait counter and add thread to waitq */
		wait_eventlink->el_wait_counter = *count;
		old_signal_count = wait_eventlink->el_sync_counter;

		thread_set_pending_block_hint(self, kThreadWaitEventlink);
		(void)waitq_assert_wait64_locked(
			&ipc_eventlink_base->elb_waitq,
			CAST_EVENT64_T(wait_eventlink),
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		/* Ask the signal below to hand off directly to the woken thread */
		eventlink_option |= IPC_EVENTLINK_HANDOFF;
	}

	/* Check if we need to signal the other side of eventlink */
	if (signal_eventlink != IPC_EVENTLINK_NULL) {
		kern_return_t signal_kr;
		signal_kr = ipc_eventlink_signal_internal_locked(signal_eventlink,
		    eventlink_option);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
		}
	}

	/* Anything other than the sentinel means no wait was armed */
	if (kr != KERN_ALREADY_WAITING) {
		goto unlock;
	}

	/* The wakeup (with WQ_OPTION_HANDOFF) may have stashed a handoff target */
	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}

	ipc_eventlink_unlock(wait_eventlink);
	splx(s);

	/* Block; hand the CPU to handoff_thread if one was captured */
	wr = thread_handoff_deallocate(handoff_thread, handoff_option);
	kr = ipc_eventlink_convert_wait_result(wr);

	assert(self->handoff_thread == THREAD_NULL);

	/* Increment the count value if eventlink_signal was called */
	if (kr == KERN_SUCCESS) {
		*count += 1;
	} else {
		/* Wait failed: report the counter value observed when the wait was armed */
		*count = old_signal_count;
	}

	return kr;

unlock:
	ipc_eventlink_unlock(wait_eventlink);
	splx(s);
	assert(self->handoff_thread == THREAD_NULL);

	return kr;
}
815 
816 /*
817  * Name: ipc_eventlink_convert_wait_result
818  *
819  * Description: Convert wait result to return value
820  * for wait trap.
821  *
822  * Args:
823  *   wait_result: result from thread handoff
824  *
825  * Returns:
826  *   KERN_SUCCESS on Success.
827  */
828 static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result)829 ipc_eventlink_convert_wait_result(int wait_result)
830 {
831 	switch (wait_result) {
832 	case THREAD_AWAKENED:
833 		return KERN_SUCCESS;
834 
835 	case THREAD_TIMED_OUT:
836 		return KERN_OPERATION_TIMED_OUT;
837 
838 	case THREAD_INTERRUPTED:
839 		return KERN_ABORTED;
840 
841 	case THREAD_RESTART:
842 		return KERN_TERMINATED;
843 
844 	default:
845 		panic("ipc_eventlink_wait_block");
846 		return KERN_FAILURE;
847 	}
848 }
849 
/*
 * Name: ipc_eventlink_signal_internal_locked
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than wake counter. With IPC_EVENTLINK_FORCE_WAKEUP, skips the
 * counter and wakes all waiters with THREAD_RESTART instead.
 * Caller must hold the eventlink lock.
 *
 * Args:
 *   eventlink: eventlink
 *   ipc_eventlink_option_t: options
 *
 * Returns:
 *   KERN_SUCCESS on Success.
 *   KERN_NOT_WAITING if no thread was woken.
 */
static kern_return_t
ipc_eventlink_signal_internal_locked(
	struct ipc_eventlink         *signal_eventlink,
	ipc_eventlink_option_t       eventlink_option)
{
	kern_return_t kr = KERN_NOT_WAITING;
	struct ipc_eventlink_base *ipc_eventlink_base = signal_eventlink->el_base;

	if (eventlink_option & IPC_EVENTLINK_FORCE_WAKEUP) {
		/* Adjust the wait counter (UINT64_MAX == no waiter) */
		signal_eventlink->el_wait_counter = UINT64_MAX;

		/* Waiters see THREAD_RESTART, which converts to KERN_TERMINATED */
		kr = waitq_wakeup64_all_locked(
			&ipc_eventlink_base->elb_waitq,
			CAST_EVENT64_T(signal_eventlink),
			THREAD_RESTART, NULL,
			WAITQ_ALL_PRIORITIES,
			WAITQ_KEEP_LOCKED);
		return kr;
	}

	/* Increment the eventlink sync count */
	signal_eventlink->el_sync_counter++;

	/* Check if thread needs to be woken up */
	if (signal_eventlink->el_sync_counter > signal_eventlink->el_wait_counter) {
		/* Hand off directly to the woken thread when requested by the waiter */
		waitq_options_t wq_option = (eventlink_option & IPC_EVENTLINK_HANDOFF) ?
		    WQ_OPTION_HANDOFF : WQ_OPTION_NONE;

		/* Adjust the wait counter */
		signal_eventlink->el_wait_counter = UINT64_MAX;

		kr = waitq_wakeup64_one_locked(
			&ipc_eventlink_base->elb_waitq,
			CAST_EVENT64_T(signal_eventlink),
			THREAD_AWAKENED, NULL,
			WAITQ_ALL_PRIORITIES,
			WAITQ_KEEP_LOCKED,
			wq_option);
	}

	return kr;
}
907 
/*
 * Name: ipc_eventlink_reference
 *
 * Description: Increment ref on ipc eventlink struct. The refcount
 * lives on the shared ipc_eventlink_base, so a reference through
 * either side pins the whole pair.
 *
 * Args:
 *   eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_reference(
	struct ipc_eventlink *ipc_eventlink)
{
	os_ref_retain(&ipc_eventlink->el_base->elb_ref_count);
}
924 
/*
 * Name: ipc_eventlink_deallocate
 *
 * Description: Decrement ref on ipc eventlink struct; frees the
 * shared ipc_eventlink_base when the last reference is dropped.
 * Safe to call with IPC_EVENTLINK_NULL.
 *
 * Args:
 *   eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_deallocate(
	struct ipc_eventlink *ipc_eventlink)
{
	if (ipc_eventlink == IPC_EVENTLINK_NULL) {
		return;
	}

	struct ipc_eventlink_base *ipc_eventlink_base = ipc_eventlink->el_base;

	/* Not the last reference: nothing further to do */
	if (os_ref_release(&ipc_eventlink_base->elb_ref_count) > 0) {
		return;
	}

	waitq_deinit(&ipc_eventlink_base->elb_waitq);

	/* The eventlink must have been destroyed before the last ref drops */
	assert(!ipc_eventlink_active(ipc_eventlink));

#if DEVELOPMENT || DEBUG
	/* Remove ipc_eventlink from global list */
	global_ipc_eventlink_lock();
	queue_remove(&ipc_eventlink_list, ipc_eventlink_base,
	    struct ipc_eventlink_base *, elb_global_elm);
	global_ipc_eventlink_unlock();
#endif
	zfree(ipc_eventlink_zone, ipc_eventlink_base);
}
962 
963 /*
964  * Name: convert_port_to_eventlink
965  *
966  * Description: Convert from a port name in the current
967  * space to an ipc eventlink. Produces an ipc eventlink ref,
968  * which may be null.
969  *
970  * Args:
971  *   mach_port_t: eventlink port
972  *
973  * Returns:
974  *   ipc_eventlink on Success.
975  */
976 struct ipc_eventlink *
convert_port_to_eventlink(mach_port_t port)977 convert_port_to_eventlink(
978 	mach_port_t     port)
979 {
980 	struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;
981 
982 	if (IP_VALID(port)) {
983 		ip_mq_lock(port);
984 		convert_port_to_eventlink_locked(port, &ipc_eventlink);
985 		ip_mq_unlock(port);
986 	}
987 
988 	return ipc_eventlink;
989 }
990 
991 /*
992  * Name: convert_port_to_eventlink_locked
993  *
994  * Description: Convert from a port name in the current
995  * space to an ipc eventlink. Produces an ipc eventlink ref,
996  * which may be null.
997  *
998  * Args:
999  *   mach_port_name_t: eventlink port name
1000  *   ipc_eventlink_ptr: pointer to return ipc_eventlink.
1001  *
1002  * Returns:
1003  *   KERN_SUCCESS on Success.
1004  *   KERN_TERMINATED on inactive eventlink.
1005  */
1006 static kern_return_t
convert_port_to_eventlink_locked(ipc_port_t port,struct ipc_eventlink ** ipc_eventlink_ptr)1007 convert_port_to_eventlink_locked(
1008 	ipc_port_t                      port,
1009 	struct ipc_eventlink            **ipc_eventlink_ptr)
1010 {
1011 	kern_return_t kr = KERN_INVALID_CAPABILITY;
1012 	struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;
1013 
1014 	if (ip_active(port) && ip_kotype(port) == IKOT_EVENTLINK) {
1015 		ipc_eventlink = ipc_kobject_get_raw(port, IKOT_EVENTLINK);
1016 		if (ipc_eventlink) {
1017 			ipc_eventlink_reference(ipc_eventlink);
1018 			kr = KERN_SUCCESS;
1019 		} else {
1020 			kr = KERN_TERMINATED;
1021 		}
1022 	}
1023 
1024 	*ipc_eventlink_ptr = ipc_eventlink;
1025 	return kr;
1026 }
1027 
1028 /*
1029  * Name: port_name_to_eventlink
1030  *
1031  * Description: Convert from a port name in the current
1032  * space to an ipc eventlink. Produces an ipc eventlink ref,
1033  * which may be null.
1034  *
1035  * Args:
1036  *   mach_port_name_t: eventlink port name
1037  *   ipc_eventlink_ptr: ptr to pass eventlink struct
1038  *
1039  * Returns:
1040  *   KERN_SUCCESS on Success.
1041  */
1042 static kern_return_t
port_name_to_eventlink(mach_port_name_t name,struct ipc_eventlink ** ipc_eventlink_ptr)1043 port_name_to_eventlink(
1044 	mach_port_name_t              name,
1045 	struct ipc_eventlink          **ipc_eventlink_ptr)
1046 {
1047 	ipc_port_t kern_port;
1048 	kern_return_t kr;
1049 
1050 	if (!MACH_PORT_VALID(name)) {
1051 		*ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
1052 		return KERN_INVALID_NAME;
1053 	}
1054 
1055 	kr = ipc_port_translate_send(current_space(), name, &kern_port);
1056 	if (kr != KERN_SUCCESS) {
1057 		*ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
1058 		return kr;
1059 	}
1060 	/* have the port locked */
1061 	assert(IP_VALID(kern_port));
1062 
1063 	kr = convert_port_to_eventlink_locked(kern_port, ipc_eventlink_ptr);
1064 	ip_mq_unlock(kern_port);
1065 
1066 	return kr;
1067 }
1068 
1069 /*
1070  * Name: ipc_eventlink_no_senders
1071  *
1072  * Description: Destroy an ipc_eventlink, wakeup all threads.
1073  *
1074  * Returns:
1075  *   None.
1076  */
static void
ipc_eventlink_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
{
	kern_return_t kr;
	struct ipc_eventlink *ipc_eventlink;

	/* Unlocked fast-path check; rechecked under the port lock below. */
	if (!ip_active(port)) {
		return;
	}

	/* Get ipc_eventlink reference */
	ip_mq_lock(port);

	/* Make sure port is still active */
	if (!ip_active(port)) {
		ip_mq_unlock(port);
		return;
	}

	/*
	 * NOTE(review): the return value is ignored here, and on failure
	 * ipc_eventlink is set to IPC_EVENTLINK_NULL by the callee —
	 * presumably ipc_eventlink_destroy_internal tolerates NULL; verify.
	 */
	convert_port_to_eventlink_locked(port, &ipc_eventlink);
	ip_mq_unlock(port);

	kr = ipc_eventlink_destroy_internal(ipc_eventlink);
	if (kr == KERN_TERMINATED) {
		/* eventlink is already inactive, destroy the port */
		ipc_kobject_dealloc_port(port, mscount, IKOT_EVENTLINK);
	}

	/* Drop the reference returned by convert_port_to_eventlink_locked */
	ipc_eventlink_deallocate(ipc_eventlink);
}
1108 
/* Recover the enclosing ipc_eventlink_base from its embedded elb_waitq (container_of-style). */
#define WAITQ_TO_EVENTLINK(wq) ((struct ipc_eventlink_base *) ((uintptr_t)(wq) - offsetof(struct ipc_eventlink_base, elb_waitq)))
1110 
1111 /*
1112  * Name: kdp_eventlink_find_owner
1113  *
1114  * Description: Find who will signal the waiting thread.
1115  *
1116  * Args:
1117  *   waitq: eventlink waitq
1118  *   wait_event: eventlink wait event
1119  *   waitinfo: waitinfo struct
1120  *
1121  * Returns:
1122  *   None.
1123  */
1124 void
kdp_eventlink_find_owner(struct waitq * waitq,event64_t event,thread_waitinfo_t * waitinfo)1125 kdp_eventlink_find_owner(
1126 	struct waitq      *waitq,
1127 	event64_t         event,
1128 	thread_waitinfo_t *waitinfo)
1129 {
1130 	assert(waitinfo->wait_type == kThreadWaitEventlink);
1131 	waitinfo->owner = 0;
1132 	waitinfo->context = 0;
1133 
1134 	if (waitq_held(waitq)) {
1135 		return;
1136 	}
1137 
1138 	struct ipc_eventlink_base *ipc_eventlink_base = WAITQ_TO_EVENTLINK(waitq);
1139 
1140 	if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[0])) {
1141 		/* Use the other end of eventlink for signal thread */
1142 		if (ipc_eventlink_base->elb_eventlink[1].el_thread != THREAD_ASSOCIATE_WILD) {
1143 			waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[1].el_thread);
1144 		} else {
1145 			waitinfo->owner = 0;
1146 		}
1147 	} else if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[1])) {
1148 		/* Use the other end of eventlink for signal thread */
1149 		if (ipc_eventlink_base->elb_eventlink[0].el_thread != THREAD_ASSOCIATE_WILD) {
1150 			waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[0].el_thread);
1151 		} else {
1152 			waitinfo->owner = 0;
1153 		}
1154 	}
1155 
1156 	return;
1157 }
1158