xref: /xnu-8020.101.4/bsd/kern/kern_event.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  *
28  */
29 /*-
30  * Copyright (c) 1999,2000,2001 Jonathan Lemon <[email protected]>
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  */
54 /*
55  *	@(#)kern_event.c       1.0 (3/31/2000)
56  */
57 #include <stdint.h>
58 #include <machine/atomic.h>
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/filedesc.h>
63 #include <sys/kernel.h>
64 #include <sys/proc_internal.h>
65 #include <sys/kauth.h>
66 #include <sys/malloc.h>
67 #include <sys/unistd.h>
68 #include <sys/file_internal.h>
69 #include <sys/fcntl.h>
70 #include <sys/select.h>
71 #include <sys/queue.h>
72 #include <sys/event.h>
73 #include <sys/eventvar.h>
74 #include <sys/protosw.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/stat.h>
78 #include <sys/syscall.h> // SYS_* constants
79 #include <sys/sysctl.h>
80 #include <sys/uio.h>
81 #include <sys/sysproto.h>
82 #include <sys/user.h>
83 #include <sys/vnode_internal.h>
84 #include <string.h>
85 #include <sys/proc_info.h>
86 #include <sys/codesign.h>
87 #include <sys/pthread_shims.h>
88 #include <sys/kdebug.h>
89 #include <os/base.h>
90 #include <pexpert/pexpert.h>
91 
92 #include <kern/thread_group.h>
93 #include <kern/locks.h>
94 #include <kern/clock.h>
95 #include <kern/cpu_data.h>
96 #include <kern/policy_internal.h>
97 #include <kern/thread_call.h>
98 #include <kern/sched_prim.h>
99 #include <kern/waitq.h>
100 #include <kern/zalloc.h>
101 #include <kern/kalloc.h>
102 #include <kern/assert.h>
103 #include <kern/ast.h>
104 #include <kern/thread.h>
105 #include <kern/kcdata.h>
106 
107 #include <pthread/priority_private.h>
108 #include <pthread/workqueue_syscalls.h>
109 #include <pthread/workqueue_internal.h>
110 #include <libkern/libkern.h>
111 
112 #include <os/log.h>
113 
114 #include "net/net_str_id.h"
115 
116 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
117 #include <skywalk/lib/net_filter_event.h>
118 
119 extern bool net_check_compatible_alf(void);
120 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
121 
122 #include <mach/task.h>
123 #include <libkern/section_keywords.h>
124 
125 #if CONFIG_MEMORYSTATUS
126 #include <sys/kern_memorystatus.h>
127 #endif
128 
129 #if DEVELOPMENT || DEBUG
130 #define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK  (1U << 0)
131 #define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS     (1U << 1)
132 TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
133 #endif
134 
135 static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
136 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params =
137     VM_PACKING_PARAMS(KNOTE_KQ_PACKED);
138 
139 extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
140 extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */
141 
142 #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))
143 
144 static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
145     vfs_context_t ctx);
146 static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
147 static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
148     struct kevent_qos_s *kev);
149 static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
150 
151 static const struct fileops kqueueops = {
152 	.fo_type     = DTYPE_KQUEUE,
153 	.fo_read     = fo_no_read,
154 	.fo_write    = fo_no_write,
155 	.fo_ioctl    = fo_no_ioctl,
156 	.fo_select   = kqueue_select,
157 	.fo_close    = kqueue_close,
158 	.fo_drain    = kqueue_drain,
159 	.fo_kqfilter = kqueue_kqfilter,
160 };
161 
162 static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
163 static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result);
164 static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
165     thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
166 static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
167 static void kevent_register_wait_cleanup(struct knote *kn);
168 
169 static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
170 static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags);
171 
172 static void kqworkq_unbind(proc_t p, workq_threadreq_t);
173 static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread);
174 static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
175 static void kqueue_update_iotier_override(kqueue_t kqu);
176 
177 static void kqworkloop_unbind(struct kqworkloop *kwql);
178 
179 enum kqwl_unbind_locked_mode {
180 	KQWL_OVERRIDE_DROP_IMMEDIATELY,
181 	KQWL_OVERRIDE_DROP_DELAYED,
182 };
183 static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
184     enum kqwl_unbind_locked_mode how);
185 static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
186 static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
187 static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
188 enum {
189 	KQWL_UTQ_NONE,
190 	/*
191 	 * The wakeup qos is the qos of QUEUED knotes.
192 	 *
193 	 * This QoS is accounted for with the events override in the
194 	 * kqr_override_index field. It is raised each time a new knote is queued at
195 	 * a given QoS. The kqwl_wakeup_qos field is a superset of the non empty
196 	 * knote buckets and is recomputed after each event delivery.
197 	 */
198 	KQWL_UTQ_UPDATE_WAKEUP_QOS,
199 	KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
200 	KQWL_UTQ_UNBINDING, /* attempt to rebind */
201 	KQWL_UTQ_PARKING,
202 	/*
203 	 * The wakeup override is for suppressed knotes that have fired again at
204 	 * a higher QoS than the one for which they are suppressed already.
205 	 * This override is cleared when the knote suppressed list becomes empty.
206 	 */
207 	KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
208 	KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
209 	/*
210 	 * The QoS is the maximum QoS of an event enqueued on this workloop in
211 	 * userland. It is copied from the only EVFILT_WORKLOOP knote with
212 	 * a NOTE_WL_THREAD_REQUEST bit set allowed on this workloop. If there is no
213 	 * such knote, this QoS is 0.
214 	 */
215 	KQWL_UTQ_SET_QOS_INDEX,
216 	KQWL_UTQ_REDRIVE_EVENTS,
217 };
218 static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
219 static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);
220 
221 static struct knote *knote_alloc(void);
222 static void knote_free(struct knote *kn);
223 static int kq_add_knote(struct kqueue *kq, struct knote *kn,
224     struct knote_lock_ctx *knlc, struct proc *p);
225 static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
226     struct kevent_qos_s *kev, bool is_fd, struct proc *p);
227 
228 static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
229 static void knote_dequeue(kqueue_t kqu, struct knote *kn);
230 
231 static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
232     struct kevent_qos_s *kev, int result);
233 static void knote_suppress(kqueue_t kqu, struct knote *kn);
234 static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
235 static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);
236 
237 // both these functions may dequeue the knote and it is up to the caller
238 // to enqueue the knote back
239 static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
240 static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);
241 
242 static ZONE_DEFINE(knote_zone, "knote zone",
243     sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
244 static ZONE_DEFINE(kqfile_zone, "kqueue file zone",
245     sizeof(struct kqfile), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
246 static ZONE_DEFINE(kqworkq_zone, "kqueue workq zone",
247     sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
248 static ZONE_DEFINE(kqworkloop_zone, "kqueue workloop zone",
249     sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
250 
251 #define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
252 
253 static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
254 static void filt_no_detach(struct knote *kn);
255 static int filt_bad_event(struct knote *kn, long hint);
256 static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
257 static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);
258 
259 SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
260 	.f_attach  = filt_no_attach,
261 	.f_detach  = filt_no_detach,
262 	.f_event   = filt_bad_event,
263 	.f_touch   = filt_bad_touch,
264 	.f_process = filt_bad_process,
265 };
266 
267 #if CONFIG_MEMORYSTATUS
268 extern const struct filterops memorystatus_filtops;
269 #endif /* CONFIG_MEMORYSTATUS */
270 extern const struct filterops fs_filtops;
271 extern const struct filterops sig_filtops;
272 extern const struct filterops machport_filtops;
273 extern const struct filterops pipe_nfiltops;
274 extern const struct filterops pipe_rfiltops;
275 extern const struct filterops pipe_wfiltops;
276 extern const struct filterops ptsd_kqops;
277 extern const struct filterops ptmx_kqops;
278 extern const struct filterops soread_filtops;
279 extern const struct filterops sowrite_filtops;
280 extern const struct filterops sock_filtops;
281 extern const struct filterops soexcept_filtops;
282 extern const struct filterops spec_filtops;
283 extern const struct filterops bpfread_filtops;
284 extern const struct filterops necp_fd_rfiltops;
285 #if SKYWALK
286 extern const struct filterops skywalk_channel_rfiltops;
287 extern const struct filterops skywalk_channel_wfiltops;
288 extern const struct filterops skywalk_channel_efiltops;
289 #endif /* SKYWALK */
290 extern const struct filterops fsevent_filtops;
291 extern const struct filterops vnode_filtops;
292 extern const struct filterops tty_filtops;
293 
294 const static struct filterops file_filtops;
295 const static struct filterops kqread_filtops;
296 const static struct filterops proc_filtops;
297 const static struct filterops timer_filtops;
298 const static struct filterops user_filtops;
299 const static struct filterops workloop_filtops;
300 
301 /*
302  *
303  * Rules for adding new filters to the system:
304  * Public filters:
305  * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
306  *   in the exported section of the header
307  * - Update the EVFILT_SYSCOUNT value to reflect the new addition
308  * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
309  *   of the Public Filters section in the array.
310  * Private filters:
311  * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
312  *   in the XNU_KERNEL_PRIVATE section of the header
313  * - Update the EVFILTID_MAX value to reflect the new addition
314  * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
315  *   the Private filters section of the array.
316  */
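/*
 * Illustrative sketch only (not part of this file): following the private
 * filter rules above, wiring up a hypothetical "example" filter would look
 * roughly like this, assuming an EVFILTID_EXAMPLE value had been added to
 * bsd/sys/event.h just below EVFILTID_MAX:
 *
 *	static int  filt_example_attach(struct knote *kn, struct kevent_qos_s *kev);
 *	static void filt_example_detach(struct knote *kn);
 *	static int  filt_example_event(struct knote *kn, long hint);
 *
 *	SECURITY_READ_ONLY_EARLY(static struct filterops) example_filtops = {
 *		.f_attach = filt_example_attach,
 *		.f_detach = filt_example_detach,
 *		.f_event  = filt_example_event,
 *	};
 *
 * together with a "[EVFILTID_EXAMPLE] = &example_filtops," entry at the end
 * of the Private filters section of sysfilt_ops below.
 */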
317 static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
318 static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
319 	/* Public Filters */
320 	[~EVFILT_READ]                  = &file_filtops,
321 	[~EVFILT_WRITE]                 = &file_filtops,
322 	[~EVFILT_AIO]                   = &bad_filtops,
323 	[~EVFILT_VNODE]                 = &file_filtops,
324 	[~EVFILT_PROC]                  = &proc_filtops,
325 	[~EVFILT_SIGNAL]                = &sig_filtops,
326 	[~EVFILT_TIMER]                 = &timer_filtops,
327 	[~EVFILT_MACHPORT]              = &machport_filtops,
328 	[~EVFILT_FS]                    = &fs_filtops,
329 	[~EVFILT_USER]                  = &user_filtops,
330 	[~EVFILT_UNUSED_11]             = &bad_filtops,
331 	[~EVFILT_VM]                    = &bad_filtops,
332 	[~EVFILT_SOCK]                  = &file_filtops,
333 #if CONFIG_MEMORYSTATUS
334 	[~EVFILT_MEMORYSTATUS]          = &memorystatus_filtops,
335 #else
336 	[~EVFILT_MEMORYSTATUS]          = &bad_filtops,
337 #endif
338 	[~EVFILT_EXCEPT]                = &file_filtops,
339 #if SKYWALK
340 	[~EVFILT_NW_CHANNEL]            = &file_filtops,
341 #else /* !SKYWALK */
342 	[~EVFILT_NW_CHANNEL]            = &bad_filtops,
343 #endif /* !SKYWALK */
344 	[~EVFILT_WORKLOOP]              = &workloop_filtops,
345 
346 	/* Private filters */
347 	[EVFILTID_KQREAD]               = &kqread_filtops,
348 	[EVFILTID_PIPE_N]               = &pipe_nfiltops,
349 	[EVFILTID_PIPE_R]               = &pipe_rfiltops,
350 	[EVFILTID_PIPE_W]               = &pipe_wfiltops,
351 	[EVFILTID_PTSD]                 = &ptsd_kqops,
352 	[EVFILTID_SOREAD]               = &soread_filtops,
353 	[EVFILTID_SOWRITE]              = &sowrite_filtops,
354 	[EVFILTID_SCK]                  = &sock_filtops,
355 	[EVFILTID_SOEXCEPT]             = &soexcept_filtops,
356 	[EVFILTID_SPEC]                 = &spec_filtops,
357 	[EVFILTID_BPFREAD]              = &bpfread_filtops,
358 	[EVFILTID_NECP_FD]              = &necp_fd_rfiltops,
359 #if SKYWALK
360 	[EVFILTID_SKYWALK_CHANNEL_W]    = &skywalk_channel_wfiltops,
361 	[EVFILTID_SKYWALK_CHANNEL_R]    = &skywalk_channel_rfiltops,
362 	[EVFILTID_SKYWALK_CHANNEL_E]    = &skywalk_channel_efiltops,
363 #else /* !SKYWALK */
364 	[EVFILTID_SKYWALK_CHANNEL_W]    = &bad_filtops,
365 	[EVFILTID_SKYWALK_CHANNEL_R]    = &bad_filtops,
366 	[EVFILTID_SKYWALK_CHANNEL_E]    = &bad_filtops,
367 #endif /* !SKYWALK */
368 	[EVFILTID_FSEVENT]              = &fsevent_filtops,
369 	[EVFILTID_VN]                   = &vnode_filtops,
370 	[EVFILTID_TTY]                  = &tty_filtops,
371 	[EVFILTID_PTMX]                 = &ptmx_kqops,
372 
373 	/* fake filter for detached knotes, keep last */
374 	[EVFILTID_DETACHED]             = &bad_filtops,
375 };
376 
377 static inline bool
378 kqr_thread_bound(workq_threadreq_t kqr)
379 {
380 	return kqr->tr_state == WORKQ_TR_STATE_BOUND;
381 }
382 
383 static inline bool
384 kqr_thread_requested_pending(workq_threadreq_t kqr)
385 {
386 	workq_tr_state_t tr_state = kqr->tr_state;
387 	return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
388 }
389 
390 static inline bool
391 kqr_thread_requested(workq_threadreq_t kqr)
392 {
393 	return kqr->tr_state != WORKQ_TR_STATE_IDLE;
394 }
395 
396 static inline thread_t
397 kqr_thread_fast(workq_threadreq_t kqr)
398 {
399 	assert(kqr_thread_bound(kqr));
400 	return kqr->tr_thread;
401 }
402 
403 static inline thread_t
404 kqr_thread(workq_threadreq_t kqr)
405 {
406 	return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
407 }
408 
409 static inline struct kqworkloop *
410 kqr_kqworkloop(workq_threadreq_t kqr)
411 {
412 	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
413 		return __container_of(kqr, struct kqworkloop, kqwl_request);
414 	}
415 	return NULL;
416 }
417 
418 static inline kqueue_t
419 kqr_kqueue(proc_t p, workq_threadreq_t kqr)
420 {
421 	kqueue_t kqu;
422 	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
423 		kqu.kqwl = kqr_kqworkloop(kqr);
424 	} else {
425 		kqu.kqwq = p->p_fd.fd_wqkqueue;
426 		assert(kqr >= kqu.kqwq->kqwq_request &&
427 		    kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
428 	}
429 	return kqu;
430 }
431 
432 #if CONFIG_PREADOPT_TG
433 /* There are no guarantees about which locks are held when this is called */
434 inline thread_group_qos_t
435 kqr_preadopt_thread_group(workq_threadreq_t req)
436 {
437 	struct kqworkloop *kqwl = kqr_kqworkloop(req);
438 	return kqwl ? os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed) : NULL;
439 }
440 
441 /* There are no guarantees about which locks are held when this is called */
442 inline _Atomic(thread_group_qos_t) *
443 kqr_preadopt_thread_group_addr(workq_threadreq_t req)
444 {
445 	struct kqworkloop *kqwl = kqr_kqworkloop(req);
446 	return kqwl ? (&kqwl->kqwl_preadopt_tg) : NULL;
447 }
448 #endif
449 
450 /*
451  * kqueue/note lock implementations
452  *
453  *	The kqueue lock guards the kq state, the state of its queues,
454  *	and the kqueue-aware status and locks of individual knotes.
455  *
456  *	The kqueue workq lock is used to protect state guarding the
457  *	interaction of the kqueue with the workq.  This state cannot
458  *	be guarded by the kq lock - as it needs to be taken when we
459  *	already have the waitq set lock held (during the waitq hook
460  *	callback).  It might be better to use the waitq lock itself
461  *	for this, but the IRQ requirements make that difficult).
462  *	for this, but the IRQ requirements make that difficult.
463  *	Knote flags, filter flags, and associated data are protected
464  *	by the underlying object lock - and are only ever looked at
465  *	by calling the filter to get a [consistent] snapshot of that
466  *	data.
467  */
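/*
 * Illustrative call path (a sketch of the typical flow, not a new rule):
 * an object owner posts an event while holding its own lock, and only
 * knote_post() below toggles the kq spinlock around the filter call:
 *
 *	object lock held by the caller
 *	  -> KNOTE(&obj->klist, hint)
 *	       -> knote_post(kn, hint)   // sets KN_POSTING under kq_lock,
 *	                                 // drops it across f_event()
 *	            -> f_event(kn, hint) // snapshots state under the object lock
 */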
468 
469 static inline void
470 kqlock(kqueue_t kqu)
471 {
472 	lck_spin_lock(&kqu.kq->kq_lock);
473 }
474 
475 static inline void
476 kqlock_held(__assert_only kqueue_t kqu)
477 {
478 	LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
479 }
480 
481 static inline void
482 kqunlock(kqueue_t kqu)
483 {
484 	lck_spin_unlock(&kqu.kq->kq_lock);
485 }
486 
487 static inline void
488 knhash_lock(struct filedesc *fdp)
489 {
490 	lck_mtx_lock(&fdp->fd_knhashlock);
491 }
492 
493 static inline void
494 knhash_unlock(struct filedesc *fdp)
495 {
496 	lck_mtx_unlock(&fdp->fd_knhashlock);
497 }
498 
499 /* wait event for knote locks */
500 static inline event_t
501 knote_lock_wev(struct knote *kn)
502 {
503 	return (event_t)(&kn->kn_hook);
504 }
505 
506 /* wait event for kevent_register_wait_* */
507 static inline event64_t
508 knote_filt_wev64(struct knote *kn)
509 {
510 	/* kdp_workloop_sync_wait_find_owner knows about this */
511 	return CAST_EVENT64_T(kn);
512 }
513 
514 /* wait event for knote_post/knote_drop */
515 static inline event_t
516 knote_post_wev(struct knote *kn)
517 {
518 	return &kn->kn_kevent;
519 }
520 
521 /*!
522  * @function knote_has_qos
523  *
524  * @brief
525  * Whether the knote has a regular QoS.
526  *
527  * @discussion
528  * kn_qos_override is:
529  * - 0 on kqfiles
530  * - THREAD_QOS_LAST for special buckets (manager)
531  *
532  * Other values mean the knote participates in QoS propagation.
533  */
534 static inline bool
535 knote_has_qos(struct knote *kn)
536 {
537 	return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
538 }
539 
540 #pragma mark knote locks
541 
542 /*
543  * Enum used by the knote_lock_* functions.
544  *
545  * KNOTE_KQ_LOCK_ALWAYS
546  *   The function will always return with the kq lock held.
547  *
548  * KNOTE_KQ_LOCK_ON_SUCCESS
549  *   The function will return with the kq lock held if it was successful
550  *   (knote_lock() is the only function that can fail).
551  *
552  * KNOTE_KQ_LOCK_ON_FAILURE
553  *   The function will return with the kq lock held if it was unsuccessful
554  *   (knote_lock() is the only function that can fail).
555  *
556  * KNOTE_KQ_UNLOCK:
557  *   The function returns with the kq unlocked.
558  */
559 enum kqlocking {
560 	KNOTE_KQ_LOCK_ALWAYS,
561 	KNOTE_KQ_LOCK_ON_SUCCESS,
562 	KNOTE_KQ_LOCK_ON_FAILURE,
563 	KNOTE_KQ_UNLOCK,
564 };
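/*
 * Minimal usage sketch (illustrative only) of the modes above, mirroring
 * how callers later in this file combine knote_lock()/knote_unlock():
 *
 *	struct knote_lock_ctx knlc;
 *
 *	kqlock(kq);
 *	if (knote_lock(kq, kn, &knlc, KNOTE_KQ_UNLOCK)) {
 *		// knote lock held, kq lock dropped: safe to call the filter
 *		kqlock(kq);
 *		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 *	}
 *	// with KNOTE_KQ_UNLOCK the kq lock is not held here on either path;
 *	// a false return means the knote was concurrently dropped
 */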
565 
566 static struct knote_lock_ctx *
567 knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
568 {
569 	struct knote_lock_ctx *ctx;
570 	LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
571 		if (ctx->knlc_knote == kn) {
572 			return ctx;
573 		}
574 	}
575 	panic("knote lock context not found: %p", kn);
576 	__builtin_trap();
577 }
578 
579 /* slowpath of knote_lock() */
580 __attribute__((noinline))
581 static bool __result_use_check
582 knote_lock_slow(kqueue_t kqu, struct knote *kn,
583     struct knote_lock_ctx *knlc, int kqlocking)
584 {
585 	struct knote_lock_ctx *owner_lc;
586 	struct uthread *uth = current_uthread();
587 	wait_result_t wr;
588 
589 	kqlock_held(kqu);
590 
591 	owner_lc = knote_lock_ctx_find(kqu, kn);
592 #if DEBUG || DEVELOPMENT
593 	knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
594 #endif
595 	owner_lc->knlc_waiters++;
596 
597 	/*
598 	 * Make our lock context visible to knote_unlock()
599 	 */
600 	uth->uu_knlock = knlc;
601 
602 	wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
603 	    knote_lock_wev(kn), owner_lc->knlc_thread,
604 	    THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);
605 
606 	if (wr == THREAD_RESTART) {
607 		/*
608 		 * We haven't been woken up by knote_unlock() but by knote_unlock_cancel().
609 		 * We need to clean up the state since no one else did.
610 		 */
611 		uth->uu_knlock = NULL;
612 #if DEBUG || DEVELOPMENT
613 		assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
614 		knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
615 #endif
616 
617 		if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
618 		    kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
619 			kqlock(kqu);
620 		}
621 		return false;
622 	} else {
623 		if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
624 		    kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
625 			kqlock(kqu);
626 #if DEBUG || DEVELOPMENT
627 			/*
628 			 * This state is set under the lock so we can't
629 			 * really assert this unless we hold the lock.
630 			 */
631 			assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
632 #endif
633 		}
634 		return true;
635 	}
636 }
637 
638 /*
639  * Attempts to take the "knote" lock.
640  *
641  * Called with the kqueue lock held.
642  *
643  * Returns true if the knote lock is acquired, false if it has been dropped
644  */
645 static bool __result_use_check
646 knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
647     enum kqlocking kqlocking)
648 {
649 	kqlock_held(kqu);
650 
651 #if DEBUG || DEVELOPMENT
652 	assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
653 #endif
654 	knlc->knlc_knote = kn;
655 	knlc->knlc_thread = current_thread();
656 	knlc->knlc_waiters = 0;
657 
658 	if (__improbable(kn->kn_status & KN_LOCKED)) {
659 		return knote_lock_slow(kqu, kn, knlc, kqlocking);
660 	}
661 
662 	/*
663 	 * When the knote will be dropped, the knote lock is taken before
664 	 * KN_DROPPING is set, and then the knote will be removed from any
665 	 * hash table that references it before the lock is canceled.
666 	 */
667 	assert((kn->kn_status & KN_DROPPING) == 0);
668 	LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
669 	kn->kn_status |= KN_LOCKED;
670 #if DEBUG || DEVELOPMENT
671 	knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
672 #endif
673 
674 	if (kqlocking == KNOTE_KQ_UNLOCK ||
675 	    kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
676 		kqunlock(kqu);
677 	}
678 	return true;
679 }
680 
681 /*
682  * Unlocks a knote successfully locked with knote_lock().
683  *
684  * Called with the kqueue lock held.
685  *
686  * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
687  */
688 static void
689 knote_unlock(kqueue_t kqu, struct knote *kn,
690     struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
691 {
692 	kqlock_held(kqu);
693 
694 	assert(knlc->knlc_knote == kn);
695 	assert(kn->kn_status & KN_LOCKED);
696 #if DEBUG || DEVELOPMENT
697 	assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
698 #endif
699 
700 	LIST_REMOVE(knlc, knlc_link);
701 
702 	if (knlc->knlc_waiters) {
703 		thread_t thread = THREAD_NULL;
704 
705 		wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
706 		    LCK_WAKE_DEFAULT, &thread);
707 
708 		/*
709 		 * knote_lock_slow() publishes the lock context of waiters
710 		 * in uthread::uu_knlock.
711 		 *
712 		 * Reach out and make this context the new owner.
713 		 */
714 		struct uthread *ut = get_bsdthread_info(thread);
715 		struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;
716 
717 		assert(next_owner_lc->knlc_knote == kn);
718 		next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
719 		LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
720 #if DEBUG || DEVELOPMENT
721 		next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
722 #endif
723 		ut->uu_knlock = NULL;
724 		thread_deallocate_safe(thread);
725 	} else {
726 		kn->kn_status &= ~KN_LOCKED;
727 	}
728 
729 	if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
730 		/*
731 		 * No f_event() in flight anymore, we can leave QoS "Merge" mode
732 		 *
733 		 * See knote_adjust_qos()
734 		 */
735 		kn->kn_status &= ~KN_MERGE_QOS;
736 	}
737 	if (kqlocking == KNOTE_KQ_UNLOCK) {
738 		kqunlock(kqu);
739 	}
740 #if DEBUG || DEVELOPMENT
741 	knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
742 #endif
743 }
744 
745 /*
746  * Aborts all waiters for a knote lock, and unlock the knote.
747  *
748  * Called with the kqueue lock held.
749  *
750  * Returns with the kqueue unlocked.
751  */
752 static void
753 knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
754     struct knote_lock_ctx *knlc)
755 {
756 	kqlock_held(kq);
757 
758 	assert(knlc->knlc_knote == kn);
759 	assert(kn->kn_status & KN_LOCKED);
760 	assert(kn->kn_status & KN_DROPPING);
761 
762 	LIST_REMOVE(knlc, knlc_link);
763 	kn->kn_status &= ~KN_LOCKED;
764 	kqunlock(kq);
765 
766 	if (knlc->knlc_waiters) {
767 		wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
768 	}
769 #if DEBUG || DEVELOPMENT
770 	knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
771 #endif
772 }
773 
774 /*
775  * Call the f_event hook of a given filter.
776  *
777  * Takes a use count to protect against concurrent drops.
778  */
779 static void
780 knote_post(struct knote *kn, long hint)
781 {
782 	struct kqueue *kq = knote_get_kq(kn);
783 	int dropping, result;
784 
785 	kqlock(kq);
786 
787 	/*
788 	 * The select fallback is special, if KNOTE() is called,
789 	 * the contract is that kn->kn_hook _HAS_ to become NULL.
790 	 *
791 	 * the f_event() hook might not be called if we're dropping,
792 	 * so we hardcode it here, which is a little distasteful,
793 	 * but the select fallback is kinda magical in the first place.
794 	 */
795 	if (kn->kn_filtid == EVFILTID_SPEC) {
796 		kn->kn_hook = NULL;
797 	}
798 
799 	if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
800 		return kqunlock(kq);
801 	}
802 
803 	if (__improbable(kn->kn_status & KN_POSTING)) {
804 		panic("KNOTE() called concurrently on knote %p", kn);
805 	}
806 
807 	kn->kn_status |= KN_POSTING;
808 
809 	kqunlock(kq);
810 	result = filter_call(knote_fops(kn), f_event(kn, hint));
811 	kqlock(kq);
812 
813 	dropping = (kn->kn_status & KN_DROPPING);
814 
815 	if (!dropping && (result & FILTER_ADJUST_EVENT_IOTIER_BIT)) {
816 		kqueue_update_iotier_override(kq);
817 	}
818 
819 	if (!dropping && (result & FILTER_ACTIVE)) {
820 		knote_activate(kq, kn, result);
821 	}
822 
823 	if ((kn->kn_status & KN_LOCKED) == 0) {
824 		/*
825 		 * There's no other f_* call in flight, we can leave QoS "Merge" mode.
826 		 *
827 		 * See knote_adjust_qos()
828 		 */
829 		kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
830 	} else {
831 		kn->kn_status &= ~KN_POSTING;
832 	}
833 
834 	if (__improbable(dropping)) {
835 		thread_wakeup(knote_post_wev(kn));
836 	}
837 
838 	kqunlock(kq);
839 }
840 
841 /*
842  * Called by knote_drop() to wait for the last f_event() caller to be done.
843  *
844  *	- kq locked at entry
845  *	- kq unlocked at exit
846  */
847 static void
848 knote_wait_for_post(struct kqueue *kq, struct knote *kn)
849 {
850 	kqlock_held(kq);
851 
852 	assert(kn->kn_status & KN_DROPPING);
853 
854 	if (kn->kn_status & KN_POSTING) {
855 		lck_spin_sleep(&kq->kq_lock, LCK_SLEEP_UNLOCK, knote_post_wev(kn),
856 		    THREAD_UNINT | THREAD_WAIT_NOREPORT);
857 	} else {
858 		kqunlock(kq);
859 	}
860 }
861 
862 #pragma mark knote helpers for filters
863 
864 OS_ALWAYS_INLINE
865 void
866 knote_set_error(struct knote *kn, int error)
867 {
868 	kn->kn_flags |= EV_ERROR;
869 	kn->kn_sdata = error;
870 }
871 
872 OS_ALWAYS_INLINE
873 int64_t
874 knote_low_watermark(const struct knote *kn)
875 {
876 	return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1;
877 }
878 
879 /*!
880  * @function knote_fill_kevent_with_sdata
881  *
882  * @brief
883  * Fills in a kevent from the current content of a knote.
884  *
885  * @discussion
886  * This is meant to be called from filter's f_event hooks.
887  * The kevent data is filled with kn->kn_sdata.
888  *
889  * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
890  *
891  * Using knote_fill_kevent is typically preferred.
892  */
893 OS_ALWAYS_INLINE
894 void
895 knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
896 {
897 #define knote_assert_aliases(name1, offs1, name2) \
898 	static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
899 	    offsetof(struct kevent_internal_s, name2), \
900 	        "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 " need to alias")
901 	/*
902 	 * All the code makes assumptions on these aliasing,
903 	 * so make sure we fail the build if we ever ever ever break them.
904 	 */
905 	knote_assert_aliases(ident, 0, kei_ident);
906 #ifdef __LITTLE_ENDIAN__
907 	knote_assert_aliases(filter, 0, kei_filter);  // non trivial overlap
908 	knote_assert_aliases(filter, 1, kei_filtid);  // non trivial overlap
909 #else
910 	knote_assert_aliases(filter, 0, kei_filtid);  // non trivial overlap
911 	knote_assert_aliases(filter, 1, kei_filter);  // non trivial overlap
912 #endif
913 	knote_assert_aliases(flags, 0, kei_flags);
914 	knote_assert_aliases(qos, 0, kei_qos);
915 	knote_assert_aliases(udata, 0, kei_udata);
916 	knote_assert_aliases(fflags, 0, kei_fflags);
917 	knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
918 	knote_assert_aliases(data, 0, kei_sdata);     // non trivial overlap
919 	knote_assert_aliases(ext, 0, kei_ext);
920 #undef knote_assert_aliases
921 
922 	/*
923 	 * Fix the differences between kevent_qos_s and kevent_internal_s:
924 	 * - xflags is where kn_sfflags lives, we need to zero it
925 	 * - fixup the high bits of `filter` where kn_filtid lives
926 	 */
927 	*kev = *(struct kevent_qos_s *)&kn->kn_kevent;
928 	kev->xflags = 0;
929 	kev->filter |= 0xff00;
930 	if (kn->kn_flags & EV_CLEAR) {
931 		kn->kn_fflags = 0;
932 	}
933 }
934 
935 /*!
936  * @function knote_fill_kevent
937  *
938  * @brief
939  * Fills in a kevent from the current content of a knote.
940  *
941  * @discussion
942  * This is meant to be called from filter's f_event hooks.
943  * The kevent data is filled with the passed in data.
944  *
945  * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
946  */
947 OS_ALWAYS_INLINE
948 void
949 knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
950 {
951 	knote_fill_kevent_with_sdata(kn, kev);
952 	kev->filter = kn->kn_filter;
953 	kev->data = data;
954 }
955 
956 
957 #pragma mark file_filtops
958 
959 static int
960 filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
961 {
962 	return fo_kqfilter(kn->kn_fp, kn, kev);
963 }
964 
965 SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
966 	.f_isfd = 1,
967 	.f_attach = filt_fileattach,
968 };
969 
970 #pragma mark kqread_filtops
971 
972 #define f_flag fp_glob->fg_flag
973 #define f_ops fp_glob->fg_ops
974 #define f_lflags fp_glob->fg_lflags
975 
976 static void
977 filt_kqdetach(struct knote *kn)
978 {
979 	struct kqfile *kqf = (struct kqfile *)fp_get_data(kn->kn_fp);
980 	struct kqueue *kq = &kqf->kqf_kqueue;
981 
982 	kqlock(kq);
983 	KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
984 	kqunlock(kq);
985 }
986 
987 static int
988 filt_kqueue(struct knote *kn, __unused long hint)
989 {
990 	struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
991 
992 	return kq->kq_count > 0;
993 }
994 
995 static int
996 filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
997 {
998 #pragma unused(kev)
999 	struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1000 	int res;
1001 
1002 	kqlock(kq);
1003 	res = (kq->kq_count > 0);
1004 	kqunlock(kq);
1005 
1006 	return res;
1007 }
1008 
1009 static int
1010 filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
1011 {
1012 	struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1013 	int res = 0;
1014 
1015 	kqlock(kq);
1016 	if (kq->kq_count) {
1017 		knote_fill_kevent(kn, kev, kq->kq_count);
1018 		res = 1;
1019 	}
1020 	kqunlock(kq);
1021 
1022 	return res;
1023 }
1024 
1025 SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
1026 	.f_isfd = 1,
1027 	.f_detach = filt_kqdetach,
1028 	.f_event = filt_kqueue,
1029 	.f_touch = filt_kqtouch,
1030 	.f_process = filt_kqprocess,
1031 };
1032 
1033 #pragma mark proc_filtops
1034 
1035 static int
1036 filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1037 {
1038 	struct proc *p;
1039 
1040 	assert(PID_MAX < NOTE_PDATAMASK);
1041 
1042 	if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
1043 		knote_set_error(kn, ENOTSUP);
1044 		return 0;
1045 	}
1046 
1047 	p = proc_find((int)kn->kn_id);
1048 	if (p == NULL) {
1049 		knote_set_error(kn, ESRCH);
1050 		return 0;
1051 	}
1052 
1053 	const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
1054 
1055 	if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
1056 		do {
1057 			pid_t selfpid = proc_selfpid();
1058 
1059 			if (p->p_ppid == selfpid) {
1060 				break;  /* parent => ok */
1061 			}
1062 			if ((p->p_lflag & P_LTRACED) != 0 &&
1063 			    (p->p_oppid == selfpid)) {
1064 				break;  /* parent-in-waiting => ok */
1065 			}
1066 			if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) {
1067 				break; /* allowed to signal => ok */
1068 			}
1069 			proc_rele(p);
1070 			knote_set_error(kn, EACCES);
1071 			return 0;
1072 		} while (0);
1073 	}
1074 
1075 	kn->kn_proc = p;
1076 	kn->kn_flags |= EV_CLEAR;       /* automatically set */
1077 	kn->kn_sdata = 0;               /* incoming data is ignored */
1078 
1079 	proc_klist_lock();
1080 
1081 	KNOTE_ATTACH(&p->p_klist, kn);
1082 
1083 	proc_klist_unlock();
1084 
1085 	proc_rele(p);
1086 
1087 	/*
1088 	 * only captures edge-triggered events after this point
1089 	 * so it can't already be fired.
1090 	 */
1091 	return 0;
1092 }
1093 
1094 
1095 /*
1096  * The knote may be attached to a different process, which may exit,
1097  * leaving nothing for the knote to be attached to.  In that case,
1098  * the pointer to the process will have already been nulled out.
1099  */
1100 static void
1101 filt_procdetach(struct knote *kn)
1102 {
1103 	struct proc *p;
1104 
1105 	proc_klist_lock();
1106 
1107 	p = kn->kn_proc;
1108 	if (p != PROC_NULL) {
1109 		kn->kn_proc = PROC_NULL;
1110 		KNOTE_DETACH(&p->p_klist, kn);
1111 	}
1112 
1113 	proc_klist_unlock();
1114 }
1115 
1116 static int
1117 filt_procevent(struct knote *kn, long hint)
1118 {
1119 	u_int event;
1120 
1121 	/* ALWAYS CALLED WITH proc_klist_lock */
1122 
1123 	/*
1124 	 * Note: a lot of bits in hint may be obtained from the knote
1125 	 * To free some of those bits, see <rdar://problem/12592988> Freeing up
1126 	 * bits in hint for filt_procevent
1127 	 *
1128 	 * mask off extra data
1129 	 */
1130 	event = (u_int)hint & NOTE_PCTRLMASK;
1131 
1132 	/*
1133 	 * termination lifecycle events can happen while a debugger
1134 	 * has reparented a process, in which case notifications
1135 	 * should be quashed except to the tracing parent. When
1136 	 * the debugger reaps the child (either via wait4(2) or
1137 	 * process exit), the child will be reparented to the original
1138 	 * parent and these knotes re-fired.
1139 	 */
1140 	if (event & NOTE_EXIT) {
1141 		if ((kn->kn_proc->p_oppid != 0)
1142 		    && (proc_getpid(knote_get_kq(kn)->kq_p) != kn->kn_proc->p_ppid)) {
1143 			/*
1144 			 * This knote is not for the current ptrace(2) parent, ignore.
1145 			 */
1146 			return 0;
1147 		}
1148 	}
1149 
1150 	/*
1151 	 * if the user is interested in this event, record it.
1152 	 */
1153 	if (kn->kn_sfflags & event) {
1154 		kn->kn_fflags |= event;
1155 	}
1156 
1157 #pragma clang diagnostic push
1158 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1159 	if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
1160 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1161 	}
1162 #pragma clang diagnostic pop
1163 
1164 
1165 	/*
1166 	 * The kernel has a wrapper in place that returns the same data
1167 	 * as is collected here, in kn_hook32.  Any changes to how
1168 	 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
1169 	 * should also be reflected in the proc_pidnoteexit() wrapper.
1170 	 */
1171 	if (event == NOTE_EXIT) {
1172 		kn->kn_hook32 = 0;
1173 		if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
1174 			kn->kn_fflags |= NOTE_EXITSTATUS;
1175 			kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
1176 		}
1177 		if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
1178 			kn->kn_fflags |= NOTE_EXIT_DETAIL;
1179 			if ((kn->kn_proc->p_lflag &
1180 			    P_LTERM_DECRYPTFAIL) != 0) {
1181 				kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
1182 			}
1183 			if ((kn->kn_proc->p_lflag &
1184 			    P_LTERM_JETSAM) != 0) {
1185 				kn->kn_hook32 |= NOTE_EXIT_MEMORY;
1186 				switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
1187 				case P_JETSAM_VMPAGESHORTAGE:
1188 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
1189 					break;
1190 				case P_JETSAM_VMTHRASHING:
1191 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
1192 					break;
1193 				case P_JETSAM_FCTHRASHING:
1194 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
1195 					break;
1196 				case P_JETSAM_VNODE:
1197 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
1198 					break;
1199 				case P_JETSAM_HIWAT:
1200 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
1201 					break;
1202 				case P_JETSAM_PID:
1203 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
1204 					break;
1205 				case P_JETSAM_IDLEEXIT:
1206 					kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
1207 					break;
1208 				}
1209 			}
1210 			if ((proc_getcsflags(kn->kn_proc) &
1211 			    CS_KILLED) != 0) {
1212 				kn->kn_hook32 |= NOTE_EXIT_CSERROR;
1213 			}
1214 		}
1215 	}
1216 
1217 	/* if we have any matching state, activate the knote */
1218 	return kn->kn_fflags != 0;
1219 }
1220 
1221 static int
1222 filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
1223 {
1224 	int res;
1225 
1226 	proc_klist_lock();
1227 
1228 	/* accept new filter flags and mask off output events no longer interesting */
1229 	kn->kn_sfflags = kev->fflags;
1230 
1231 	/* restrict the current results to the (smaller?) set of new interest */
1232 	/*
1233 	 * For compatibility with previous implementations, we leave kn_fflags
1234 	 * as they were before.
1235 	 */
1236 	//kn->kn_fflags &= kn->kn_sfflags;
1237 
1238 	res = (kn->kn_fflags != 0);
1239 
1240 	proc_klist_unlock();
1241 
1242 	return res;
1243 }
1244 
1245 static int
1246 filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
1247 {
1248 	int res = 0;
1249 
1250 	proc_klist_lock();
1251 	if (kn->kn_fflags) {
1252 		knote_fill_kevent(kn, kev, kn->kn_hook32);
1253 		kn->kn_hook32 = 0;
1254 		res = 1;
1255 	}
1256 	proc_klist_unlock();
1257 	return res;
1258 }
1259 
1260 SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
1261 	.f_attach  = filt_procattach,
1262 	.f_detach  = filt_procdetach,
1263 	.f_event   = filt_procevent,
1264 	.f_touch   = filt_proctouch,
1265 	.f_process = filt_procprocess,
1266 };
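/*
 * Userspace sketch (illustrative only, not part of this file): a parent
 * watching a child it forked, using the NOTE_EXIT | NOTE_EXITSTATUS
 * combination that the permission check in filt_procattach() allows for
 * the parent:
 *
 *	pid_t child = fork();
 *	if (child == 0) { _exit(7); }
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, child, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);    // register
 *	kevent(kq, NULL, 0, &kev, 1, NULL);    // blocks until the child exits
 *
 * On delivery kev.fflags contains NOTE_EXIT | NOTE_EXITSTATUS and kev.data
 * carries the wait(2)-style status collected in kn_hook32 above
 * (WEXITSTATUS(kev.data) == 7 in this example).
 */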
1267 
1268 #pragma mark timer_filtops
1269 
1270 struct filt_timer_params {
1271 	uint64_t deadline; /* deadline in abs/cont time
1272 	                    *                      (or 0 if NOTE_ABSOLUTE and deadline is in past) */
1273 	uint64_t leeway;   /* leeway in abstime, or 0 if none */
1274 	uint64_t interval; /* interval in abstime or 0 if non-repeating timer */
1275 };
1276 
1277 /*
1278  * Values stored in the knote at rest (using Mach absolute time units)
1279  *
1280  * kn->kn_thcall        where the thread_call object is stored
1281  * kn->kn_ext[0]        next deadline or 0 if immediate expiration
1282  * kn->kn_ext[1]        leeway value
1283  * kn->kn_sdata         interval timer: the interval
1284  *                      absolute/deadline timer: 0
1285  * kn->kn_hook32        timer state (with gencount)
1286  *
1287  * TIMER_IDLE:
1288  *   The timer has either never been scheduled or been cancelled.
1289  *   It is safe to schedule a new one in this state.
1290  *
1291  * TIMER_ARMED:
1292  *   The timer has been scheduled
1293  *
1294  * TIMER_FIRED
1295  *   The timer has fired and an event needs to be delivered.
1296  *   When in this state, the callout may still be running.
1297  *
1298  * TIMER_IMMEDIATE
1299  *   The timer has fired at registration time, and the callout was never
1300  *   dispatched.
1301  */
1302 #define TIMER_IDLE       0x0
1303 #define TIMER_ARMED      0x1
1304 #define TIMER_FIRED      0x2
1305 #define TIMER_IMMEDIATE  0x3
1306 #define TIMER_STATE_MASK 0x3
1307 #define TIMER_GEN_INC    0x4
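/*
 * Illustrative sketch (assumed values, not part of this file) of the
 * kn_hook32 encoding described above: the low two bits hold the TIMER_*
 * state and the remaining bits hold a generation count bumped by
 * TIMER_GEN_INC on every re-arm.
 *
 *	uint32_t armed = (3 * TIMER_GEN_INC) | TIMER_ARMED;  // generation 3, armed
 *	uint32_t fired = armed ^ TIMER_ARMED ^ TIMER_FIRED;  // generation 3, fired
 *
 * filt_timerexpire() only performs its ARMED -> FIRED cmpxchg when the whole
 * word (generation included) still matches what filt_timerarm() handed to the
 * thread call, so a late expiration from an older generation is debounced.
 */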
1308 
1309 static void
1310 filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
1311 {
1312 	kn->kn_ext[0] = params->deadline;
1313 	kn->kn_ext[1] = params->leeway;
1314 	kn->kn_sdata  = params->interval;
1315 }
1316 
1317 /*
1318  * filt_timervalidate - process data from user
1319  *
1320  * Sets up the deadline, interval, and leeway from the provided user data
1321  *
1322  * Input:
1323  *      kn_sdata        timer deadline or interval time
1324  *      kn_sfflags      style of timer, unit of measurement
1325  *
1326  * Output:
1327  *      struct filter_timer_params to apply to the filter with
1328  *      struct filt_timer_params to apply to the filter with
1329  *      filt_timer_set_params when changes are ready to be committed.
1330  * Returns:
1331  *      EINVAL          Invalid user data parameters
1332  *      ERANGE          Various overflows with the parameters
1333  *
1334  * Called with timer filter lock held.
1335  */
1336 static int
1337 filt_timervalidate(const struct kevent_qos_s *kev,
1338     struct filt_timer_params *params)
1339 {
1340 	/*
1341 	 * There are 5 knobs that need to be chosen for a timer registration:
1342 	 *
1343 	 * A) Units of time (what is the time duration of the specified number)
1344 	 *      Absolute and interval take:
1345 	 *              NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME
1346 	 *      Defaults to milliseconds if not specified
1347 	 *
1348 	 * B) Clock epoch (what is the zero point of the specified number)
1349 	 *      For interval, there is none
1350 	 *      For absolute, defaults to the gettimeofday/calendar epoch
1351 	 *      With NOTE_MACHTIME, uses mach_absolute_time()
1352 	 *      With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time()
1353 	 *
1354 	 * C) The knote's behavior on delivery
1355 	 *      Interval timer causes the knote to arm for the next interval unless one-shot is set
1356 	 *      Absolute is a forced one-shot timer which deletes on delivery
1357 	 *      TODO: Add a way for absolute to be not forced one-shot
1358 	 *
1359 	 * D) Whether the time duration is relative to now or absolute
1360 	 *      Interval fires at now + duration when it is set up
1361 	 *      Absolute fires at now + difference between now walltime and passed in walltime
1362 	 *      With NOTE_MACHTIME it fires at an absolute MAT or MCT.
1363 	 *
1364 	 * E) Whether the timer continues to tick across sleep
1365 	 *      By default all three do not.
1366 	 *      For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep
1367 	 *      With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME:
1368 	 *              expires when mach_continuous_time() is > the passed in value.
1369 	 */
1370 
1371 	uint64_t multiplier;
1372 
1373 	boolean_t use_abstime = FALSE;
1374 
1375 	switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
1376 	case NOTE_SECONDS:
1377 		multiplier = NSEC_PER_SEC;
1378 		break;
1379 	case NOTE_USECONDS:
1380 		multiplier = NSEC_PER_USEC;
1381 		break;
1382 	case NOTE_NSECONDS:
1383 		multiplier = 1;
1384 		break;
1385 	case NOTE_MACHTIME:
1386 		multiplier = 0;
1387 		use_abstime = TRUE;
1388 		break;
1389 	case 0: /* milliseconds (default) */
1390 		multiplier = NSEC_PER_SEC / 1000;
1391 		break;
1392 	default:
1393 		return EINVAL;
1394 	}
1395 
1396 	/* transform the leeway in kn_ext[1] to same time scale */
1397 	if (kev->fflags & NOTE_LEEWAY) {
1398 		uint64_t leeway_abs;
1399 
1400 		if (use_abstime) {
1401 			leeway_abs = (uint64_t)kev->ext[1];
1402 		} else {
1403 			uint64_t leeway_ns;
1404 			if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
1405 				return ERANGE;
1406 			}
1407 
1408 			nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
1409 		}
1410 
1411 		params->leeway = leeway_abs;
1412 	} else {
1413 		params->leeway = 0;
1414 	}
1415 
1416 	if (kev->fflags & NOTE_ABSOLUTE) {
1417 		uint64_t deadline_abs;
1418 
1419 		if (use_abstime) {
1420 			deadline_abs = (uint64_t)kev->data;
1421 		} else {
1422 			uint64_t calendar_deadline_ns;
1423 
1424 			if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
1425 				return ERANGE;
1426 			}
1427 
1428 			/* calendar_deadline_ns is in nanoseconds since the epoch */
1429 
1430 			clock_sec_t seconds;
1431 			clock_nsec_t nanoseconds;
1432 
1433 			/*
1434 			 * Note that the conversion through wall-time is only done once.
1435 			 *
1436 			 * If the relationship between MAT and gettimeofday changes,
1437 			 * the underlying timer does not update.
1438 			 *
1439 			 * TODO: build a wall-time denominated timer_call queue
1440 			 * and a flag to request DTRTing with wall-time timers
1441 			 */
1442 			clock_get_calendar_nanotime(&seconds, &nanoseconds);
1443 
1444 			uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;
1445 
1446 			/* if deadline is in the future */
1447 			if (calendar_now_ns < calendar_deadline_ns) {
1448 				uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
1449 				uint64_t interval_abs;
1450 
1451 				nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1452 
1453 				/*
1454 				 * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only
1455 				 * causes the timer to keep ticking across sleep, but
1456 				 * it does not change the calendar timebase.
1457 				 */
1458 
1459 				if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1460 					clock_continuoustime_interval_to_deadline(interval_abs,
1461 					    &deadline_abs);
1462 				} else {
1463 					clock_absolutetime_interval_to_deadline(interval_abs,
1464 					    &deadline_abs);
1465 				}
1466 			} else {
1467 				deadline_abs = 0; /* cause immediate expiration */
1468 			}
1469 		}
1470 
1471 		params->deadline = deadline_abs;
1472 		params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
1473 	} else if (kev->data < 0) {
1474 		/*
1475 		 * Negative interval timers fire immediately, once.
1476 		 *
1477 		 * Ideally a negative interval would be an error, but certain clients
1478 		 * pass negative values on accident, and expect an event back.
1479 		 *
1480 		 * In the old implementation the timer would repeat with no delay
1481 		 * N times until mach_absolute_time() + (N * interval) underflowed,
1482 		 * then it would wait ~forever by accidentally arming a timer for the far future.
1483 		 *
1484 		 * We now skip the power-wasting hot spin phase and go straight to the idle phase.
1485 		 */
1486 
1487 		params->deadline = 0; /* expire immediately */
1488 		params->interval = 0; /* non-repeating */
1489 	} else {
1490 		uint64_t interval_abs = 0;
1491 
1492 		if (use_abstime) {
1493 			interval_abs = (uint64_t)kev->data;
1494 		} else {
1495 			uint64_t interval_ns;
1496 			if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
1497 				return ERANGE;
1498 			}
1499 
1500 			nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1501 		}
1502 
1503 		uint64_t deadline = 0;
1504 
1505 		if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1506 			clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
1507 		} else {
1508 			clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
1509 		}
1510 
1511 		params->deadline = deadline;
1512 		params->interval = interval_abs;
1513 	}
1514 
1515 	return 0;
1516 }
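/*
 * Userspace sketch (illustrative only, not part of this file) of the knobs
 * validated above: register an EVFILT_TIMER that fires five wall-clock
 * seconds from now using NOTE_ABSOLUTE with NOTE_SECONDS units.
 *
 *	#include <sys/event.h>
 *	#include <time.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD,
 *	    NOTE_ABSOLUTE | NOTE_SECONDS, time(NULL) + 5, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);    // register
 *	kevent(kq, NULL, 0, &kev, 1, NULL);    // blocks ~5s, then delivers
 *
 * Because NOTE_ABSOLUTE implies EV_ONESHOT (see filt_timerattach() below),
 * the knote is deleted after the event is delivered.
 */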
1517 
1518 /*
1519  * filt_timerexpire - the timer callout routine
1520  */
1521 static void
1522 filt_timerexpire(void *knx, void *state_on_arm)
1523 {
1524 	struct knote *kn = knx;
1525 
1526 	uint32_t state = (uint32_t)(uintptr_t)state_on_arm;
1527 	uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED;
1528 
1529 	if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) {
1530 		// our f_event always would say FILTER_ACTIVE,
1531 		// so be leaner and just do it.
1532 		struct kqueue *kq = knote_get_kq(kn);
1533 		kqlock(kq);
1534 		knote_activate(kq, kn, FILTER_ACTIVE);
1535 		kqunlock(kq);
1536 	} else {
1537 		/*
1538 		 * The timer has been reprogrammed or canceled since it was armed,
1539 		 * and this is a late firing for the timer, just ignore it.
1540 		 */
1541 	}
1542 }
1543 
1544 /*
1545  * Does this deadline needs a timer armed for it, or has it expired?
1546  */
1547 static bool
1548 filt_timer_is_ready(struct knote *kn)
1549 {
1550 	uint64_t now, deadline = kn->kn_ext[0];
1551 
1552 	if (deadline == 0) {
1553 		return true;
1554 	}
1555 
1556 	if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1557 		now = mach_continuous_time();
1558 	} else {
1559 		now = mach_absolute_time();
1560 	}
1561 	return deadline <= now;
1562 }
1563 
1564 /*
1565  * Arm a timer
1566  *
1567  * It is the responsibility of the caller to make sure the timer call
1568  * has completed or been cancelled properly prior to arming it.
1569  */
1570 static void
1571 filt_timerarm(struct knote *kn)
1572 {
1573 	uint64_t deadline = kn->kn_ext[0];
1574 	uint64_t leeway   = kn->kn_ext[1];
1575 	uint32_t state;
1576 
1577 	int filter_flags = kn->kn_sfflags;
1578 	unsigned int timer_flags = 0;
1579 
1580 	if (filter_flags & NOTE_CRITICAL) {
1581 		timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1582 	} else if (filter_flags & NOTE_BACKGROUND) {
1583 		timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1584 	} else {
1585 		timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1586 	}
1587 
1588 	if (filter_flags & NOTE_LEEWAY) {
1589 		timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1590 	}
1591 
1592 	if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
1593 		timer_flags |= THREAD_CALL_CONTINUOUS;
1594 	}
1595 
1596 	/*
1597 	 * Move to ARMED.
1598 	 *
1599 	 * We increase the gencount, and setup the thread call with this expected
1600 	 * state. It means that if there was a previous generation of the timer in
1601 	 * flight that needs to be ignored, then 3 things are possible:
1602 	 *
1603 	 * - the timer fires first, filt_timerexpire() and sets the state to FIRED
1604 	 *   but we clobber it with ARMED and a new gencount. The knote will still
1605 	 *   be activated, but filt_timerprocess() which is serialized with this
1606 	 *   call will not see the FIRED bit set and will not deliver an event.
1607 	 *
1608 	 * - this code runs first, but filt_timerexpire() comes second. Because it
1609 	 *   knows an old gencount, it will debounce and not activate the knote.
1610 	 *
1611 	 * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below
1612 	 *   will just cancel it properly.
1613 	 *
1614 	 * This is important as userspace expects to never be woken up for past
1615 	 * timers after filt_timertouch ran.
1616 	 */
1617 	state = os_atomic_load(&kn->kn_hook32, relaxed);
1618 	state &= ~TIMER_STATE_MASK;
1619 	state += TIMER_GEN_INC + TIMER_ARMED;
1620 	os_atomic_store(&kn->kn_hook32, state, relaxed);
1621 
1622 	thread_call_enter_delayed_with_leeway(kn->kn_thcall,
1623 	    (void *)(uintptr_t)state, deadline, leeway, timer_flags);
1624 }
1625 
1626 /*
1627  * Mark a timer as "already fired" when it is being reprogrammed
1628  *
1629  * If there is a timer in flight, this will do a best effort at canceling it,
1630  * but will not wait. If the thread call was in flight, having set the
1631  * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this
1632  * cancelation.
1633  */
1634 static void
1635 filt_timerfire_immediate(struct knote *kn)
1636 {
1637 	uint32_t state;
1638 
1639 	static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK,
1640 	    "validate that this atomic or will transition to IMMEDIATE");
1641 	state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1642 
1643 	if ((state & TIMER_STATE_MASK) == TIMER_ARMED) {
1644 		thread_call_cancel(kn->kn_thcall);
1645 	}
1646 }
1647 
1648 /*
1649  * Allocate a thread call for the knote's lifetime, and kick off the timer.
1650  */
1651 static int
1652 filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
1653 {
1654 	thread_call_t callout;
1655 	struct filt_timer_params params;
1656 	int error;
1657 
1658 	if ((error = filt_timervalidate(kev, &params)) != 0) {
1659 		knote_set_error(kn, error);
1660 		return 0;
1661 	}
1662 
1663 	callout = thread_call_allocate_with_options(filt_timerexpire,
1664 	    (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
1665 	    THREAD_CALL_OPTIONS_ONCE);
1666 
1667 	if (NULL == callout) {
1668 		knote_set_error(kn, ENOMEM);
1669 		return 0;
1670 	}
1671 
1672 	filt_timer_set_params(kn, &params);
1673 	kn->kn_thcall = callout;
1674 	kn->kn_flags |= EV_CLEAR;
1675 	os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
1676 
1677 	/* NOTE_ABSOLUTE implies EV_ONESHOT */
1678 	if (kn->kn_sfflags & NOTE_ABSOLUTE) {
1679 		kn->kn_flags |= EV_ONESHOT;
1680 	}
1681 
1682 	if (filt_timer_is_ready(kn)) {
1683 		os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1684 		return FILTER_ACTIVE;
1685 	} else {
1686 		filt_timerarm(kn);
1687 		return 0;
1688 	}
1689 }
1690 
1691 /*
1692  * Shut down the timer if it's running, and free the callout.
1693  */
1694 static void
1695 filt_timerdetach(struct knote *kn)
1696 {
1697 	__assert_only boolean_t freed;
1698 
1699 	/*
1700 	 * Unconditionally cancel to make sure there can't be any filt_timerexpire()
1701 	 * running anymore.
1702 	 */
1703 	thread_call_cancel_wait(kn->kn_thcall);
1704 	freed = thread_call_free(kn->kn_thcall);
1705 	assert(freed);
1706 }
1707 
1708 /*
1709  * filt_timertouch - update timer knote with new user input
1710  *
1711  * Cancel and restart the timer based on new user data. When
1712  * the user picks up a knote, clear the count of how many timer
1713  * pops have gone off (in kn_data).
1714  */
1715 static int
1716 filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
1717 {
1718 	struct filt_timer_params params;
1719 	uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
1720 	int error;
1721 
1722 	if (changed_flags & NOTE_ABSOLUTE) {
1723 		kev->flags |= EV_ERROR;
1724 		kev->data = EINVAL;
1725 		return 0;
1726 	}
1727 
1728 	if ((error = filt_timervalidate(kev, &params)) != 0) {
1729 		kev->flags |= EV_ERROR;
1730 		kev->data = error;
1731 		return 0;
1732 	}
1733 
1734 	/* capture the new values used to compute deadline */
1735 	filt_timer_set_params(kn, &params);
1736 	kn->kn_sfflags = kev->fflags;
1737 
1738 	if (filt_timer_is_ready(kn)) {
1739 		filt_timerfire_immediate(kn);
1740 		return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
1741 	} else {
1742 		filt_timerarm(kn);
1743 		return FILTER_UPDATE_REQ_QOS;
1744 	}
1745 }
1746 
1747 /*
1748  * filt_timerprocess - query state of knote and snapshot event data
1749  *
1750  * Determine if the timer has fired in the past, snapshot the state
1751  * of the kevent for returning to user-space, and clear pending event
1752  * counters for the next time.
1753  */
1754 static int
1755 filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
1756 {
1757 	uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed);
1758 
1759 	/*
1760 	 * filt_timerprocess is serialized with any filter routine except for
1761 	 * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
1762 	 * transition, and on success, activates the knote.
1763 	 *
1764 	 * Hence, we don't need atomic modifications of the state, only to peek at
1765 	 * whether we see any of the "FIRED" state, and if we do, it is safe to
1766 	 * do simple state machine transitions.
1767 	 */
1768 	switch (state & TIMER_STATE_MASK) {
1769 	case TIMER_IDLE:
1770 	case TIMER_ARMED:
1771 		/*
1772 		 * This can happen if a touch resets a timer that had fired
1773 		 * without being processed
1774 		 */
1775 		return 0;
1776 	}
1777 
1778 	os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed);
1779 
1780 	/*
1781 	 * Copy out the interesting kevent state,
1782 	 * but don't leak out the raw time calculations.
1783 	 *
1784 	 * TODO: potential enhancements - tell the user about:
1785 	 *      - deadline to which this timer thought it was expiring
1786 	 *      - return kn_sfflags in the fflags field so the client can know
1787 	 *        under what flags the timer fired
1788 	 */
1789 	knote_fill_kevent(kn, kev, 1);
1790 	kev->ext[0] = 0;
1791 	/* kev->ext[1] = 0;  JMM - shouldn't we hide this too? */
1792 
1793 	if (kn->kn_sdata != 0) {
1794 		/*
1795 		 * This is a 'repeating' timer, so we have to emit
1796 		 * how many intervals expired between the arm
1797 		 * and the process.
1798 		 *
1799 		 * A very strange style of interface, because
1800 		 * this could easily be done in the client...
1801 		 */
1802 
1803 		uint64_t now;
1804 
1805 		if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1806 			now = mach_continuous_time();
1807 		} else {
1808 			now = mach_absolute_time();
1809 		}
1810 
1811 		uint64_t first_deadline = kn->kn_ext[0];
1812 		uint64_t interval_abs   = kn->kn_sdata;
1813 		uint64_t orig_arm_time  = first_deadline - interval_abs;
1814 
1815 		assert(now > orig_arm_time);
1816 		assert(now > first_deadline);
1817 
1818 		uint64_t elapsed = now - orig_arm_time;
1819 
1820 		uint64_t num_fired = elapsed / interval_abs;
1821 
1822 		/*
1823 		 * To reach this code, we must have seen the timer pop
1824 		 * and be in repeating mode, so therefore it must have been
1825 		 * more than 'interval' time since the attach or last
1826 		 * successful touch.
1827 		 */
1828 		assert(num_fired > 0);
1829 
1830 		/* report how many intervals have elapsed to the user */
1831 		kev->data = (int64_t)num_fired;
1832 
1833 		/* We only need to re-arm the timer if it's not about to be destroyed */
1834 		if ((kn->kn_flags & EV_ONESHOT) == 0) {
1835 			/* fire at the end of the next interval */
1836 			uint64_t new_deadline = first_deadline + num_fired * interval_abs;
1837 
1838 			assert(new_deadline > now);
1839 
1840 			kn->kn_ext[0] = new_deadline;
1841 
1842 			/*
1843 			 * This can't shortcut setting up the thread call, because
1844			 * knote_process deactivates EV_CLEAR knotes unconditionally.
1845 			 */
1846 			filt_timerarm(kn);
1847 		}
1848 	}
1849 
1850 	return FILTER_ACTIVE;
1851 }
1852 
1853 SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
1854 	.f_extended_codes = true,
1855 	.f_attach   = filt_timerattach,
1856 	.f_detach   = filt_timerdetach,
1857 	.f_event    = filt_bad_event,
1858 	.f_touch    = filt_timertouch,
1859 	.f_process  = filt_timerprocess,
1860 };
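
/*
 * Illustrative userspace sketch (not part of this file; assumes the standard
 * <sys/event.h> userspace API) showing how the timer filter above is driven.
 * Error handling omitted:
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	// repeating timer firing every 500ms (data is in milliseconds by default,
 *	// see NOTE_SECONDS/NOTE_USECONDS/NOTE_NSECONDS for other units)
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	// block for a pop; for repeating timers kev.data reports how many
 *	// intervals elapsed since the last delivery (see filt_timerprocess above)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */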
1861 
1862 #pragma mark user_filtops
1863 
1864 static int
1865 filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1866 {
1867 	if (kn->kn_sfflags & NOTE_TRIGGER) {
1868 		kn->kn_hook32 = FILTER_ACTIVE;
1869 	} else {
1870 		kn->kn_hook32 = 0;
1871 	}
1872 	return kn->kn_hook32;
1873 }
1874 
1875 static int
1876 filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
1877 {
1878 	uint32_t ffctrl;
1879 	int fflags;
1880 
1881 	ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1882 	fflags = kev->fflags & NOTE_FFLAGSMASK;
1883 	switch (ffctrl) {
1884 	case NOTE_FFNOP:
1885 		break;
1886 	case NOTE_FFAND:
1887 		kn->kn_sfflags &= fflags;
1888 		break;
1889 	case NOTE_FFOR:
1890 		kn->kn_sfflags |= fflags;
1891 		break;
1892 	case NOTE_FFCOPY:
1893 		kn->kn_sfflags = fflags;
1894 		break;
1895 	}
1896 	kn->kn_sdata = kev->data;
1897 
1898 	if (kev->fflags & NOTE_TRIGGER) {
1899 		kn->kn_hook32 = FILTER_ACTIVE;
1900 	}
1901 	return (int)kn->kn_hook32;
1902 }
1903 
1904 static int
1905 filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
1906 {
1907 	int result = (int)kn->kn_hook32;
1908 
1909 	if (result) {
1910 		/* EVFILT_USER returns the data that was passed in */
1911 		knote_fill_kevent_with_sdata(kn, kev);
1912 		kev->fflags = kn->kn_sfflags;
1913 		if (kn->kn_flags & EV_CLEAR) {
1914 			/* knote_fill_kevent cleared kn_fflags */
1915 			kn->kn_hook32 = 0;
1916 		}
1917 	}
1918 
1919 	return result;
1920 }
1921 
1922 SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
1923 	.f_extended_codes = true,
1924 	.f_attach  = filt_userattach,
1925 	.f_detach  = filt_no_detach,
1926 	.f_event   = filt_bad_event,
1927 	.f_touch   = filt_usertouch,
1928 	.f_process = filt_userprocess,
1929 };
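
/*
 * Illustrative userspace sketch (not part of this file; assumes the standard
 * <sys/event.h> userspace API) for the EVFILT_USER filter above. One side
 * registers the event, the other fires it with NOTE_TRIGGER:
 *
 *	struct kevent kev;
 *
 *	// register a user event identified by ident 42
 *	EV_SET(&kev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	// trigger it, OR-ing a flag bit into the low 24 fflags bits
 *	EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER | NOTE_FFOR | 0x1, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * NOTE_FFNOP/NOTE_FFAND/NOTE_FFOR/NOTE_FFCOPY select how the low 24 bits of
 * fflags are merged into the knote, as handled by filt_usertouch() above.
 */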
1930 
1931 #pragma mark workloop_filtops
1932 
1933 #define EPREEMPTDISABLED (-1)
1934 
1935 static inline void
1936 filt_wllock(struct kqworkloop *kqwl)
1937 {
1938 	lck_spin_lock(&kqwl->kqwl_statelock);
1939 }
1940 
1941 static inline void
1942 filt_wlunlock(struct kqworkloop *kqwl)
1943 {
1944 	lck_spin_unlock(&kqwl->kqwl_statelock);
1945 }
1946 
1947 /*
1948  * Returns true when the interlock for the turnstile is the workqueue lock
1949  *
1950  * When this is the case, all turnstiles operations are delegated
1951  * to the workqueue subsystem.
1952  *
1953  * This is required because kqueue_threadreq_bind_prepost only holds the
1954  * workqueue lock but needs to move the inheritor from the workloop turnstile
1955  * away from the creator thread, so that this now fulfilled request cannot be
1956  * picked anymore by other threads.
1957  */
1958 static inline bool
1959 filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
1960 {
1961 	return kqr_thread_requested_pending(&kqwl->kqwl_request);
1962 }
1963 
1964 static void
1965 filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
1966     turnstile_update_flags_t flags)
1967 {
1968 	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1969 	workq_threadreq_t kqr = &kqwl->kqwl_request;
1970 
1971 	/*
1972 	 * binding to the workq should always happen through
1973 	 * workq_kern_threadreq_update_inheritor()
1974 	 */
1975 	assert(!filt_wlturnstile_interlock_is_workq(kqwl));
1976 
1977 	if ((inheritor = kqwl->kqwl_owner)) {
1978 		flags |= TURNSTILE_INHERITOR_THREAD;
1979 	} else if ((inheritor = kqr_thread(kqr))) {
1980 		flags |= TURNSTILE_INHERITOR_THREAD;
1981 	}
1982 
1983 	turnstile_update_inheritor(ts, inheritor, flags);
1984 }
1985 
1986 #define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
1987 #define FILT_WLATTACH 0
1988 #define FILT_WLTOUCH  1
1989 #define FILT_WLDROP   2
1990 
1991 __result_use_check
1992 static int
1993 filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
1994     struct kevent_qos_s *kev, kq_index_t qos_index, int op)
1995 {
1996 	user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
1997 	workq_threadreq_t kqr = &kqwl->kqwl_request;
1998 	thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
1999 	kq_index_t cur_override = THREAD_QOS_UNSPECIFIED;
2000 	int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2001 	int action = KQWL_UTQ_NONE, error = 0;
2002 	bool wl_inheritor_updated = false, needs_wake = false;
2003 	uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2004 	uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2005 	uint64_t udata = 0;
2006 	struct turnstile *ts = TURNSTILE_NULL;
2007 
2008 	filt_wllock(kqwl);
2009 
2010 again:
2011 	new_owner = cur_owner = kqwl->kqwl_owner;
2012 
2013 	/*
2014 	 * Phase 1:
2015 	 *
2016 	 * If asked, load the uint64 value at the user provided address and compare
2017 	 * it against the passed in mask and expected value.
2018 	 *
2019 	 * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
2020 	 * a thread reference.
2021 	 *
2022 	 * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
2023 	 * the current thread, then end ownership.
2024 	 *
2025 	 * Lastly decide whether we need to perform a QoS update.
2026 	 */
2027 	if (uaddr) {
2028 		/*
2029 		 * Until <rdar://problem/24999882> exists,
2030		 * disabling preemption during copyin forces any
2031 		 * vm_fault we encounter to fail.
2032 		 */
2033 		error = copyin_atomic64(uaddr, &udata);
2034 
2035 		/*
2036 		 * If we get EFAULT, drop locks, and retry.
2037		 * If we still get an error, report it,
2038 		 * else assume the memory has been faulted
2039 		 * and attempt to copyin under lock again.
2040 		 */
2041 		switch (error) {
2042 		case 0:
2043 			break;
2044 		case EFAULT:
2045 			if (efault_retry-- > 0) {
2046 				filt_wlunlock(kqwl);
2047 				error = copyin_atomic64(uaddr, &udata);
2048 				filt_wllock(kqwl);
2049 				if (error == 0) {
2050 					goto again;
2051 				}
2052 			}
2053 			OS_FALLTHROUGH;
2054 		default:
2055 			goto out;
2056 		}
2057 
2058 		/* Update state as copied in.  */
2059 		kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2060 
2061 		if ((udata & mask) != (kdata & mask)) {
2062 			error = ESTALE;
2063 		} else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
2064 			/*
2065 			 * Decipher the owner port name, and translate accordingly.
2066 			 * The low 2 bits were borrowed for other flags, so mask them off.
2067 			 *
2068 			 * Then attempt translation to a thread reference or fail.
2069 			 */
2070 			mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
2071 			if (name != MACH_PORT_NULL) {
2072 				name = ipc_entry_name_mask(name);
2073 				extra_thread_ref = port_name_to_thread(name,
2074 				    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2075 				if (extra_thread_ref == THREAD_NULL) {
2076 					error = EOWNERDEAD;
2077 					goto out;
2078 				}
2079 				new_owner = extra_thread_ref;
2080 			}
2081 		}
2082 	}
2083 
2084 	if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
2085 		new_owner = THREAD_NULL;
2086 	}
2087 
2088 	if (error == 0) {
2089 		if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
2090 			action = KQWL_UTQ_SET_QOS_INDEX;
2091 		} else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
2092 			action = KQWL_UTQ_SET_QOS_INDEX;
2093 		}
2094 
2095 		if (op == FILT_WLTOUCH) {
2096 			/*
2097			 * Save off any additional fflags/data we just accepted,
2098			 * but only keep the last round of "update" bits we acted on,
2099			 * which helps debugging a lot.
2100 			 */
2101 			kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
2102 			kn->kn_sfflags |= kev->fflags;
2103 			if (kev->fflags & NOTE_WL_SYNC_WAKE) {
2104 				needs_wake = (kn->kn_thread != THREAD_NULL);
2105 			}
2106 		} else if (op == FILT_WLDROP) {
2107 			if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
2108 			    NOTE_WL_SYNC_WAIT) {
2109 				/*
2110 				 * When deleting a SYNC_WAIT knote that hasn't been woken up
2111 				 * explicitly, issue a wake up.
2112 				 */
2113 				kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
2114 				needs_wake = (kn->kn_thread != THREAD_NULL);
2115 			}
2116 		}
2117 	}
2118 
2119 	/*
2120 	 * Phase 2:
2121 	 *
2122 	 * Commit ownership and QoS changes if any, possibly wake up waiters
2123 	 */
2124 
2125 	if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
2126 		goto out;
2127 	}
2128 
2129 	kqlock(kqwl);
2130 
2131 	/* If already tracked as servicer, don't track as owner */
2132 	if (new_owner == kqr_thread(kqr)) {
2133 		new_owner = THREAD_NULL;
2134 	}
2135 
2136 	if (cur_owner != new_owner) {
2137 		kqwl->kqwl_owner = new_owner;
2138 		if (new_owner == extra_thread_ref) {
2139			/* we just transferred this ref to kqwl_owner */
2140 			extra_thread_ref = THREAD_NULL;
2141 		}
2142 		cur_override = kqworkloop_override(kqwl);
2143 
2144 		if (new_owner) {
2145 			/* override it before we drop the old */
2146 			if (cur_override != THREAD_QOS_UNSPECIFIED) {
2147 				thread_add_kevent_override(new_owner, cur_override);
2148 			}
2149 			if (kqr_thread_requested_pending(kqr)) {
2150 				if (action == KQWL_UTQ_NONE) {
2151 					action = KQWL_UTQ_REDRIVE_EVENTS;
2152 				}
2153 			}
2154 		} else if (action == KQWL_UTQ_NONE &&
2155 		    !kqr_thread_requested(kqr) &&
2156 		    kqwl->kqwl_wakeup_qos) {
2157 			action = KQWL_UTQ_REDRIVE_EVENTS;
2158 		}
2159 	}
2160 
2161 	if (action != KQWL_UTQ_NONE) {
2162 		kqworkloop_update_threads_qos(kqwl, action, qos_index);
2163 	}
2164 
2165 	ts = kqwl->kqwl_turnstile;
2166 	if (cur_owner != new_owner && ts) {
2167 		if (action == KQWL_UTQ_REDRIVE_EVENTS) {
2168 			/*
2169 			 * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
2170 			 * the code went through workq_kern_threadreq_initiate()
2171 			 * and the workqueue has set the inheritor already
2172 			 */
2173 			assert(filt_wlturnstile_interlock_is_workq(kqwl));
2174 		} else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2175 			workq_kern_threadreq_lock(kqwl->kqwl_p);
2176 			workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
2177 			    ts, TURNSTILE_IMMEDIATE_UPDATE);
2178 			workq_kern_threadreq_unlock(kqwl->kqwl_p);
2179 			if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2180 				/*
2181 				 * If the workq is no longer the interlock, then
2182 				 * workq_kern_threadreq_update_inheritor() has finished a bind
2183 				 * and we need to fallback to the regular path.
2184 				 */
2185 				filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2186 			}
2187 			wl_inheritor_updated = true;
2188 		} else {
2189 			filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2190 			wl_inheritor_updated = true;
2191 		}
2192 
2193 		/*
2194 		 * We need a turnstile reference because we are dropping the interlock
2195 		 * and the caller has not called turnstile_prepare.
2196 		 */
2197 		if (wl_inheritor_updated) {
2198 			turnstile_reference(ts);
2199 		}
2200 	}
2201 
2202 	if (needs_wake && ts) {
2203 		waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
2204 		    kn->kn_thread, THREAD_AWAKENED);
2205 		if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
2206 			disable_preemption();
2207 			error = EPREEMPTDISABLED;
2208 		}
2209 	}
2210 
2211 	kqunlock(kqwl);
2212 
2213 out:
2214 	/*
2215 	 * Phase 3:
2216 	 *
2217 	 * Unlock and cleanup various lingering references and things.
2218 	 */
2219 	filt_wlunlock(kqwl);
2220 
2221 #if CONFIG_WORKLOOP_DEBUG
2222 	KQWL_HISTORY_WRITE_ENTRY(kqwl, {
2223 		.updater = current_thread(),
2224 		.servicer = kqr_thread(kqr), /* Note: racy */
2225 		.old_owner = cur_owner,
2226 		.new_owner = new_owner,
2227 
2228 		.kev_ident  = kev->ident,
2229 		.error      = (int16_t)error,
2230 		.kev_flags  = kev->flags,
2231 		.kev_fflags = kev->fflags,
2232 
2233 		.kev_mask   = mask,
2234 		.kev_value  = kdata,
2235 		.in_value   = udata,
2236 	});
2237 #endif // CONFIG_WORKLOOP_DEBUG
2238 
2239 	if (wl_inheritor_updated) {
2240 		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2241 		turnstile_deallocate_safe(ts);
2242 	}
2243 
2244 	if (cur_owner && new_owner != cur_owner) {
2245 		if (cur_override != THREAD_QOS_UNSPECIFIED) {
2246 			thread_drop_kevent_override(cur_owner);
2247 		}
2248 		thread_deallocate_safe(cur_owner);
2249 	}
2250 	if (extra_thread_ref) {
2251 		thread_deallocate_safe(extra_thread_ref);
2252 	}
2253 	return error;
2254 }
2255 
2256 /*
2257  * Remembers the last update that came in from userspace for debugging purposes.
2258  * - fflags is mirrored from the userspace kevent
2259  * - ext[i, i != VALUE] is mirrored from the userspace kevent
2260  * - ext[VALUE] is set to what the kernel loaded atomically
2261  * - data is set to the error if any
2262  */
2263 static inline void
2264 filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
2265     int error)
2266 {
2267 	kn->kn_fflags = kev->fflags;
2268 	kn->kn_sdata = error;
2269 	memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
2270 }
2271 
2272 static int
2273 filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
2274     struct kevent_qos_s *kev, int op)
2275 {
2276 	user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR];
2277 	uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2278 	uint64_t mask  = kev->ext[EV_EXTIDX_WL_MASK];
2279 	uint64_t udata = 0;
2280 	int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2281 	int error = 0;
2282 
2283 	if (op == FILT_WLATTACH) {
2284 		(void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
2285 	} else if (uaddr == 0) {
2286 		return 0;
2287 	}
2288 
2289 	filt_wllock(kqwl);
2290 
2291 again:
2292 
2293 	/*
2294 	 * Do the debounce thing, the lock serializing the state is the knote lock.
2295 	 */
2296 	if (uaddr) {
2297 		/*
2298 		 * Until <rdar://problem/24999882> exists,
2299		 * disabling preemption during copyin forces any
2300 		 * vm_fault we encounter to fail.
2301 		 */
2302 		error = copyin_atomic64(uaddr, &udata);
2303 
2304 		/*
2305 		 * If we get EFAULT, drop locks, and retry.
2306		 * If we still get an error, report it,
2307 		 * else assume the memory has been faulted
2308 		 * and attempt to copyin under lock again.
2309 		 */
2310 		switch (error) {
2311 		case 0:
2312 			break;
2313 		case EFAULT:
2314 			if (efault_retry-- > 0) {
2315 				filt_wlunlock(kqwl);
2316 				error = copyin_atomic64(uaddr, &udata);
2317 				filt_wllock(kqwl);
2318 				if (error == 0) {
2319 					goto again;
2320 				}
2321 			}
2322 			OS_FALLTHROUGH;
2323 		default:
2324 			goto out;
2325 		}
2326 
2327 		kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2328 		kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;
2329 
2330 		if ((udata & mask) != (kdata & mask)) {
2331 			error = ESTALE;
2332 			goto out;
2333 		}
2334 	}
2335 
2336 	if (op == FILT_WLATTACH) {
2337 		error = filt_wlattach_sync_ipc(kn);
2338 		if (error == 0) {
2339 			disable_preemption();
2340 			error = EPREEMPTDISABLED;
2341 		}
2342 	}
2343 
2344 out:
2345 	filt_wlunlock(kqwl);
2346 	return error;
2347 }
2348 
2349 static int
2350 filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
2351 {
2352 	struct kqueue *kq = knote_get_kq(kn);
2353 	struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2354 	int error = 0, result = 0;
2355 	kq_index_t qos_index = 0;
2356 
2357 	if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
2358 		error = ENOTSUP;
2359 		goto out;
2360 	}
2361 
2362 	uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
2363 	switch (command) {
2364 	case NOTE_WL_THREAD_REQUEST:
2365 		if (kn->kn_id != kqwl->kqwl_dynamicid) {
2366 			error = EINVAL;
2367 			goto out;
2368 		}
2369 		qos_index = _pthread_priority_thread_qos(kn->kn_qos);
2370 		if (qos_index == THREAD_QOS_UNSPECIFIED) {
2371 			error = ERANGE;
2372 			goto out;
2373 		}
2374 		if (kqwl->kqwl_request.tr_kq_qos_index) {
2375 			/*
2376			 * There already is a thread request, and only one is allowed
2377			 * per workloop, so fail the attach.
2378 			 */
2379 			error = EALREADY;
2380 			goto out;
2381 		}
2382 		break;
2383 	case NOTE_WL_SYNC_WAIT:
2384 	case NOTE_WL_SYNC_WAKE:
2385 		if (kn->kn_id == kqwl->kqwl_dynamicid) {
2386 			error = EINVAL;
2387 			goto out;
2388 		}
2389 		if ((kn->kn_flags & EV_DISABLE) == 0) {
2390 			error = EINVAL;
2391 			goto out;
2392 		}
2393 		if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
2394 			error = EINVAL;
2395 			goto out;
2396 		}
2397 		break;
2398 
2399 	case NOTE_WL_SYNC_IPC:
2400 		if ((kn->kn_flags & EV_DISABLE) == 0) {
2401 			error = EINVAL;
2402 			goto out;
2403 		}
2404 		if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
2405 			error = EINVAL;
2406 			goto out;
2407 		}
2408 		break;
2409 	default:
2410 		error = EINVAL;
2411 		goto out;
2412 	}
2413 
2414 	if (command == NOTE_WL_SYNC_IPC) {
2415 		error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
2416 	} else {
2417 		error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
2418 	}
2419 
2420 	if (error == EPREEMPTDISABLED) {
2421 		error = 0;
2422 		result = FILTER_THREADREQ_NODEFEER;
2423 	}
2424 out:
2425 	if (error) {
2426 		/* If userland wants ESTALE to be hidden, fail the attach anyway */
2427 		if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
2428 			error = 0;
2429 		}
2430 		knote_set_error(kn, error);
2431 		return result;
2432 	}
2433 	if (command == NOTE_WL_SYNC_WAIT) {
2434 		return kevent_register_wait_prepare(kn, kev, result);
2435 	}
2436 	/* Just attaching the thread request successfully will fire it */
2437 	if (command == NOTE_WL_THREAD_REQUEST) {
2438 		/*
2439 		 * Thread Request knotes need an explicit touch to be active again,
2440 		 * so delivering an event needs to also consume it.
2441 		 */
2442 		kn->kn_flags |= EV_CLEAR;
2443 		return result | FILTER_ACTIVE;
2444 	}
2445 	return result;
2446 }
2447 
2448 static void __dead2
2449 filt_wlwait_continue(void *parameter, wait_result_t wr)
2450 {
2451 	struct _kevent_register *cont_args = parameter;
2452 	struct kqworkloop *kqwl = cont_args->kqwl;
2453 
2454 	kqlock(kqwl);
2455 	if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2456 		workq_kern_threadreq_lock(kqwl->kqwl_p);
2457 		turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2458 		workq_kern_threadreq_unlock(kqwl->kqwl_p);
2459 	} else {
2460 		turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2461 	}
2462 	kqunlock(kqwl);
2463 
2464 	turnstile_cleanup();
2465 
2466 	if (wr == THREAD_INTERRUPTED) {
2467 		cont_args->kev.flags |= EV_ERROR;
2468 		cont_args->kev.data = EINTR;
2469 	} else if (wr != THREAD_AWAKENED) {
2470 		panic("Unexpected wait result: %d", wr);
2471 	}
2472 
2473 	kevent_register_wait_return(cont_args);
2474 }
2475 
2476 /*
2477  * Called with the workloop mutex held; most of the time it never returns, as it
2478  * calls filt_wlwait_continue through a continuation.
2479  */
2480 static void __dead2
2481 filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
2482     struct _kevent_register *cont_args)
2483 {
2484 	struct kqworkloop *kqwl = cont_args->kqwl;
2485 	workq_threadreq_t kqr = &kqwl->kqwl_request;
2486 	struct turnstile *ts;
2487 	bool workq_locked = false;
2488 
2489 	kqlock_held(kqwl);
2490 
2491 	if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2492 		workq_kern_threadreq_lock(kqwl->kqwl_p);
2493 		workq_locked = true;
2494 	}
2495 
2496 	ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
2497 	    TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
2498 
2499 	if (workq_locked) {
2500 		workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
2501 		    &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
2502 		    TURNSTILE_DELAYED_UPDATE);
2503 		if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2504 			/*
2505 			 * if the interlock is no longer the workqueue lock,
2506 			 * then we don't need to hold it anymore.
2507 			 */
2508 			workq_kern_threadreq_unlock(kqwl->kqwl_p);
2509 			workq_locked = false;
2510 		}
2511 	}
2512 	if (!workq_locked) {
2513 		/*
2514 		 * If the interlock is the workloop's, then it's our responsibility to
2515 		 * call update_inheritor, so just do it.
2516 		 */
2517 		filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
2518 	}
2519 
2520 	thread_set_pending_block_hint(get_machthread(uth), kThreadWaitWorkloopSyncWait);
2521 	waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
2522 	    THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
2523 
2524 	if (workq_locked) {
2525 		workq_kern_threadreq_unlock(kqwl->kqwl_p);
2526 	}
2527 
2528 	thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
2529 	if (thread) {
2530 		thread_reference(thread);
2531 	}
2532 
2533 	kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
2534 }
2535 
2536 /* called in stackshot context to report the thread responsible for blocking this thread */
2537 void
2538 kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
2539     event64_t event, thread_waitinfo_t *waitinfo)
2540 {
2541 	struct knote *kn = (struct knote *)event;
2542 
2543 	zone_require(knote_zone, kn);
2544 
2545 	assert(kn->kn_thread == thread);
2546 
2547 	struct kqueue *kq = knote_get_kq(kn);
2548 
2549 	zone_require(kqworkloop_zone, kq);
2550 	assert(kq->kq_state & KQ_WORKLOOP);
2551 
2552 	struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2553 	workq_threadreq_t kqr = &kqwl->kqwl_request;
2554 
2555 	thread_t kqwl_owner = kqwl->kqwl_owner;
2556 
2557 	if (kqwl_owner != THREAD_NULL) {
2558 		thread_require(kqwl_owner);
2559 		waitinfo->owner = thread_tid(kqwl->kqwl_owner);
2560 	} else if (kqr_thread_requested_pending(kqr)) {
2561 		waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
2562 	} else if ((kqr->tr_state >= WORKQ_TR_STATE_BINDING) && (kqr->tr_thread != NULL)) {
2563 		thread_require(kqr->tr_thread);
2564 		waitinfo->owner = thread_tid(kqr->tr_thread);
2565 	} else {
2566 		waitinfo->owner = 0;
2567 	}
2568 
2569 	waitinfo->context = kqwl->kqwl_dynamicid;
2570 }
2571 
2572 static void
2573 filt_wldetach(struct knote *kn)
2574 {
2575 	if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
2576 		filt_wldetach_sync_ipc(kn);
2577 	} else if (kn->kn_thread) {
2578 		kevent_register_wait_cleanup(kn);
2579 	}
2580 }
2581 
2582 static int
2583 filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
2584     thread_qos_t *qos_index)
2585 {
2586 	uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
2587 	uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
2588 
2589 	if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
2590 		return EINVAL;
2591 	}
2592 	if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2593 		if (kev->flags & EV_DELETE) {
2594 			return EINVAL;
2595 		}
2596 		if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2597 			return EINVAL;
2598 		}
2599 		if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
2600 			return ERANGE;
2601 		}
2602 	}
2603 
2604 	switch (new_commands) {
2605 	case NOTE_WL_THREAD_REQUEST:
2606 		/* thread requests can only update themselves */
2607 		if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2608 			return EINVAL;
2609 		}
2610 		break;
2611 
2612 	case NOTE_WL_SYNC_WAIT:
2613 		if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
2614 			return EINVAL;
2615 		}
2616 		goto sync_checks;
2617 
2618 	case NOTE_WL_SYNC_WAKE:
2619 sync_checks:
2620 		if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
2621 			return EINVAL;
2622 		}
2623 		if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2624 			return EINVAL;
2625 		}
2626 		break;
2627 
2628 	case NOTE_WL_SYNC_IPC:
2629 		if (sav_commands != NOTE_WL_SYNC_IPC) {
2630 			return EINVAL;
2631 		}
2632 		if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2633 			return EINVAL;
2634 		}
2635 		break;
2636 
2637 	default:
2638 		return EINVAL;
2639 	}
2640 	return 0;
2641 }
2642 
2643 static int
2644 filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
2645 {
2646 	struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2647 	thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
2648 	int result = 0;
2649 
2650 	int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
2651 	if (error) {
2652 		goto out;
2653 	}
2654 
2655 	uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
2656 	if (command == NOTE_WL_SYNC_IPC) {
2657 		error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
2658 	} else {
2659 		error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
2660 		filt_wlremember_last_update(kn, kev, error);
2661 	}
2662 	if (error == EPREEMPTDISABLED) {
2663 		error = 0;
2664 		result = FILTER_THREADREQ_NODEFEER;
2665 	}
2666 
2667 out:
2668 	if (error) {
2669 		if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2670 			/* If userland wants ESTALE to be hidden, do not activate */
2671 			return result;
2672 		}
2673 		kev->flags |= EV_ERROR;
2674 		kev->data = error;
2675 		return result;
2676 	}
2677 	if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
2678 		return kevent_register_wait_prepare(kn, kev, result);
2679 	}
2680 	/* Just touching the thread request successfully will fire it */
2681 	if (command == NOTE_WL_THREAD_REQUEST) {
2682 		if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2683 			result |= FILTER_UPDATE_REQ_QOS;
2684 		}
2685 		result |= FILTER_ACTIVE;
2686 	}
2687 	return result;
2688 }
2689 
2690 static bool
2691 filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
2692 {
2693 	struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2694 
2695 	int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
2696 	if (error) {
2697 		goto out;
2698 	}
2699 
2700 	uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
2701 	if (command == NOTE_WL_SYNC_IPC) {
2702 		error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
2703 	} else {
2704 		error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
2705 		filt_wlremember_last_update(kn, kev, error);
2706 	}
2707 	assert(error != EPREEMPTDISABLED);
2708 
2709 out:
2710 	if (error) {
2711 		if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2712 			return false;
2713 		}
2714 		kev->flags |= EV_ERROR;
2715 		kev->data = error;
2716 		return false;
2717 	}
2718 	return true;
2719 }
2720 
2721 static int
2722 filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
2723 {
2724 	struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2725 	int rc = 0;
2726 
2727 	assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
2728 
2729 	kqlock(kqwl);
2730 
2731 	if (kqwl->kqwl_owner) {
2732 		/*
2733		 * <rdar://problem/33584321> userspace can sometimes cause the thread
2734		 * request knote to be processed when events are delivered but do not
2735		 * trigger a drain session.
2736 		 *
2737 		 * When that happens, the automatic deactivation due to process
2738 		 * would swallow the event, so we have to activate the knote again.
2739 		 */
2740 		knote_activate(kqwl, kn, FILTER_ACTIVE);
2741 	} else {
2742 #if DEBUG || DEVELOPMENT
2743 		if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
2744 			/*
2745 			 * see src/queue_internal.h in libdispatch
2746 			 */
2747 #define DISPATCH_QUEUE_ENQUEUED 0x1ull
2748 			user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
2749 			task_t t = current_task();
2750 			uint64_t val;
2751 			if (addr && task_is_active(t) && !task_is_halting(t) &&
2752 			    copyin_atomic64(addr, &val) == 0 &&
2753 			    val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
2754 			    (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
2755 				panic("kevent: workloop %#016llx is not enqueued "
2756 				    "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
2757 				    kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
2758 			}
2759 		}
2760 #endif
2761 		knote_fill_kevent(kn, kev, 0);
2762 		kev->fflags = kn->kn_sfflags;
2763 		rc |= FILTER_ACTIVE;
2764 	}
2765 
2766 	kqunlock(kqwl);
2767 
2768 	if (rc & FILTER_ACTIVE) {
2769 		workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
2770 	}
2771 	return rc;
2772 }
2773 
2774 SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
2775 	.f_extended_codes = true,
2776 	.f_attach  = filt_wlattach,
2777 	.f_detach  = filt_wldetach,
2778 	.f_event   = filt_bad_event,
2779 	.f_touch   = filt_wltouch,
2780 	.f_process = filt_wlprocess,
2781 	.f_allow_drop = filt_wlallow_drop,
2782 	.f_post_register_wait = filt_wlpost_register_wait,
2783 };
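
/*
 * Summary of the EVFILT_WORKLOOP commands accepted by the attach/touch/drop
 * paths above (selected via NOTE_WL_COMMANDS_MASK):
 *
 * - NOTE_WL_THREAD_REQUEST: ident must be the workloop's dynamic id and only
 *   one is allowed per workloop; it behaves as EV_CLEAR and needs an explicit
 *   touch to become active again after each delivery.
 *
 * - NOTE_WL_SYNC_WAIT / NOTE_WL_SYNC_WAKE: must be registered EV_DISABLE'd
 *   with an ident distinct from the workloop id; SYNC_WAIT parks the
 *   registering thread on the workloop turnstile until the knote is touched
 *   with SYNC_WAKE (or deleted, which issues the wake itself).
 *
 * - NOTE_WL_SYNC_IPC: must be EV_DISABLE'd and cannot be combined with
 *   NOTE_WL_UPDATE_QOS or NOTE_WL_DISCOVER_OWNER.
 */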
2784 
2785 #pragma mark - kqueues allocation and deallocation
2786 
2787 OS_NOINLINE
2788 static void
2789 kqworkloop_dealloc(struct kqworkloop *, bool hash_remove);
2790 
2791 static inline bool
2792 kqworkloop_try_retain(struct kqworkloop *kqwl)
2793 {
2794 	return os_ref_retain_try_raw(&kqwl->kqwl_retains, NULL);
2795 }
2796 
2797 static inline void
2798 kqworkloop_retain(struct kqworkloop *kqwl)
2799 {
2800 	return os_ref_retain_raw(&kqwl->kqwl_retains, NULL);
2801 }
2802 
2803 OS_ALWAYS_INLINE
2804 static inline void
2805 kqueue_retain(kqueue_t kqu)
2806 {
2807 	if (kqu.kq->kq_state & KQ_DYNAMIC) {
2808 		kqworkloop_retain(kqu.kqwl);
2809 	}
2810 }
2811 
2812 OS_ALWAYS_INLINE
2813 static inline void
2814 kqworkloop_release_live(struct kqworkloop *kqwl)
2815 {
2816 	os_ref_release_live_raw(&kqwl->kqwl_retains, NULL);
2817 }
2818 
2819 OS_ALWAYS_INLINE
2820 static inline void
2821 kqueue_release_live(kqueue_t kqu)
2822 {
2823 	if (kqu.kq->kq_state & KQ_DYNAMIC) {
2824 		kqworkloop_release_live(kqu.kqwl);
2825 	}
2826 }
2827 
2828 OS_ALWAYS_INLINE
2829 static inline void
2830 kqworkloop_release(struct kqworkloop *kqwl)
2831 {
2832 	if (os_ref_release_raw(&kqwl->kqwl_retains, NULL) == 0) {
2833 		kqworkloop_dealloc(kqwl, true);
2834 	}
2835 }
2836 
2837 OS_ALWAYS_INLINE
2838 static inline void
2839 kqueue_release(kqueue_t kqu)
2840 {
2841 	if (kqu.kq->kq_state & KQ_DYNAMIC) {
2842 		kqworkloop_release(kqu.kqwl);
2843 	}
2844 }
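
/*
 * Note: only dynamically allocated kqueues (workloops, KQ_DYNAMIC) are
 * reference counted; for kqfiles and the per-process workq kqueue the
 * retain/release helpers above are no-ops.
 */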
2845 
2846 /*!
2847  * @function kqueue_destroy
2848  *
2849  * @brief
2850  * Common part to all kqueue dealloc functions.
2851  */
2852 OS_NOINLINE
2853 static void
2854 kqueue_destroy(kqueue_t kqu, zone_t zone)
2855 {
2856 	lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp);
2857 
2858 	zfree(zone, kqu.kq);
2859 }
2860 
2861 /*!
2862  * @function kqueue_init
2863  *
2864  * @brief
2865  * Common part to all kqueue alloc functions.
2866  */
2867 static kqueue_t
2868 kqueue_init(kqueue_t kqu)
2869 {
2870 	lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL);
2871 	return kqu;
2872 }
2873 
2874 #pragma mark kqfile allocation and deallocation
2875 
2876 /*!
2877  * @function kqueue_dealloc
2878  *
2879  * @brief
2880  * Detach all knotes from a kqfile and free it.
2881  *
2882  * @discussion
2883  * We walk each list looking for knotes referencing this
2884  * kqueue.  If we find one, we try to drop it.  But
2885  * if we fail to get a drop reference, that will wait
2886  * until it is dropped.  So, we can just restart again
2887  * safe in the assumption that the list will eventually
2888  * not contain any more references to this kqueue (either
2889  * we dropped them all, or someone else did).
2890  *
2891  * Assumes no new events are being added to the kqueue.
2892  * Nothing locked on entry or exit.
2893  */
2894 void
2895 kqueue_dealloc(struct kqueue *kq)
2896 {
2897 	KNOTE_LOCK_CTX(knlc);
2898 	struct proc *p = kq->kq_p;
2899 	struct filedesc *fdp = &p->p_fd;
2900 	struct knote *kn;
2901 
2902 	assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
2903 
2904 	proc_fdlock(p);
2905 	for (int i = 0; i < fdp->fd_knlistsize; i++) {
2906 		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2907 		while (kn != NULL) {
2908 			if (kq == knote_get_kq(kn)) {
2909 				kqlock(kq);
2910 				proc_fdunlock(p);
2911 				if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2912 					knote_drop(kq, kn, &knlc);
2913 				}
2914 				proc_fdlock(p);
2915 				/* start over at beginning of list */
2916 				kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2917 				continue;
2918 			}
2919 			kn = SLIST_NEXT(kn, kn_link);
2920 		}
2921 	}
2922 
2923 	knhash_lock(fdp);
2924 	proc_fdunlock(p);
2925 
2926 	if (fdp->fd_knhashmask != 0) {
2927 		for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
2928 			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2929 			while (kn != NULL) {
2930 				if (kq == knote_get_kq(kn)) {
2931 					kqlock(kq);
2932 					knhash_unlock(fdp);
2933 					if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2934 						knote_drop(kq, kn, &knlc);
2935 					}
2936 					knhash_lock(fdp);
2937 					/* start over at beginning of list */
2938 					kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2939 					continue;
2940 				}
2941 				kn = SLIST_NEXT(kn, kn_link);
2942 			}
2943 		}
2944 	}
2945 	knhash_unlock(fdp);
2946 
2947 	kqueue_destroy(kq, kqfile_zone);
2948 }
2949 
2950 /*!
2951  * @function kqueue_alloc
2952  *
2953  * @brief
2954  * Allocate a kqfile.
2955  */
2956 struct kqueue *
2957 kqueue_alloc(struct proc *p)
2958 {
2959 	struct kqfile *kqf;
2960 
2961 	/*
2962 	 * kqfiles are created with kqueue() so we need to wait for
2963 	 * the first kevent syscall to know which bit among
2964 	 * KQ_KEV_{32,64,QOS} will be set in kqf_state
2965 	 */
2966 	kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO);
2967 	kqf->kqf_p = p;
2968 	TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
2969 	TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);
2970 
2971 	return kqueue_init(kqf).kq;
2972 }
2973 
2974 /*!
2975  * @function kqueue_internal
2976  *
2977  * @brief
2978  * Core implementation for kqueue and guarded_kqueue_np()
2979  */
2980 int
2981 kqueue_internal(struct proc *p, fp_initfn_t fp_init, void *initarg, int32_t *retval)
2982 {
2983 	struct kqueue *kq;
2984 	struct fileproc *fp;
2985 	int fd, error;
2986 
2987 	error = falloc_withinit(p, &fp, &fd, vfs_context_current(),
2988 	    fp_init, initarg);
2989 	if (error) {
2990 		return error;
2991 	}
2992 
2993 	kq = kqueue_alloc(p);
2994 	if (kq == NULL) {
2995 		fp_free(p, fd, fp);
2996 		return ENOMEM;
2997 	}
2998 
2999 	fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
3000 	fp->f_flag = FREAD | FWRITE;
3001 	fp->f_ops = &kqueueops;
3002 	fp_set_data(fp, kq);
3003 	fp->f_lflags |= FG_CONFINED;
3004 
3005 	proc_fdlock(p);
3006 	procfdtbl_releasefd(p, fd, NULL);
3007 	fp_drop(p, fd, fp, 1);
3008 	proc_fdunlock(p);
3009 
3010 	*retval = fd;
3011 	return error;
3012 }
3013 
3014 /*!
3015  * @function kqueue
3016  *
3017  * @brief
3018  * The kqueue syscall.
3019  */
3020 int
3021 kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
3022 {
3023 	return kqueue_internal(p, NULL, NULL, retval);
3024 }
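
/*
 * Illustrative userspace sketch (not part of this file; assumes the standard
 * <sys/event.h> userspace API). The descriptor returned by the syscall above
 * is marked close-on-exec and close-on-fork (FP_CLOEXEC | FP_CLOFORK) and is
 * confined to the process (FG_CONFINED):
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		// register
 *	int n = kevent(kq, NULL, 0, &kev, 1, NULL);	// block for one event
 */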
3025 
3026 #pragma mark kqworkq allocation and deallocation
3027 
3028 /*!
3029  * @function kqworkq_dealloc
3030  *
3031  * @brief
3032  * Deallocates a workqueue kqueue.
3033  *
3034  * @discussion
3035  * This only happens at process death, or for races with concurrent
3036  * kevent_get_kqwq calls, hence we don't have to care about knotes referencing
3037  * this kqueue: either there are none, or someone else took care of them.
3038  */
3039 void
3040 kqworkq_dealloc(struct kqworkq *kqwq)
3041 {
3042 	kqueue_destroy(kqwq, kqworkq_zone);
3043 }
3044 
3045 /*!
3046  * @function kqworkq_alloc
3047  *
3048  * @brief
3049  * Allocates a workqueue kqueue.
3050  *
3051  * @discussion
3052  * This is the slow path of kevent_get_kqwq.
3053  * This takes care of making sure procs have a single workq kqueue.
3054  */
3055 OS_NOINLINE
3056 static struct kqworkq *
3057 kqworkq_alloc(struct proc *p, unsigned int flags)
3058 {
3059 	struct kqworkq *kqwq, *tmp;
3060 
3061 	kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO);
3062 
3063 	assert((flags & KEVENT_FLAG_LEGACY32) == 0);
3064 	if (flags & KEVENT_FLAG_LEGACY64) {
3065 		kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
3066 	} else {
3067 		kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
3068 	}
3069 	kqwq->kqwq_p = p;
3070 
3071 	for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3072 		TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
3073 		TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
3074 	}
3075 	for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3076 		/*
3077 		 * Because of how the bucketized system works, we mix overcommit
3078		 * sources with non-overcommit ones: each time we move a knote from
3079		 * one bucket to the next due to overrides, we'd have to track
3080		 * overcommitness, and it's really not worth it in a workloop-enabled
3081		 * world that tracks this faithfully.
3082 		 *
3083 		 * Incidentally, this behaves like the original manager-based
3084 		 * kqwq where event delivery always happened (hence is
3085 		 * "overcommit")
3086 		 */
3087 		kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
3088 		kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
3089 		if (i != KQWQ_QOS_MANAGER) {
3090 			kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
3091 		}
3092 		kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i + 1;
3093 	}
3094 
3095 	kqueue_init(kqwq);
3096 
3097 	if (!os_atomic_cmpxchgv(&p->p_fd.fd_wqkqueue, NULL, kqwq, &tmp, release)) {
3098 		kqworkq_dealloc(kqwq);
3099 		return tmp;
3100 	}
3101 
3102 	return kqwq;
3103 }
3104 
3105 #pragma mark kqworkloop allocation and deallocation
3106 
3107 #define KQ_HASH(val, mask)  (((val) ^ (val >> 8)) & (mask))
3108 #define CONFIG_KQ_HASHSIZE  CONFIG_KN_HASHSIZE
3109 
3110 OS_ALWAYS_INLINE
3111 static inline void
3112 kqhash_lock(struct filedesc *fdp)
3113 {
3114 	lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
3115 }
3116 
3117 OS_ALWAYS_INLINE
3118 static inline void
3119 kqhash_unlock(struct filedesc *fdp)
3120 {
3121 	lck_mtx_unlock(&fdp->fd_kqhashlock);
3122 }
3123 
3124 OS_ALWAYS_INLINE
3125 static inline void
3126 kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
3127     struct kqworkloop *kqwl)
3128 {
3129 	struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3130 	LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
3131 }
3132 
3133 OS_ALWAYS_INLINE
3134 static inline struct kqworkloop *
3135 kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
3136 {
3137 	struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3138 	struct kqworkloop *kqwl;
3139 
3140 	LIST_FOREACH(kqwl, list, kqwl_hashlink) {
3141 		if (kqwl->kqwl_dynamicid == id) {
3142 			return kqwl;
3143 		}
3144 	}
3145 	return NULL;
3146 }
3147 
3148 static struct kqworkloop *
3149 kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
3150 {
3151 	struct kqworkloop *kqwl = NULL;
3152 
3153 	kqhash_lock(fdp);
3154 	if (__probable(fdp->fd_kqhash)) {
3155 		kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
3156 		if (kqwl && !kqworkloop_try_retain(kqwl)) {
3157 			kqwl = NULL;
3158 		}
3159 	}
3160 	kqhash_unlock(fdp);
3161 	return kqwl;
3162 }
3163 
3164 OS_NOINLINE
3165 static void
3166 kqworkloop_hash_init(struct filedesc *fdp)
3167 {
3168 	struct kqwllist *alloc_hash;
3169 	u_long alloc_mask;
3170 
3171 	kqhash_unlock(fdp);
3172 	alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
3173 	kqhash_lock(fdp);
3174 
3175 	/* See if we won the race */
3176 	if (__probable(fdp->fd_kqhashmask == 0)) {
3177 		fdp->fd_kqhash = alloc_hash;
3178 		fdp->fd_kqhashmask = alloc_mask;
3179 	} else {
3180 		kqhash_unlock(fdp);
3181 		hashdestroy(alloc_hash, M_KQUEUE, alloc_mask);
3182 		kqhash_lock(fdp);
3183 	}
3184 }
3185 
3186 /*
3187  * kqueue iotier override is only supported for a kqueue that has
3188  * a single mach port attached as its only source. Updating the iotier
3189  * override on the mach port source updates the override
3190  * on the kqueue as well. Since a kqueue with an iotier override
3191  * only has one port attached, there is no saturation logic as
3192  * there is for QoS overrides; the iotier override of the mach port
3193  * source is reflected directly in the kevent iotier override.
3194  */
3195 void
3196 kqueue_set_iotier_override(kqueue_t kqu, uint8_t iotier_override)
3197 {
3198 	if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3199 		return;
3200 	}
3201 
3202 	struct kqworkloop *kqwl = kqu.kqwl;
3203 	os_atomic_store(&kqwl->kqwl_iotier_override, iotier_override, relaxed);
3204 }
3205 
3206 uint8_t
3207 kqueue_get_iotier_override(kqueue_t kqu)
3208 {
3209 	if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3210 		return THROTTLE_LEVEL_END;
3211 	}
3212 
3213 	struct kqworkloop *kqwl = kqu.kqwl;
3214 	return os_atomic_load(&kqwl->kqwl_iotier_override, relaxed);
3215 }
3216 
3217 #if CONFIG_PREADOPT_TG
3218 /*
3219  * This function is called with a borrowed reference on the thread group, without
3220  * the kq lock held but with the mqueue lock held. It may or may not hold the knote
3221  * lock (it is called from both f_event and f_attach/f_touch). Upon success, an
3222  * additional reference on the TG is taken.
3223  */
3224 void
3225 kqueue_set_preadopted_thread_group(kqueue_t kqu, struct thread_group *tg, thread_qos_t qos)
3226 {
3227 	if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3228 		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_PREADOPT_NA),
3229 		    (uintptr_t)thread_tid(current_thread()), 0, 0, 0);
3230 		return;
3231 	}
3232 
3233 	struct kqworkloop *kqwl = kqu.kqwl;
3234 
3235 	assert(qos < THREAD_QOS_LAST);
3236 
3237 	thread_group_retain(tg);
3238 
3239 	thread_group_qos_t old_tg; thread_group_qos_t new_tg;
3240 	int ret = os_atomic_rmw_loop(&kqwl->kqwl_preadopt_tg, old_tg, new_tg, relaxed, {
3241 		if (!KQWL_CAN_ADOPT_PREADOPT_TG(old_tg)) {
3242 		        os_atomic_rmw_loop_give_up(break);
3243 		}
3244 
3245 		if (old_tg != KQWL_PREADOPTED_TG_NULL) {
3246 		        /*
3247 		         * Note that old_tg could be a NULL TG pointer but with a QoS
3248 		         * set. See also workq_thread_reset_pri.
3249 		         *
3250 		         * Compare the QoS of existing preadopted tg with new one and
3251 		         * only overwrite the thread group if we have one with a higher
3252 		         * QoS.
3253 		         */
3254 		        thread_qos_t existing_qos = KQWL_GET_PREADOPTED_TG_QOS(old_tg);
3255 		        if (existing_qos >= qos) {
3256 		                os_atomic_rmw_loop_give_up(break);
3257 			}
3258 		}
3259 
3260 		// Transfer the ref taken earlier in the function to the kqwl
3261 		new_tg = KQWL_ENCODE_PREADOPTED_TG_QOS(tg, qos);
3262 	});
3263 
3264 	if (ret) {
3265 		KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_INCOMING_IPC, old_tg, tg);
3266 
3267 		if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
3268 			thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(old_tg));
3269 		}
3270 
3271 		os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE, release);
3272 	} else {
3273 		// We failed to write to the kqwl_preadopt_tg, drop the ref we took
3274 		// earlier in the function
3275 		thread_group_deallocate_safe(tg);
3276 	}
3277 }
3278 
3279 /*
3280  * Called from fprocess of EVFILT_MACHPORT without the kqueue lock held.
3281  */
3282 bool
3283 kqueue_process_preadopt_thread_group(thread_t thread, struct kqueue *kq, struct thread_group *tg)
3284 {
3285 	bool success = false;
3286 	if (kq->kq_state & KQ_WORKLOOP) {
3287 		struct kqworkloop *kqwl = (struct kqworkloop *) kq;
3288 		thread_group_qos_t old_tg;
3289 		success = os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg,
3290 		    KQWL_PREADOPTED_TG_SENTINEL, KQWL_PREADOPTED_TG_PROCESSED,
3291 		    &old_tg, relaxed);
3292 		if (success) {
3293 			thread_set_preadopt_thread_group(thread, tg);
3294 		}
3295 
3296 		__assert_only thread_group_qos_t preadopt_tg;
3297 		preadopt_tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed);
3298 		assert(preadopt_tg == KQWL_PREADOPTED_TG_PROCESSED ||
3299 		    preadopt_tg == KQWL_PREADOPTED_TG_NEVER);
3300 	}
3301 
3302 	return success;
3303 }
3304 #endif
3305 
3306 /*!
3307  * @function kqworkloop_dealloc
3308  *
3309  * @brief
3310  * Deallocates a workloop kqueue.
3311  *
3312  * @discussion
3313  * Knotes hold references on the workloop, so we can't really reach this
3314  * function unless all of these are already gone.
3315  *
3316  * Nothing locked on entry or exit.
3317  *
3318  * @param hash_remove
3319  * Whether to remove the workloop from its hash table.
3320  */
3321 static void
3322 kqworkloop_dealloc(struct kqworkloop *kqwl, bool hash_remove)
3323 {
3324 	thread_t cur_owner;
3325 
3326 	cur_owner = kqwl->kqwl_owner;
3327 	if (cur_owner) {
3328 		if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
3329 			thread_drop_kevent_override(cur_owner);
3330 		}
3331 		thread_deallocate(cur_owner);
3332 		kqwl->kqwl_owner = THREAD_NULL;
3333 	}
3334 
3335 	if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
3336 		struct turnstile *ts;
3337 		turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
3338 		    &ts, TURNSTILE_WORKLOOPS);
3339 		turnstile_cleanup();
3340 		turnstile_deallocate(ts);
3341 	}
3342 
3343 	if (hash_remove) {
3344 		struct filedesc *fdp = &kqwl->kqwl_p->p_fd;
3345 
3346 		kqhash_lock(fdp);
3347 		LIST_REMOVE(kqwl, kqwl_hashlink);
3348 		kqhash_unlock(fdp);
3349 	}
3350 
3351 #if CONFIG_PREADOPT_TG
3352 	thread_group_qos_t tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed);
3353 	if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
3354 		thread_group_release(KQWL_GET_PREADOPTED_TG(tg));
3355 	}
3356 #endif
3357 
3358 	assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
3359 	assert(kqwl->kqwl_owner == THREAD_NULL);
3360 	assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);
3361 
3362 	lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp);
3363 	kqueue_destroy(kqwl, kqworkloop_zone);
3364 }
3365 
3366 /*!
3367  * @function kqworkloop_init
3368  *
3369  * @brief
3370  * Initializes a freshly allocated workloop kqueue.
3371  */
3372 static void
3373 kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
3374     kqueue_id_t id, workq_threadreq_param_t *trp)
3375 {
3376 	kqwl->kqwl_state     = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
3377 	os_ref_init_raw(&kqwl->kqwl_retains, NULL);
3378 	kqwl->kqwl_dynamicid = id;
3379 	kqwl->kqwl_p         = p;
3380 	if (trp) {
3381 		kqwl->kqwl_params = trp->trp_value;
3382 	}
3383 
3384 	workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
3385 	if (trp) {
3386 		if (trp->trp_flags & TRP_PRIORITY) {
3387 			tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
3388 		}
3389 		if (trp->trp_flags) {
3390 			tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
3391 		}
3392 	}
3393 	kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
3394 	kqwl->kqwl_request.tr_flags = tr_flags;
3395 	os_atomic_store(&kqwl->kqwl_iotier_override, (uint8_t)THROTTLE_LEVEL_END, relaxed);
3396 #if CONFIG_PREADOPT_TG
3397 	if (task_is_app(current_task())) {
3398 		/* Apps will never adopt a thread group that is not their own. This is a
3399 		 * gross hack to simulate the post-process that is done in the voucher
3400 		 * subsystem today for thread groups */
3401 		os_atomic_store(&kqwl->kqwl_preadopt_tg, KQWL_PREADOPTED_TG_NEVER, relaxed);
3402 	}
3403 #endif
3404 
3405 	for (int i = 0; i < KQWL_NBUCKETS; i++) {
3406 		TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
3407 	}
3408 	TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);
3409 
3410 	lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL);
3411 
3412 	kqueue_init(kqwl);
3413 }
3414 
3415 /*!
3416  * @function kqworkloop_get_or_create
3417  *
3418  * @brief
3419  * Wrapper around kqworkloop_init that handles allocation and uniquing of workloops.
3420  *
3421  * @returns
3422  * 0:      success
3423  * EINVAL: invalid parameters
3424  * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
3425  * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
3426  * ENOMEM: allocation failed
3427  */
3428 static int
3429 kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
3430     workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
3431 {
3432 	struct filedesc *fdp = &p->p_fd;
3433 	struct kqworkloop *alloc_kqwl = NULL;
3434 	struct kqworkloop *kqwl = NULL;
3435 	int error = 0;
3436 
3437 	assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
3438 
3439 	if (id == 0 || id == (kqueue_id_t)-1) {
3440 		return EINVAL;
3441 	}
3442 
3443 	for (;;) {
3444 		kqhash_lock(fdp);
3445 		if (__improbable(fdp->fd_kqhash == NULL)) {
3446 			kqworkloop_hash_init(fdp);
3447 		}
3448 
3449 		kqwl = kqworkloop_hash_lookup_locked(fdp, id);
3450 		if (kqwl) {
3451 			if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
3452 				/*
3453 				 * If MUST_NOT_EXIST was passed, even if we would have failed
3454 				 * the try_retain, it could have gone the other way, and
3455 				 * userspace can't tell. Let'em fix their race.
3456 				 */
3457 				error = EEXIST;
3458 				break;
3459 			}
3460 
3461 			if (__probable(kqworkloop_try_retain(kqwl))) {
3462 				/*
3463 				 * This is a valid live workloop!
3464 				 */
3465 				*kqwlp = kqwl;
3466 				error = 0;
3467 				break;
3468 			}
3469 		}
3470 
3471 		if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
3472 			error = ENOENT;
3473 			break;
3474 		}
3475 
3476 		/*
3477 		 * We didn't find what we were looking for.
3478 		 *
3479 		 * If this is the second time we reach this point (alloc_kqwl != NULL),
3480 		 * then we're done.
3481 		 *
3482 		 * If this is the first time we reach this point (alloc_kqwl == NULL),
3483 		 * then try to allocate one without blocking.
3484 		 */
3485 		if (__probable(alloc_kqwl == NULL)) {
3486 			alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO);
3487 		}
3488 		if (__probable(alloc_kqwl)) {
3489 			kqworkloop_init(alloc_kqwl, p, id, trp);
3490 			kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
3491 			kqhash_unlock(fdp);
3492 			*kqwlp = alloc_kqwl;
3493 			return 0;
3494 		}
3495 
3496 		/*
3497 		 * We have to block to allocate a workloop: drop the lock,
3498 		 * allocate one, and then retry the lookup as someone
3499 		 * else could have raced with us.
3500 		 */
3501 		kqhash_unlock(fdp);
3502 
3503 		alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO);
3504 	}
3505 
3506 	kqhash_unlock(fdp);
3507 
3508 	if (__improbable(alloc_kqwl)) {
3509 		zfree(kqworkloop_zone, alloc_kqwl);
3510 	}
3511 
3512 	return error;
3513 }
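
/*
 * The loop above is an instance of a common lookup-or-allocate pattern:
 * look up under the hash lock, allocate without blocking while the lock is
 * held, and only fall back to a blocking allocation after dropping the lock,
 * in which case the lookup must be retried because another thread may have
 * inserted the same id in the meantime. A simplified sketch of that shape,
 * using hypothetical helper names rather than kernel APIs:
 *
 *	for (;;) {
 *		lock(table);
 *		if ((found = table_lookup(table, id)) != NULL) {
 *			unlock(table);
 *			if (spare) free_obj(spare);   // lost the race, discard spare
 *			return found;
 *		}
 *		if (spare == NULL)
 *			spare = alloc_nonblocking();  // first pass: don't sleep
 *		if (spare != NULL) {
 *			table_insert(table, id, spare);
 *			unlock(table);
 *			return spare;                 // spare is now the published object
 *		}
 *		unlock(table);
 *		spare = alloc_blocking();             // may sleep; must retry the lookup
 *	}
 */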
3514 
3515 #pragma mark - knotes
3516 
3517 static int
3518 filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
3519 {
3520 	knote_set_error(kn, ENOTSUP);
3521 	return 0;
3522 }
3523 
3524 static void
3525 filt_no_detach(__unused struct knote *kn)
3526 {
3527 }
3528 
3529 static int __dead2
3530 filt_bad_event(struct knote *kn, long hint)
3531 {
3532 	panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
3533 }
3534 
3535 static int __dead2
3536 filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
3537 {
3538 	panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3539 }
3540 
3541 static int __dead2
3542 filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
3543 {
3544 	panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3545 }
3546 
3547 /*
3548  * knotes_dealloc - detach all knotes for the process and drop them
3549  *
3550  *		The process is in a state where it will not try to allocate
3551  *		any more knotes (it is stopped for exit or exec).
3552  */
3553 void
3554 knotes_dealloc(proc_t p)
3555 {
3556 	struct filedesc *fdp = &p->p_fd;
3557 	struct kqueue *kq;
3558 	struct knote *kn;
3559 	struct  klist *kn_hash = NULL;
3560 	u_long kn_hashmask;
3561 	int i;
3562 
3563 	proc_fdlock(p);
3564 
3565 	/* Close all the fd-indexed knotes up front */
3566 	if (fdp->fd_knlistsize > 0) {
3567 		for (i = 0; i < fdp->fd_knlistsize; i++) {
3568 			while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
3569 				kq = knote_get_kq(kn);
3570 				kqlock(kq);
3571 				proc_fdunlock(p);
3572 				knote_drop(kq, kn, NULL);
3573 				proc_fdlock(p);
3574 			}
3575 		}
3576 		/* free the table */
3577 		kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist);
3578 	}
3579 	fdp->fd_knlistsize = 0;
3580 
3581 	proc_fdunlock(p);
3582 
3583 	knhash_lock(fdp);
3584 
3585 	/* Clean out all the hashed knotes as well */
3586 	if (fdp->fd_knhashmask != 0) {
3587 		for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
3588 			while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
3589 				kq = knote_get_kq(kn);
3590 				kqlock(kq);
3591 				knhash_unlock(fdp);
3592 				knote_drop(kq, kn, NULL);
3593 				knhash_lock(fdp);
3594 			}
3595 		}
3596 		kn_hash = fdp->fd_knhash;
3597 		kn_hashmask = fdp->fd_knhashmask;
3598 		fdp->fd_knhashmask = 0;
3599 		fdp->fd_knhash = NULL;
3600 	}
3601 
3602 	knhash_unlock(fdp);
3603 
3604 	if (kn_hash) {
3605 		hashdestroy(kn_hash, M_KQUEUE, kn_hashmask);
3606 	}
3607 }
3608 
3609 /*
3610  * kqworkloops_dealloc - rebalance retains on kqworkloops created with
3611  * scheduling parameters
3612  *
3613  *		The process is in a state where it will not try to allocate
3614  *		any more knotes (it is stopped for exit or exec).
3615  */
3616 void
3617 kqworkloops_dealloc(proc_t p)
3618 {
3619 	struct filedesc *fdp = &p->p_fd;
3620 	struct kqworkloop *kqwl, *kqwln;
3621 	struct kqwllist tofree;
3622 
3623 	if (!fdt_flag_test(fdp, FD_WORKLOOP)) {
3624 		return;
3625 	}
3626 
3627 	kqhash_lock(fdp);
3628 
3629 	if (fdp->fd_kqhashmask == 0) {
3630 		kqhash_unlock(fdp);
3631 		return;
3632 	}
3633 
3634 	LIST_INIT(&tofree);
3635 
3636 	for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
3637 		LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
3638 			/*
3639 			 * kqworkloops that have scheduling parameters have an
3640 			 * implicit retain from kqueue_workloop_ctl that needs
3641 			 * to be balanced on process exit.
3642 			 */
3643 			assert(kqwl->kqwl_params);
3644 			LIST_REMOVE(kqwl, kqwl_hashlink);
3645 			LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
3646 		}
3647 	}
3648 
3649 	kqhash_unlock(fdp);
3650 
3651 	LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
3652 		uint32_t ref = os_ref_get_count_raw(&kqwl->kqwl_retains);
3653 		if (ref != 1) {
3654 			panic("kq(%p) invalid refcount %d", kqwl, ref);
3655 		}
3656 		kqworkloop_dealloc(kqwl, false);
3657 	}
3658 }
3659 
3660 static int
3661 kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
3662     struct kevent_qos_s *kev)
3663 {
3664 	/* We don't care about the priority of a disabled or deleted knote */
3665 	if (kev->flags & (EV_DISABLE | EV_DELETE)) {
3666 		return 0;
3667 	}
3668 
3669 	if (kq->kq_state & KQ_WORKLOOP) {
3670 		/*
3671 		 * Workloops need valid priorities with a QOS (excluding manager) for
3672 		 * any enabled knote.
3673 		 *
3674  * When the knote is pre-existing, just make sure it has a valid QoS, as
3675  * kevent_register() will not use the incoming priority (filters that do
3676  * use it have the responsibility to validate it again; see filt_wltouch).
3677  *
3678  * If the knote is being created, validate the incoming priority.
3679 		 */
3680 		if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
3681 			return ERANGE;
3682 		}
3683 	}
3684 
3685 	return 0;
3686 }
3687 
3688 /*
3689  * Prepare a filter for waiting after register.
3690  *
3691  * The f_post_register_wait hook will be called later by kevent_register()
3692  * and should call kevent_register_wait_block()
3693  */
3694 static int
3695 kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
3696 {
3697 	thread_t thread = current_thread();
3698 
3699 	assert(knote_fops(kn)->f_extended_codes);
3700 
3701 	if (kn->kn_thread == NULL) {
3702 		thread_reference(thread);
3703 		kn->kn_thread = thread;
3704 	} else if (kn->kn_thread != thread) {
3705 		/*
3706 		 * kn_thread may be set from a previous aborted wait.
3707 		 * However, it has to be from the same thread.
3708 		 */
3709 		kev->flags |= EV_ERROR;
3710 		kev->data = EXDEV;
3711 		return 0;
3712 	}
3713 
3714 	return FILTER_REGISTER_WAIT | rc;
3715 }
3716 
3717 /*
3718  * Cleanup a kevent_register_wait_prepare() effect for threads that have been
3719  * aborted instead of properly woken up with thread_wakeup_thread().
3720  */
3721 static void
3722 kevent_register_wait_cleanup(struct knote *kn)
3723 {
3724 	thread_t thread = kn->kn_thread;
3725 	kn->kn_thread = NULL;
3726 	thread_deallocate(thread);
3727 }
3728 
3729 /*
3730  * Must be called at the end of a f_post_register_wait call from a filter.
3731  */
3732 static void
3733 kevent_register_wait_block(struct turnstile *ts, thread_t thread,
3734     thread_continue_t cont, struct _kevent_register *cont_args)
3735 {
3736 	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
3737 	kqunlock(cont_args->kqwl);
3738 	cont_args->handoff_thread = thread;
3739 	thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE);
3740 }
3741 
3742 /*
3743  * Called by filters using an f_post_register_wait hook to return from their wait.
3744  */
3745 static void
3746 kevent_register_wait_return(struct _kevent_register *cont_args)
3747 {
3748 	struct kqworkloop *kqwl = cont_args->kqwl;
3749 	struct kevent_qos_s *kev = &cont_args->kev;
3750 	int error = 0;
3751 
3752 	if (cont_args->handoff_thread) {
3753 		thread_deallocate(cont_args->handoff_thread);
3754 	}
3755 
3756 	if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
3757 		if ((kev->flags & EV_ERROR) == 0) {
3758 			kev->flags |= EV_ERROR;
3759 			kev->data = 0;
3760 		}
3761 		error = kevent_modern_copyout(kev, &cont_args->ueventlist);
3762 		if (error == 0) {
3763 			cont_args->eventout++;
3764 		}
3765 	}
3766 
3767 	kqworkloop_release(kqwl);
3768 	if (error == 0) {
3769 		*(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
3770 	}
3771 	unix_syscall_return(error);
3772 }
3773 
3774 /*
3775  * kevent_register - add a new event to a kqueue
3776  *
3777  *	Creates a mapping between the event source and
3778  *	the kqueue via a knote data structure.
3779  *
3780  *	Because many/most of the event sources are file
3781  *	descriptor related, the knote is linked off
3782  *	the file descriptor table for quick access.
3783  *
3784  *	called with nothing locked
3785  *	caller holds a reference on the kqueue
3786  */
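/*
 * For reference, a minimal userspace sketch of the registration that ends up
 * in this function (assuming a standard BSD/macOS userspace; the descriptor
 * and udata pointer below are placeholders):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	static int
 *	watch_fd_readable(int kq, int fd, void *udata)
 *	{
 *		struct kevent kev;
 *
 *		// EV_ADD creates (or updates) the knote described above;
 *		// udata is carried through and returned with the event.
 *		EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, udata);
 *		return kevent(kq, &kev, 1, NULL, 0, NULL);
 *	}
 */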
3787 
3788 int
3789 kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
3790     struct knote **kn_out)
3791 {
3792 	struct proc *p = kq->kq_p;
3793 	const struct filterops *fops;
3794 	struct knote *kn = NULL;
3795 	int result = 0, error = 0;
3796 	unsigned short kev_flags = kev->flags;
3797 	KNOTE_LOCK_CTX(knlc);
3798 
3799 	if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
3800 		fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
3801 	} else {
3802 		error = EINVAL;
3803 		goto out;
3804 	}
3805 
3806 	/* restrict EV_VANISHED to adding udata-specific dispatch kevents */
3807 	if (__improbable((kev->flags & EV_VANISHED) &&
3808 	    (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
3809 		error = EINVAL;
3810 		goto out;
3811 	}
3812 
3813 	/* Simplify the flags - delete and disable overrule */
3814 	if (kev->flags & EV_DELETE) {
3815 		kev->flags &= ~EV_ADD;
3816 	}
3817 	if (kev->flags & EV_DISABLE) {
3818 		kev->flags &= ~EV_ENABLE;
3819 	}
3820 
3821 	if (kq->kq_state & KQ_WORKLOOP) {
3822 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
3823 		    ((struct kqworkloop *)kq)->kqwl_dynamicid,
3824 		    kev->udata, kev->flags, kev->filter);
3825 	} else if (kq->kq_state & KQ_WORKQ) {
3826 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
3827 		    0, kev->udata, kev->flags, kev->filter);
3828 	} else {
3829 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
3830 		    VM_KERNEL_UNSLIDE_OR_PERM(kq),
3831 		    kev->udata, kev->flags, kev->filter);
3832 	}
3833 
3834 restart:
3835 	/* find the matching knote from the fd tables/hashes */
3836 	kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
3837 	error = kevent_register_validate_priority(kq, kn, kev);
3838 	result = 0;
3839 	if (error) {
3840 		goto out;
3841 	}
3842 
3843 	if (kn == NULL && (kev->flags & EV_ADD) == 0) {
3844 		/*
3845 		 * No knote found, EV_ADD wasn't specified
3846 		 */
3847 
3848 		if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
3849 		    (kq->kq_state & KQ_WORKLOOP)) {
3850 			/*
3851 			 * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
3852 			 * that doesn't care about ENOENT, so just pretend the deletion
3853 			 * happened.
3854 			 */
3855 		} else {
3856 			error = ENOENT;
3857 		}
3858 		goto out;
3859 	} else if (kn == NULL) {
3860 		/*
3861 		 * No knote found, need to attach a new one (attach)
3862 		 */
3863 
3864 		struct fileproc *knote_fp = NULL;
3865 
3866 		/* grab a file reference for the new knote */
3867 		if (fops->f_isfd) {
3868 			if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) {
3869 				goto out;
3870 			}
3871 		}
3872 
3873 		kn = knote_alloc();
3874 		kn->kn_fp = knote_fp;
3875 		kn->kn_is_fd = fops->f_isfd;
3876 		kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED);
3877 		kn->kn_status = 0;
3878 
3879 		/* was vanish support requested */
3880 		if (kev->flags & EV_VANISHED) {
3881 			kev->flags &= ~EV_VANISHED;
3882 			kn->kn_status |= KN_REQVANISH;
3883 		}
3884 
3885 		/* snapshot matching/dispatching protocol flags into knote */
3886 		if (kev->flags & EV_DISABLE) {
3887 			kn->kn_status |= KN_DISABLED;
3888 		}
3889 
3890 		/*
3891 		 * copy the kevent state into knote
3892 		 * protocol is that fflags and data
3893 		 * are saved off, and cleared before
3894 		 * calling the attach routine.
3895 		 *
3896 		 * - kn->kn_sfflags aliases with kev->xflags
3897 		 * - kn->kn_sdata   aliases with kev->data
3898 		 * - kn->kn_filter  is the top 8 bits of kev->filter
3899 		 */
3900 		kn->kn_kevent  = *(struct kevent_internal_s *)kev;
3901 		kn->kn_sfflags = kev->fflags;
3902 		kn->kn_filtid  = (uint8_t)~kev->filter;
3903 		kn->kn_fflags  = 0;
3904 		knote_reset_priority(kq, kn, kev->qos);
3905 
3906 		/* Add the knote for lookup thru the fd table */
3907 		error = kq_add_knote(kq, kn, &knlc, p);
3908 		if (error) {
3909 			knote_free(kn);
3910 			if (knote_fp != NULL) {
3911 				fp_drop(p, (int)kev->ident, knote_fp, 0);
3912 			}
3913 
3914 			if (error == ERESTART) {
3915 				goto restart;
3916 			}
3917 			goto out;
3918 		}
3919 
3920 		/* fp reference count now applies to knote */
3921 
3922 		/*
3923 		 * we can't use filter_call() because f_attach can change the filter ops
3924 		 * for a filter that supports f_extended_codes, so we need to reload
3925 		 * knote_fops() and not use `fops`.
3926 		 */
3927 		result = fops->f_attach(kn, kev);
3928 		if (result && !knote_fops(kn)->f_extended_codes) {
3929 			result = FILTER_ACTIVE;
3930 		}
3931 
3932 		kqlock(kq);
3933 
3934 		if (result & FILTER_THREADREQ_NODEFEER) {
3935 			enable_preemption();
3936 		}
3937 
3938 		if (kn->kn_flags & EV_ERROR) {
3939 			/*
3940 			 * Failed to attach correctly, so drop.
3941 			 */
3942 			kn->kn_filtid = EVFILTID_DETACHED;
3943 			error = (int)kn->kn_sdata;
3944 			knote_drop(kq, kn, &knlc);
3945 			result = 0;
3946 			goto out;
3947 		}
3948 
3949 		/*
3950 		 * end "attaching" phase - now just attached
3951 		 *
3952 		 * Mark the thread request overcommit, if apropos
3953 		 *
3954 		 * If the attach routine indicated that an
3955 		 * event is already fired, activate the knote.
3956 		 */
3957 		if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
3958 		    (kq->kq_state & KQ_WORKLOOP)) {
3959 			kqworkloop_set_overcommit((struct kqworkloop *)kq);
3960 		}
3961 	} else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
3962 		/*
3963 		 * The knote was dropped while we were waiting for the lock,
3964 		 * we need to re-evaluate entirely
3965 		 */
3966 
3967 		goto restart;
3968 	} else if (kev->flags & EV_DELETE) {
3969 		/*
3970 		 * Deletion of a knote (drop)
3971 		 *
3972 		 * If the filter wants to filter drop events, let it do so.
3973 		 *
3974 		 * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
3975 		 * we must wait for the knote to be re-enabled (unless it is being
3976 		 * re-enabled atomically here).
3977 		 */
3978 
3979 		if (knote_fops(kn)->f_allow_drop) {
3980 			bool drop;
3981 
3982 			kqunlock(kq);
3983 			drop = knote_fops(kn)->f_allow_drop(kn, kev);
3984 			kqlock(kq);
3985 
3986 			if (!drop) {
3987 				goto out_unlock;
3988 			}
3989 		}
3990 
3991 		if ((kev->flags & EV_ENABLE) == 0 &&
3992 		    (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
3993 		    (kn->kn_status & KN_DISABLED) != 0) {
3994 			kn->kn_status |= KN_DEFERDELETE;
3995 			error = EINPROGRESS;
3996 			goto out_unlock;
3997 		}
3998 
3999 		knote_drop(kq, kn, &knlc);
4000 		goto out;
4001 	} else {
4002 		/*
4003 		 * Regular update of a knote (touch)
4004 		 *
4005 		 * Call touch routine to notify filter of changes in filter values
4006 		 * (and to re-determine if any events are fired).
4007 		 *
4008 		 * If the knote is in defer-delete, avoid calling the filter touch
4009 		 * routine (it has delivered its last event already).
4010 		 *
4011 		 * If the touch routine had no failure,
4012 		 * apply the requested side effects to the knote.
4013 		 */
4014 
4015 		if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4016 			if (kev->flags & EV_ENABLE) {
4017 				result = FILTER_ACTIVE;
4018 			}
4019 		} else {
4020 			kqunlock(kq);
4021 			result = filter_call(knote_fops(kn), f_touch(kn, kev));
4022 			kqlock(kq);
4023 			if (result & FILTER_THREADREQ_NODEFEER) {
4024 				enable_preemption();
4025 			}
4026 		}
4027 
4028 		if (kev->flags & EV_ERROR) {
4029 			result = 0;
4030 			goto out_unlock;
4031 		}
4032 
4033 		if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
4034 		    kn->kn_udata != kev->udata) {
4035 			// this allows klist_copy_udata() not to take locks
4036 			os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
4037 		}
4038 		if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
4039 			kn->kn_status |= KN_DISABLED;
4040 			knote_dequeue(kq, kn);
4041 		}
4042 	}
4043 
4044 	/* accept new kevent state */
4045 	knote_apply_touch(kq, kn, kev, result);
4046 
4047 out_unlock:
4048 	/*
4049 	 * When the filter asked for a post-register wait,
4050 	 * we leave the kqueue locked for kevent_register()
4051 	 * to call the filter's f_post_register_wait hook.
4052 	 */
4053 	if (result & FILTER_REGISTER_WAIT) {
4054 		knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4055 		*kn_out = kn;
4056 	} else {
4057 		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4058 	}
4059 
4060 out:
4061 	/* output local errors through the kevent */
4062 	if (error) {
4063 		kev->flags |= EV_ERROR;
4064 		kev->data = error;
4065 	}
4066 	return result;
4067 }
4068 
4069 /*
4070  * knote_process - process a triggered event
4071  *
4072  *	Validate that it is really still a triggered event
4073  *	by calling the filter routines (if necessary).  Hold
4074  *	a use reference on the knote to avoid it being detached.
4075  *
4076  *	If it is still considered triggered, we will have taken
4077  *	a copy of the state under the filter lock.  We use that
4078  *	snapshot to dispatch the knote for future processing (or
4079  *	not, if this was a lost event).
4080  *
4081  *	Our caller assures us that nobody else can be processing
4082  *	events from this knote during the whole operation. But
4083  *	others can be touching or posting events to the knote
4084  *	interspersed with our processing it.
4085  *
4086  *	caller holds a reference on the kqueue.
4087  *	kqueue locked on entry and exit - but may be dropped
4088  */
4089 static int
4090 knote_process(struct knote *kn, kevent_ctx_t kectx,
4091     kevent_callback_t callback)
4092 {
4093 	struct kevent_qos_s kev;
4094 	struct kqueue *kq = knote_get_kq(kn);
4095 	KNOTE_LOCK_CTX(knlc);
4096 	int result = FILTER_ACTIVE;
4097 	int error = 0;
4098 	bool drop = false;
4099 
4100 	/*
4101 	 * Must be active
4102 	 * Must be queued and not disabled/suppressed or dropping
4103 	 */
4104 	assert(kn->kn_status & KN_QUEUED);
4105 	assert(kn->kn_status & KN_ACTIVE);
4106 	assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));
4107 
4108 	if (kq->kq_state & KQ_WORKLOOP) {
4109 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
4110 		    ((struct kqworkloop *)kq)->kqwl_dynamicid,
4111 		    kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4112 		    kn->kn_filtid);
4113 	} else if (kq->kq_state & KQ_WORKQ) {
4114 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
4115 		    0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4116 		    kn->kn_filtid);
4117 	} else {
4118 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
4119 		    VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
4120 		    kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
4121 	}
4122 
4123 	if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
4124 		/*
4125 		 * When the knote is dropping or has dropped,
4126 		 * then there's nothing we want to process.
4127 		 */
4128 		return EJUSTRETURN;
4129 	}
4130 
4131 	/*
4132 	 * While waiting for the knote lock, we may have dropped the kq lock,
4133 	 * and a touch may have disabled and dequeued the knote.
4134 	 */
4135 	if (!(kn->kn_status & KN_QUEUED)) {
4136 		knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4137 		return EJUSTRETURN;
4138 	}
4139 
4140 	/*
4141 	 * For deferred-drop or vanished events, we just create a fake
4142 	 * event to acknowledge end-of-life.  Otherwise, we call the
4143 	 * filter's process routine to snapshot the kevent state under
4144 	 * the filter's locking protocol.
4145 	 *
4146 	 * suppress knotes to avoid returning the same event multiple times in
4147 	 * a single call.
4148 	 */
4149 	knote_suppress(kq, kn);
4150 
4151 	if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4152 		uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT;
4153 		if (kn->kn_status & KN_DEFERDELETE) {
4154 			kev_flags |= EV_DELETE;
4155 		} else {
4156 			kev_flags |= EV_VANISHED;
4157 		}
4158 
4159 		/* create fake event */
4160 		kev = (struct kevent_qos_s){
4161 			.filter = kn->kn_filter,
4162 			.ident  = kn->kn_id,
4163 			.flags  = kev_flags,
4164 			.udata  = kn->kn_udata,
4165 		};
4166 	} else {
4167 		kqunlock(kq);
4168 		kev = (struct kevent_qos_s) { };
4169 		result = filter_call(knote_fops(kn), f_process(kn, &kev));
4170 		kqlock(kq);
4171 	}
4172 
4173 	/*
4174 	 * Determine how to dispatch the knote for future event handling.
4175 	 * not-fired: just return (do not callout, leave deactivated).
4176 	 * One-shot:  If dispatch2, enter deferred-delete mode (unless this
4177 	 *            is the deferred delete event delivery itself).  Otherwise,
4178 	 *            drop it.
4179 	 * Dispatch:  don't clear state, just mark it disabled.
4180 	 * Cleared:   just leave it deactivated.
4181 	 * Others:    re-activate as there may be more events to handle.
4182 	 *            This will not wake up more handlers right now, but
4183 	 *            at the completion of handling events it may trigger
4184 	 *            more handler threads (TODO: optimize based on more than
4185 	 *            just this one event being detected by the filter).
4186 	 */
4187 	if ((result & FILTER_ACTIVE) == 0) {
4188 		if ((kn->kn_status & KN_ACTIVE) == 0) {
4189 			/*
4190 			 * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
4191 			 * within f_process() but that doesn't necessarily make them
4192 			 * ready to process, so we should leave them be.
4193 			 *
4194 			 * For other knotes, since we will not return an event,
4195 			 * there's no point keeping the knote suppressed.
4196 			 */
4197 			knote_unsuppress(kq, kn);
4198 		}
4199 		knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4200 		return EJUSTRETURN;
4201 	}
4202 
4203 	if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
4204 		knote_adjust_qos(kq, kn, result);
4205 	}
4206 
4207 	if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) {
4208 		kqueue_update_iotier_override(kq);
4209 	}
4210 
4211 	kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
4212 
4213 	if (kev.flags & EV_ONESHOT) {
4214 		if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4215 		    (kn->kn_status & KN_DEFERDELETE) == 0) {
4216 			/* defer dropping non-delete oneshot dispatch2 events */
4217 			kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
4218 		} else {
4219 			drop = true;
4220 		}
4221 	} else if (kn->kn_flags & EV_DISPATCH) {
4222 		/* disable all dispatch knotes */
4223 		kn->kn_status |= KN_DISABLED;
4224 	} else if ((kn->kn_flags & EV_CLEAR) == 0) {
4225 		/* re-activate in case there are more events */
4226 		knote_activate(kq, kn, FILTER_ACTIVE);
4227 	}
4228 
4229 	/*
4230 	 * callback to handle each event as we find it.
4231 	 * If we have to detach and drop the knote, do
4232 	 * it while we have the kq unlocked.
4233 	 */
4234 	if (drop) {
4235 		knote_drop(kq, kn, &knlc);
4236 	} else {
4237 		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4238 	}
4239 
4240 	if (kev.flags & EV_VANISHED) {
4241 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
4242 		    kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4243 		    kn->kn_filtid);
4244 	}
4245 
4246 	error = (callback)(&kev, kectx);
4247 	kqlock(kq);
4248 	return error;
4249 }
4250 
4251 /*
4252  * Returns -1 if the kqueue was unbound and processing should not happen
4253  */
4254 #define KQWQAE_BEGIN_PROCESSING 1
4255 #define KQWQAE_END_PROCESSING   2
4256 #define KQWQAE_UNBIND           3
4257 static int
4258 kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
4259     int kevent_flags, int kqwqae_op)
4260 {
4261 	struct knote *kn;
4262 	int rc = 0;
4263 	bool unbind;
4264 	struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index - 1];
4265 	struct kqtailq *queue = &kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1];
4266 
4267 	kqlock_held(&kqwq->kqwq_kqueue);
4268 
4269 	/*
4270 	 * Return suppressed knotes to their original state.
4271 	 * For workq kqueues, suppressed ones that are still
4272 	 * truly active (not just forced into the queue) will
4273 	 * set flags we check below to see if anything got
4274 	 * woken up.
4275 	 */
4276 	while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4277 		knote_unsuppress(kqwq, kn);
4278 	}
4279 
4280 	if (kqwqae_op == KQWQAE_UNBIND) {
4281 		unbind = true;
4282 	} else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
4283 		unbind = false;
4284 	} else {
4285 		unbind = TAILQ_EMPTY(queue);
4286 	}
4287 	if (unbind) {
4288 		thread_t thread = kqr_thread_fast(kqr);
4289 		thread_qos_t old_override;
4290 
4291 #if DEBUG || DEVELOPMENT
4292 		thread_t self = current_thread();
4293 		struct uthread *ut = get_bsdthread_info(self);
4294 
4295 		assert(thread == self);
4296 		assert(ut->uu_kqr_bound == kqr);
4297 #endif // DEBUG || DEVELOPMENT
4298 
4299 		old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
4300 		if (!TAILQ_EMPTY(queue)) {
4301 			/*
4302 			 * Request a new thread if we didn't process the whole
4303 			 * queue.
4304 			 */
4305 			kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
4306 			    kqr->tr_kq_qos_index, 0);
4307 		}
4308 		if (old_override) {
4309 			thread_drop_kevent_override(thread);
4310 		}
4311 		rc = -1;
4312 	}
4313 
4314 	return rc;
4315 }
4316 
4317 /*
4318  * Return 0 to indicate that processing should proceed,
4319  * -1 if there is nothing to process.
4320  *
4321  * Called with kqueue locked and returns the same way,
4322  * but may drop lock temporarily.
4323  */
4324 static int
4325 kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4326     int kevent_flags)
4327 {
4328 	int rc = 0;
4329 
4330 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
4331 	    0, kqr->tr_kq_qos_index);
4332 
4333 	rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4334 	    KQWQAE_BEGIN_PROCESSING);
4335 
4336 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
4337 	    thread_tid(kqr_thread(kqr)),
4338 	    !TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
4339 
4340 	return rc;
4341 }
4342 
4343 static thread_qos_t
4344 kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
4345 {
4346 	kq_index_t qos = THREAD_QOS_UNSPECIFIED;
4347 	struct knote *kn, *tmp;
4348 
4349 	kqlock_held(kqwl);
4350 
4351 	TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
4352 		/*
4353 		 * If a knote that can adjust QoS is disabled because of the automatic
4354 		 * behavior of EV_DISPATCH, the knotes should stay suppressed so that
4355 		 * further overrides keep pushing.
4356 		 */
4357 		if (knote_fops(kn)->f_adjusts_qos &&
4358 		    (kn->kn_status & KN_DISABLED) != 0 &&
4359 		    (kn->kn_status & KN_DROPPING) == 0 &&
4360 		    (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
4361 			qos = MAX(qos, kn->kn_qos_override);
4362 			continue;
4363 		}
4364 		knote_unsuppress(kqwl, kn);
4365 	}
4366 
4367 	return qos;
4368 }
4369 
4370 static int
4371 kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
4372 {
4373 	workq_threadreq_t kqr = &kqwl->kqwl_request;
4374 	struct kqueue *kq = &kqwl->kqwl_kqueue;
4375 	int rc = 0, op = KQWL_UTQ_NONE;
4376 
4377 	kqlock_held(kq);
4378 
4379 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
4380 	    kqwl->kqwl_dynamicid, 0, 0);
4381 
4382 	/* nobody else should still be processing */
4383 	assert((kq->kq_state & KQ_PROCESSING) == 0);
4384 
4385 	kq->kq_state |= KQ_PROCESSING;
4386 
4387 	if (kevent_flags & KEVENT_FLAG_PARKING) {
4388 		/*
4389 		 * When "parking" we want to process events and if no events are found
4390 		 * unbind.
4391 		 *
4392 		 * However, non-overcommit threads sometimes park even when they have
4393 		 * more work so that the pool can narrow.  For these, we need to unbind
4394 		 * early, so that calling kqworkloop_update_threads_qos() can ask the
4395 		 * workqueue subsystem whether the thread should park despite having
4396 		 * pending events.
4397 		 */
4398 		if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
4399 			op = KQWL_UTQ_PARKING;
4400 		} else {
4401 			op = KQWL_UTQ_UNBINDING;
4402 		}
4403 	} else if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
4404 		op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
4405 	}
4406 
4407 	if (op != KQWL_UTQ_NONE) {
4408 		thread_qos_t qos_override;
4409 		thread_t thread = kqr_thread_fast(kqr);
4410 
4411 		qos_override = kqworkloop_acknowledge_events(kqwl);
4412 
4413 		if (op == KQWL_UTQ_UNBINDING) {
4414 			kqworkloop_unbind_locked(kqwl, thread,
4415 			    KQWL_OVERRIDE_DROP_IMMEDIATELY);
4416 			kqworkloop_release_live(kqwl);
4417 		}
4418 		kqworkloop_update_threads_qos(kqwl, op, qos_override);
4419 		if (op == KQWL_UTQ_PARKING &&
4420 		    (!kqwl->kqwl_count || kqwl->kqwl_owner)) {
4421 			kqworkloop_unbind_locked(kqwl, thread,
4422 			    KQWL_OVERRIDE_DROP_DELAYED);
4423 			kqworkloop_release_live(kqwl);
4424 			rc = -1;
4425 		} else if (op == KQWL_UTQ_UNBINDING &&
4426 		    kqr_thread(kqr) != thread) {
4427 			rc = -1;
4428 		}
4429 
4430 		if (rc == -1) {
4431 			kq->kq_state &= ~KQ_PROCESSING;
4432 			kqworkloop_unbind_delayed_override_drop(thread);
4433 		}
4434 	}
4435 
4436 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
4437 	    kqwl->kqwl_dynamicid, 0, 0);
4438 
4439 	return rc;
4440 }
4441 
4442 /*
4443  * Return 0 to indicate that processing should proceed,
4444  * -1 if there is nothing to process.
4445  * EBADF if the kqueue is draining
4446  *
4447  * Called with kqueue locked and returns the same way,
4448  * but may drop lock temporarily.
4449  * May block.
4450  */
4451 static int
4452 kqfile_begin_processing(struct kqfile *kq)
4453 {
4454 	kqlock_held(kq);
4455 
4456 	assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4457 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
4458 	    VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4459 
4460 	/* wait to become the exclusive processing thread */
4461 	while ((kq->kqf_state & (KQ_PROCESSING | KQ_DRAIN)) == KQ_PROCESSING) {
4462 		kq->kqf_state |= KQ_PROCWAIT;
4463 		lck_spin_sleep(&kq->kqf_lock, LCK_SLEEP_DEFAULT,
4464 		    &kq->kqf_suppressed, THREAD_UNINT | THREAD_WAIT_NOREPORT);
4465 	}
4466 
4467 	if (kq->kqf_state & KQ_DRAIN) {
4468 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4469 		    VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
4470 		return EBADF;
4471 	}
4472 
4473 	/* Nobody else processing */
4474 
4475 	/* anything left to process? */
4476 	if (kq->kqf_count == 0) {
4477 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4478 		    VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
4479 		return -1;
4480 	}
4481 
4482 	/* convert to processing mode */
4483 	kq->kqf_state |= KQ_PROCESSING;
4484 
4485 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4486 	    VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4487 	return 0;
4488 }
4489 
4490 /*
4491  * Try to end the processing, only called when a workq thread is attempting to
4492  * park (KEVENT_FLAG_PARKING is set).
4493  *
4494  * When returning -1, the kqworkq is setup again so that it is ready to be
4495  * processed.
4496  */
4497 static int
4498 kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4499     int kevent_flags)
4500 {
4501 	if (kevent_flags & KEVENT_FLAG_PARKING) {
4502 		/*
4503 		 * if acknowledge events "succeeds" it means there are events,
4504 		 * which is a failure condition for end_processing.
4505 		 */
4506 		int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4507 		    KQWQAE_END_PROCESSING);
4508 		if (rc == 0) {
4509 			return -1;
4510 		}
4511 	}
4512 
4513 	return 0;
4514 }
4515 
4516 /*
4517  * Try to end the processing, only called when a workq thread is attempting to
4518  * park (KEVENT_FLAG_PARKING is set).
4519  *
4520  * When returning -1, the kqworkq is setup again so that it is ready to be
4521  * processed (as if kqworkloop_begin_processing had just been called).
4522  *
4523  * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
4524  * the kqworkloop is unbound from its servicer as a side effect.
4525  */
4526 static int
4527 kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
4528 {
4529 	struct kqueue *kq = &kqwl->kqwl_kqueue;
4530 	workq_threadreq_t kqr = &kqwl->kqwl_request;
4531 	int rc = 0;
4532 
4533 	kqlock_held(kq);
4534 
4535 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
4536 	    kqwl->kqwl_dynamicid, 0, 0);
4537 
4538 	if (kevent_flags & KEVENT_FLAG_PARKING) {
4539 		thread_t thread = kqr_thread_fast(kqr);
4540 		thread_qos_t qos_override;
4541 
4542 		/*
4543 		 * When KEVENT_FLAG_PARKING is set, we need to attempt
4544 		 * an unbind while still under the lock.
4545 		 *
4546 		 * So we do everything kqworkloop_unbind() would do, but because
4547 		 * we're inside kqueue_process(), if the workloop actually
4548 		 * received events while our locks were dropped, we have
4549 		 * the opportunity to fail the end processing and loop again.
4550 		 *
4551 		 * This avoids going through the process-wide workqueue lock
4552 		 * hence scales better.
4553 		 */
4554 		assert(flags & KQ_PROCESSING);
4555 		qos_override = kqworkloop_acknowledge_events(kqwl);
4556 		kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
4557 
4558 		if (kqwl->kqwl_wakeup_qos && !kqwl->kqwl_owner) {
4559 			rc = -1;
4560 		} else {
4561 			kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4562 			kqworkloop_release_live(kqwl);
4563 			kq->kq_state &= ~flags;
4564 			kqworkloop_unbind_delayed_override_drop(thread);
4565 		}
4566 	} else {
4567 		kq->kq_state &= ~flags;
4568 		kq->kq_state |= KQ_R2K_ARMED;
4569 		kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
4570 	}
4571 
4572 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
4573 	    kqwl->kqwl_dynamicid, 0, 0);
4574 
4575 	return rc;
4576 }
4577 
4578 /*
4579  * Called with kqueue lock held.
4580  *
4581  * 0: no more events
4582  * -1: has more events
4583  * EBADF: kqueue is in draining mode
4584  */
4585 static int
4586 kqfile_end_processing(struct kqfile *kq)
4587 {
4588 	struct knote *kn;
4589 	int procwait;
4590 
4591 	kqlock_held(kq);
4592 
4593 	assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4594 
4595 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
4596 	    VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4597 
4598 	/*
4599 	 * Return suppressed knotes to their original state.
4600 	 */
4601 	while ((kn = TAILQ_FIRST(&kq->kqf_suppressed)) != NULL) {
4602 		knote_unsuppress(kq, kn);
4603 	}
4604 
4605 	procwait = (kq->kqf_state & KQ_PROCWAIT);
4606 	kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
4607 
4608 	if (procwait) {
4609 		/* first wake up any thread already waiting to process */
4610 		thread_wakeup(&kq->kqf_suppressed);
4611 	}
4612 
4613 	if (kq->kqf_state & KQ_DRAIN) {
4614 		return EBADF;
4615 	}
4616 	return kq->kqf_count != 0 ? -1 : 0;
4617 }
4618 
4619 static int
4620 kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
4621     struct kqueue_workloop_params *params, int *retval)
4622 {
4623 	int error = 0;
4624 	struct kqworkloop *kqwl;
4625 	struct filedesc *fdp = &p->p_fd;
4626 	workq_threadreq_param_t trp = { };
4627 
4628 	switch (cmd) {
4629 	case KQ_WORKLOOP_CREATE:
4630 		if (!params->kqwlp_flags) {
4631 			error = EINVAL;
4632 			break;
4633 		}
4634 
4635 		if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
4636 		    (params->kqwlp_sched_pri < 1 ||
4637 		    params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
4638 			error = EINVAL;
4639 			break;
4640 		}
4641 
4642 		if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
4643 		    invalid_policy(params->kqwlp_sched_pol)) {
4644 			error = EINVAL;
4645 			break;
4646 		}
4647 
4648 		if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
4649 		    (params->kqwlp_cpu_percent <= 0 ||
4650 		    params->kqwlp_cpu_percent > 100 ||
4651 		    params->kqwlp_cpu_refillms <= 0 ||
4652 		    params->kqwlp_cpu_refillms > 0x00ffffff)) {
4653 			error = EINVAL;
4654 			break;
4655 		}
4656 
4657 		if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
4658 			trp.trp_flags |= TRP_PRIORITY;
4659 			trp.trp_pri = (uint8_t)params->kqwlp_sched_pri;
4660 		}
4661 		if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
4662 			trp.trp_flags |= TRP_POLICY;
4663 			trp.trp_pol = (uint8_t)params->kqwlp_sched_pol;
4664 		}
4665 		if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
4666 			trp.trp_flags |= TRP_CPUPERCENT;
4667 			trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
4668 			trp.trp_refillms = params->kqwlp_cpu_refillms;
4669 		}
4670 
4671 		error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
4672 		    KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4673 		    KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
4674 		if (error) {
4675 			break;
4676 		}
4677 
4678 		if (!fdt_flag_test(fdp, FD_WORKLOOP)) {
4679 			/* FD_WORKLOOP indicates we've ever created a workloop
4680 			 * via this syscall but its only ever added to a process, never
4681 			 * removed.
4682 			 */
4683 			proc_fdlock(p);
4684 			fdt_flag_set(fdp, FD_WORKLOOP);
4685 			proc_fdunlock(p);
4686 		}
4687 		break;
4688 	case KQ_WORKLOOP_DESTROY:
4689 		error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
4690 		    KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4691 		    KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
4692 		if (error) {
4693 			break;
4694 		}
4695 		kqlock(kqwl);
4696 		trp.trp_value = kqwl->kqwl_params;
4697 		if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
4698 			trp.trp_flags |= TRP_RELEASED;
4699 			kqwl->kqwl_params = trp.trp_value;
4700 			kqworkloop_release_live(kqwl);
4701 		} else {
4702 			error = EINVAL;
4703 		}
4704 		kqunlock(kqwl);
4705 		kqworkloop_release(kqwl);
4706 		break;
4707 	}
4708 	*retval = 0;
4709 	return error;
4710 }
4711 
4712 int
4713 kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
4714 {
4715 	struct kqueue_workloop_params params = {
4716 		.kqwlp_id = 0,
4717 	};
4718 	if (uap->sz < sizeof(params.kqwlp_version)) {
4719 		return EINVAL;
4720 	}
4721 
4722 	size_t copyin_sz = MIN(sizeof(params), uap->sz);
4723 	int rv = copyin(uap->addr, &params, copyin_sz);
4724 	if (rv) {
4725 		return rv;
4726 	}
4727 
4728 	if (params.kqwlp_version != (int)uap->sz) {
4729 		return EINVAL;
4730 	}
4731 
4732 	return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
4733 	           retval);
4734 }
4735 
4736 static int
4737 kqueue_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx)
4738 {
4739 	struct kqfile *kq = (struct kqfile *)fp_get_data(fp);
4740 	int retnum = 0;
4741 
4742 	assert((kq->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4743 
4744 	if (which == FREAD) {
4745 		kqlock(kq);
4746 		if (kqfile_begin_processing(kq) == 0) {
4747 			retnum = kq->kqf_count;
4748 			kqfile_end_processing(kq);
4749 		} else if ((kq->kqf_state & KQ_DRAIN) == 0) {
4750 			selrecord(kq->kqf_p, &kq->kqf_sel, wql);
4751 		}
4752 		kqunlock(kq);
4753 	}
4754 	return retnum;
4755 }
4756 
4757 /*
4758  * kqueue_close -
4759  */
4760 static int
4761 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
4762 {
4763 	struct kqfile *kqf = fg_get_data(fg);
4764 
4765 	assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4766 	kqlock(kqf);
4767 	selthreadclear(&kqf->kqf_sel);
4768 	kqunlock(kqf);
4769 	kqueue_dealloc(&kqf->kqf_kqueue);
4770 	fg_set_data(fg, NULL);
4771 	return 0;
4772 }
4773 
4774 /*
4775  * Max depth of the nested kq path that can be created.
4776  * Note that this has to be less than the maximum value representable
4777  * in kq_level to avoid wrapping around and mislabeling the level.
4778  */
4779 #define MAX_NESTED_KQ 1000
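
/*
 * The ceiling protocol below is exercised from userspace simply by adding one
 * kqueue's file descriptor to another with EVFILT_READ. A minimal sketch of
 * that usage (assuming a standard BSD/macOS userspace):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	static int
 *	nest_kqueue(int parent_kq, int child_kq)
 *	{
 *		struct kevent kev;
 *
 *		// The parent reports the child as readable whenever the child
 *		// has pending events; this is the path that reaches
 *		// kqueue_kqfilter() and the kq_level bookkeeping below.
 *		EV_SET(&kev, child_kq, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		return kevent(parent_kq, &kev, 1, NULL, 0, NULL);
 *	}
 */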
4780 
4781 /*
4782  * The caller has taken a use-count reference on this kqueue and will donate it
4783  * to the kqueue we are being added to.  This keeps the kqueue from closing until
4784  * that relationship is torn down.
4785  */
4786 static int
4787 kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
4788     __unused struct kevent_qos_s *kev)
4789 {
4790 	struct kqfile *kqf = (struct kqfile *)fp_get_data(fp);
4791 	struct kqueue *kq = &kqf->kqf_kqueue;
4792 	struct kqueue *parentkq = knote_get_kq(kn);
4793 
4794 	assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4795 
4796 	if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
4797 		knote_set_error(kn, EINVAL);
4798 		return 0;
4799 	}
4800 
4801 	/*
4802 	 * We have to avoid creating a cycle when nesting kqueues
4803 	 * inside another.  Rather than trying to walk the whole
4804 	 * potential DAG of nested kqueues, we just use a simple
4805 	 * ceiling protocol.  When a kqueue is inserted into another,
4806 	 * we check that the (future) parent is not already nested
4807 	 * into another kqueue at a lower level than the potential
4808 	 * child (because it could indicate a cycle).  If that test
4809 	 * passes, we just mark the nesting levels accordingly.
4810 	 *
4811 	 * Only up to MAX_NESTED_KQ can be nested.
4812 	 *
4813 	 * Note: kqworkq and kqworkloop cannot be nested and have reused their
4814 	 *       kq_level field, so ignore these as parent.
4815 	 */
4816 
4817 	kqlock(parentkq);
4818 
4819 	if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
4820 		if (parentkq->kq_level > 0 &&
4821 		    parentkq->kq_level < kq->kq_level) {
4822 			kqunlock(parentkq);
4823 			knote_set_error(kn, EINVAL);
4824 			return 0;
4825 		}
4826 
4827 		/* set parent level appropriately */
4828 		uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
4829 		if (plevel < kq->kq_level + 1) {
4830 			if (kq->kq_level + 1 > MAX_NESTED_KQ) {
4831 				kqunlock(parentkq);
4832 				knote_set_error(kn, EINVAL);
4833 				return 0;
4834 			}
4835 			plevel = kq->kq_level + 1;
4836 		}
4837 
4838 		parentkq->kq_level = plevel;
4839 	}
4840 
4841 	kqunlock(parentkq);
4842 
4843 	kn->kn_filtid = EVFILTID_KQREAD;
4844 	kqlock(kq);
4845 	KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
4846 	/* indicate nesting in child, if needed */
4847 	if (kq->kq_level == 0) {
4848 		kq->kq_level = 1;
4849 	}
4850 
4851 	int count = kq->kq_count;
4852 	kqunlock(kq);
4853 	return count > 0;
4854 }
4855 
4856 __attribute__((noinline))
4857 static void
4858 kqfile_wakeup(struct kqfile *kqf, long hint, wait_result_t wr)
4859 {
4860 	/* wakeup a thread waiting on this queue */
4861 	selwakeup(&kqf->kqf_sel);
4862 
4863 	/* wake up threads in kqueue_scan() */
4864 	if (kqf->kqf_state & KQ_SLEEP) {
4865 		kqf->kqf_state &= ~KQ_SLEEP;
4866 		thread_wakeup_with_result(&kqf->kqf_count, wr);
4867 	}
4868 
4869 	if (hint == NOTE_REVOKE) {
4870 		/* wakeup threads waiting their turn to process */
4871 		if (kqf->kqf_state & KQ_PROCWAIT) {
4872 			assert(kqf->kqf_state & KQ_PROCESSING);
4873 			kqf->kqf_state &= ~KQ_PROCWAIT;
4874 			thread_wakeup(&kqf->kqf_suppressed);
4875 		}
4876 
4877 		/* no need to KNOTE: knote_fdclose() takes care of it */
4878 	} else {
4879 		/* wakeup other kqueues/select sets we're inside */
4880 		KNOTE(&kqf->kqf_sel.si_note, hint);
4881 	}
4882 }
4883 
4884 /*
4885  * kqueue_drain - called when kq is closed
4886  */
4887 static int
4888 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
4889 {
4890 	struct kqfile *kqf = (struct kqfile *)fp_get_data(fp);
4891 
4892 	assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4893 
4894 	kqlock(kqf);
4895 	kqf->kqf_state |= KQ_DRAIN;
4896 	kqfile_wakeup(kqf, NOTE_REVOKE, THREAD_RESTART);
4897 	kqunlock(kqf);
4898 	return 0;
4899 }
4900 
4901 int
4902 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
4903 {
4904 	assert((kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4905 
4906 	kqlock(kq);
4907 	if (isstat64 != 0) {
4908 		struct stat64 *sb64 = (struct stat64 *)ub;
4909 
4910 		bzero((void *)sb64, sizeof(*sb64));
4911 		sb64->st_size = kq->kq_count;
4912 		if (kq->kq_state & KQ_KEV_QOS) {
4913 			sb64->st_blksize = sizeof(struct kevent_qos_s);
4914 		} else if (kq->kq_state & KQ_KEV64) {
4915 			sb64->st_blksize = sizeof(struct kevent64_s);
4916 		} else if (IS_64BIT_PROCESS(p)) {
4917 			sb64->st_blksize = sizeof(struct user64_kevent);
4918 		} else {
4919 			sb64->st_blksize = sizeof(struct user32_kevent);
4920 		}
4921 		sb64->st_mode = S_IFIFO;
4922 	} else {
4923 		struct stat *sb = (struct stat *)ub;
4924 
4925 		bzero((void *)sb, sizeof(*sb));
4926 		sb->st_size = kq->kq_count;
4927 		if (kq->kq_state & KQ_KEV_QOS) {
4928 			sb->st_blksize = sizeof(struct kevent_qos_s);
4929 		} else if (kq->kq_state & KQ_KEV64) {
4930 			sb->st_blksize = sizeof(struct kevent64_s);
4931 		} else if (IS_64BIT_PROCESS(p)) {
4932 			sb->st_blksize = sizeof(struct user64_kevent);
4933 		} else {
4934 			sb->st_blksize = sizeof(struct user32_kevent);
4935 		}
4936 		sb->st_mode = S_IFIFO;
4937 	}
4938 	kqunlock(kq);
4939 	return 0;
4940 }
4941 
4942 static inline bool
4943 kqueue_threadreq_can_use_ast(struct kqueue *kq)
4944 {
4945 	if (current_proc() == kq->kq_p) {
4946 		/*
4947 		 * Setting an AST from a non-BSD syscall is unsafe: mach_msg_trap() can
4948 		 * do combined send/receive and in the case of self-IPC, the AST may be
4949 		 * set on a thread that will not return to userspace and needs the
4950 		 * thread the AST would create to unblock itself.
4951 		 *
4952 		 * At this time, we really want to target:
4953 		 *
4954 		 * - kevent variants that can cause thread creations, and dispatch
4955 		 *   really only uses kevent_qos and kevent_id,
4956 		 *
4957 		 * - workq_kernreturn (directly about thread creations)
4958 		 *
4959 		 * - bsdthread_ctl which is used for qos changes and has direct impact
4960 		 *   on the creator thread scheduling decisions.
4961 		 */
4962 		switch (current_uthread()->syscall_code) {
4963 		case SYS_kevent_qos:
4964 		case SYS_kevent_id:
4965 		case SYS_workq_kernreturn:
4966 		case SYS_bsdthread_ctl:
4967 			return true;
4968 		}
4969 	}
4970 	return false;
4971 }
4972 
4973 /*
4974  * Interact with the pthread kext to request a servicing there at a specific QoS
4975  * level.
4976  *
4977  * - Caller holds the kqlock
4978  *
4979  * - May be called with the kqueue's wait queue set locked,
4980  *   so cannot do anything that could recurse on that.
4981  */
4982 static void
4983 kqueue_threadreq_initiate(kqueue_t kqu, workq_threadreq_t kqr,
4984     kq_index_t qos, int flags)
4985 {
4986 	assert(kqr_thread(kqr) == THREAD_NULL);
4987 	assert(!kqr_thread_requested(kqr));
4988 	struct turnstile *ts = TURNSTILE_NULL;
4989 
4990 	if (workq_is_exiting(kqu.kq->kq_p)) {
4991 		return;
4992 	}
4993 
4994 	kqlock_held(kqu);
4995 
4996 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
4997 		struct kqworkloop *kqwl = kqu.kqwl;
4998 
4999 		assert(kqwl->kqwl_owner == THREAD_NULL);
5000 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
5001 		    kqwl->kqwl_dynamicid, 0, qos, kqwl->kqwl_wakeup_qos);
5002 		ts = kqwl->kqwl_turnstile;
5003 		/* Add a thread request reference on the kqueue. */
5004 		kqworkloop_retain(kqwl);
5005 
5006 #if CONFIG_PREADOPT_TG
5007 		/* This thread is the one which is ack-ing the thread group on the kqwl
5008 		 * under the kqlock and will take action accordingly, pairs with the
5009 		 * release barrier in kqueue_set_preadopted_thread_group */
5010 		uint16_t tg_acknowledged;
5011 		if (os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg_needs_redrive,
5012 		    KQWL_PREADOPT_TG_NEEDS_REDRIVE, KQWL_PREADOPT_TG_CLEAR_REDRIVE,
5013 		    &tg_acknowledged, acquire)) {
5014 			flags |= WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG;
5015 		}
5016 #endif
5017 	} else {
5018 		assert(kqu.kq->kq_state & KQ_WORKQ);
5019 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST), -1, 0, qos,
5020 		    !TAILQ_EMPTY(&kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
5021 	}
5022 
5023 	/*
5024 	 * New-style thread request supported.
5025 	 * Provide the pthread kext a pointer to a workq_threadreq_s structure for
5026 	 * its use until a corresponding kqueue_threadreq_bind callback.
5027 	 */
5028 	if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5029 		flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5030 	}
5031 	if (qos == KQWQ_QOS_MANAGER) {
5032 		qos = WORKQ_THREAD_QOS_MANAGER;
5033 	}
5034 
5035 	if (!workq_kern_threadreq_initiate(kqu.kq->kq_p, kqr, ts, qos, flags)) {
5036 		/*
5037 		 * Process is shutting down or exec'ing.
5038 		 * All the kqueues are going to be cleaned up
5039 		 * soon. Forget we even asked for a thread -
5040 		 * and make sure we don't ask for more.
5041 		 */
5042 		kqu.kq->kq_state &= ~KQ_R2K_ARMED;
5043 		kqueue_release_live(kqu);
5044 	}
5045 }
5046 
5047 /*
5048  * kqueue_threadreq_bind_prepost - prepost the bind to kevent
5049  *
5050  * This is used when kqueue_threadreq_bind may cause a lock inversion.
5051  */
5052 __attribute__((always_inline))
5053 void
5054 kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
5055     struct uthread *ut)
5056 {
5057 	ut->uu_kqr_bound = kqr;
5058 	kqr->tr_thread = get_machthread(ut);
5059 	kqr->tr_state = WORKQ_TR_STATE_BINDING;
5060 }
5061 
5062 /*
5063  * kqueue_threadreq_bind_commit - commit a bind prepost
5064  *
5065  * The workq code has to commit any binding prepost before the thread has
5066  * a chance to come back to userspace (and do kevent syscalls) or be aborted.
5067  */
5068 void
5069 kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
5070 {
5071 	struct uthread *ut = get_bsdthread_info(thread);
5072 	workq_threadreq_t kqr = ut->uu_kqr_bound;
5073 	kqueue_t kqu = kqr_kqueue(p, kqr);
5074 
5075 	kqlock(kqu);
5076 	if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5077 		kqueue_threadreq_bind(p, kqr, thread, 0);
5078 	}
5079 	kqunlock(kqu);
5080 }
5081 
5082 static void
5083 kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
5084     workq_kern_threadreq_flags_t flags)
5085 {
5086 	assert(kqr_thread_requested_pending(kqr));
5087 
5088 	kqlock_held(kqu);
5089 
5090 	if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5091 		flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5092 	}
5093 
5094 #if CONFIG_PREADOPT_TG
5095 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
5096 		uint16_t tg_ack_status;
5097 		struct kqworkloop *kqwl = kqu.kqwl;
5098 
5099 		/* This thread is the one which is ack-ing the thread group on the kqwl
5100 		 * under the kqlock and will take action accordingly, needs acquire
5101 		 * barrier */
5102 		if (os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE,
5103 		    KQWL_PREADOPT_TG_CLEAR_REDRIVE, &tg_ack_status, acquire)) {
5104 			flags |= WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG;
5105 		}
5106 	}
5107 #endif
5108 
5109 	workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
5110 }
5111 
5112 /*
5113  * kqueue_threadreq_bind - bind thread to processing kqrequest
5114  *
5115  * The provided thread will be responsible for delivering events
5116  * associated with the given kqrequest.  Bind it and get ready for
5117  * the thread to eventually arrive.
5118  */
5119 void
5120 kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
5121     unsigned int flags)
5122 {
5123 	kqueue_t kqu = kqr_kqueue(p, kqr);
5124 	struct uthread *ut = get_bsdthread_info(thread);
5125 
5126 	kqlock_held(kqu);
5127 
5128 	assert(ut->uu_kqueue_override == 0);
5129 
5130 	if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5131 		assert(ut->uu_kqr_bound == kqr);
5132 		assert(kqr->tr_thread == thread);
5133 	} else {
5134 		assert(kqr_thread_requested_pending(kqr));
5135 		assert(kqr->tr_thread == THREAD_NULL);
5136 		assert(ut->uu_kqr_bound == NULL);
5137 		ut->uu_kqr_bound = kqr;
5138 		kqr->tr_thread = thread;
5139 	}
5140 
5141 	kqr->tr_state = WORKQ_TR_STATE_BOUND;
5142 
5143 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
5144 		struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
5145 
5146 		if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
5147 			/*
5148 			 * <rdar://problem/38626999> shows that asserting here is not ok.
5149 			 *
5150 			 * This is not supposed to happen for correct use of the interface,
5151 			 * but it is sadly possible for userspace (with the help of memory
5152 			 * corruption, such as over-release of a dispatch queue) to make
5153 			 * the creator thread the "owner" of a workloop.
5154 			 *
5155 			 * Once that happens, and that creator thread picks up the same
5156 			 * workloop as a servicer, we trip this codepath. We need to fixup
5157 			 * the state to forget about this thread being the owner, as the
5158 			 * entire workloop state machine expects servicers to never be
5159 			 * owners and everything would basically go downhill from here.
5160 			 */
5161 			kqu.kqwl->kqwl_owner = THREAD_NULL;
5162 			if (kqworkloop_override(kqu.kqwl)) {
5163 				thread_drop_kevent_override(thread);
5164 			}
5165 		}
5166 
5167 		if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
5168 			/*
5169 			 * Past this point, the interlock is the kq req lock again,
5170 			 * so we can fix the inheritor for good.
5171 			 */
5172 			filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5173 			turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
5174 		}
5175 
5176 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
5177 		    thread_tid(thread), kqr->tr_kq_qos_index,
5178 		    (kqr->tr_kq_override_index << 16) | kqu.kqwl->kqwl_wakeup_qos);
5179 
5180 		ut->uu_kqueue_override = kqr->tr_kq_override_index;
5181 		if (kqr->tr_kq_override_index) {
5182 			thread_add_servicer_override(thread, kqr->tr_kq_override_index);
5183 		}
5184 
5185 #if CONFIG_PREADOPT_TG
5186 		/* Remove reference from kqwl and mark it as bound with the SENTINEL */
5187 		thread_group_qos_t old_tg;
5188 		thread_group_qos_t new_tg;
5189 		int ret = os_atomic_rmw_loop(kqr_preadopt_thread_group_addr(kqr), old_tg, new_tg, relaxed, {
5190 			if (old_tg == KQWL_PREADOPTED_TG_NEVER) {
5191 			        os_atomic_rmw_loop_give_up(break); // It's an app, nothing to do
5192 			}
5193 			assert(old_tg != KQWL_PREADOPTED_TG_PROCESSED);
5194 			new_tg = KQWL_PREADOPTED_TG_SENTINEL;
5195 		});
5196 
5197 		if (ret) {
5198 			KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqu.kqwl, KQWL_PREADOPT_OP_SERVICER_BIND, old_tg, new_tg);
5199 
5200 			if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
5201 				struct thread_group *tg = KQWL_GET_PREADOPTED_TG(old_tg);
5202 				assert(tg != NULL);
5203 
5204 				thread_set_preadopt_thread_group(thread, tg);
5205 				thread_group_release_live(tg); // The thread has a reference
5206 			} else {
5207 				/*
5208 				 * The thread may already have a preadopt thread group on it -
5209 				 * we need to make sure to clear that.
5210 				 */
5211 				thread_set_preadopt_thread_group(thread, NULL);
5212 			}
5213 
5214 			/* We have taken action on the preadopted thread group set on the
5215 			 * kqwl, so clear any redrive requests */
5216 			os_atomic_store(&kqu.kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_CLEAR_REDRIVE, relaxed);
5217 		}
5218 #endif
5219 		kqueue_update_iotier_override(kqu);
5220 	} else {
5221 		assert(kqr->tr_kq_override_index == 0);
5222 
5223 #if CONFIG_PREADOPT_TG
5224 		/*
5225 		 * The thread may have a preadopt thread group on it already because it
5226 		 * got tagged with it as a creator thread. So we need to make sure to
5227 		 * clear that since we don't have preadopt thread groups for non-kqwl
5228 		 * cases
5229 		 */
5230 		thread_set_preadopt_thread_group(thread, NULL);
5231 #endif
5232 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
5233 		    thread_tid(thread), kqr->tr_kq_qos_index,
5234 		    (kqr->tr_kq_override_index << 16) |
5235 		    !TAILQ_EMPTY(&kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
5236 	}
5237 }
5238 
5239 /*
5240  * kqueue_threadreq_cancel - abort a pending thread request
5241  *
5242  * Called when exiting/exec'ing. Forget our pending request.
5243  */
5244 void
5245 kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
5246 {
5247 	kqueue_release(kqr_kqueue(p, kqr));
5248 }
5249 
5250 workq_threadreq_param_t
5251 kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
5252 {
5253 	struct kqworkloop *kqwl;
5254 	workq_threadreq_param_t trp;
5255 
5256 	assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
5257 	kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
5258 	trp.trp_value = kqwl->kqwl_params;
5259 	return trp;
5260 }
5261 
5262 /*
5263  *	kqueue_threadreq_unbind - unbind thread from processing kqueue
5264  *
5265  *	End processing the per-QoS bucket of events and allow other threads
5266  *	to be requested for future servicing.
5267  *
5268  *	caller holds a reference on the kqueue.
5269  */
5270 void
5271 kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
5272 {
5273 	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
5274 		kqworkloop_unbind(kqr_kqworkloop(kqr));
5275 	} else {
5276 		kqworkq_unbind(p, kqr);
5277 	}
5278 }
5279 
5280 /*
5281  * If we aren't already busy processing events [for this QoS],
5282  * request workq thread support as appropriate.
5283  *
5284  * TBD - for now, we don't segregate out processing by QoS.
5285  *
5286  * - May be called with the kqueue's wait queue set locked,
5287  *   so cannot do anything that could recurse on that.
5288  */
5289 static void
5290 kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
5291 {
5292 	workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);
5293 
5294 	/* convert to thread qos value */
5295 	assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
5296 
5297 	if (!kqr_thread_requested(kqr)) {
5298 		kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
5299 	}
5300 }
5301 
5302 /*
5303  * This represents the asynchronous QoS a given workloop contributes,
5304  * hence it is the max of the currently active knotes (override index)
5305  * and the workloop max qos (userspace async qos).
5306  */
5307 static kq_index_t
5308 kqworkloop_override(struct kqworkloop *kqwl)
5309 {
5310 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5311 	return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
5312 }
5313 
5314 static inline void
5315 kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
5316 {
5317 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5318 
5319 	kqlock_held(kqwl);
5320 
5321 	if (kqwl->kqwl_state & KQ_R2K_ARMED) {
5322 		kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5323 		act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
5324 	}
5325 }
5326 
5327 static void
5328 kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
5329 {
5330 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5331 	struct kqueue *kq = &kqwl->kqwl_kqueue;
5332 	kq_index_t old_override = kqworkloop_override(kqwl);
5333 
5334 	kqlock_held(kqwl);
5335 
5336 	switch (op) {
5337 	case KQWL_UTQ_UPDATE_WAKEUP_QOS:
5338 		kqwl->kqwl_wakeup_qos = qos;
5339 		kqworkloop_request_fire_r2k_notification(kqwl);
5340 		goto recompute;
5341 
5342 	case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
5343 		kqr->tr_kq_override_index = qos;
5344 		goto recompute;
5345 
5346 	case KQWL_UTQ_PARKING:
5347 	case KQWL_UTQ_UNBINDING:
5348 		kqr->tr_kq_override_index = qos;
5349 		OS_FALLTHROUGH;
5350 
5351 	case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
5352 		if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
5353 			assert(qos == THREAD_QOS_UNSPECIFIED);
5354 		}
5355 		if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5356 			kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5357 		}
5358 		kqwl->kqwl_wakeup_qos = 0;
5359 		for (kq_index_t i = KQWL_NBUCKETS; i > 0; i--) {
5360 			if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i - 1])) {
5361 				kqwl->kqwl_wakeup_qos = i;
5362 				kqworkloop_request_fire_r2k_notification(kqwl);
5363 				break;
5364 			}
5365 		}
5366 		OS_FALLTHROUGH;
5367 
5368 	case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
5369 recompute:
5370 		/*
5371 		 * When modifying the wakeup QoS or the override QoS, we always need to
5372 		 * maintain our invariant that kqr_override_index is at least as large
5373 		 * as the highest QoS for which an event is fired.
5374 		 *
5375 		 * However, this override index can be larger when there is an overridden
5376 		 * suppressed knote pushing on the kqueue.
5377 		 */
5378 		if (qos < kqwl->kqwl_wakeup_qos) {
5379 			qos = kqwl->kqwl_wakeup_qos;
5380 		}
5381 		if (kqr->tr_kq_override_index < qos) {
5382 			kqr->tr_kq_override_index = qos;
5383 		}
5384 		break;
5385 
5386 	case KQWL_UTQ_REDRIVE_EVENTS:
5387 		break;
5388 
5389 	case KQWL_UTQ_SET_QOS_INDEX:
5390 		kqr->tr_kq_qos_index = qos;
5391 		break;
5392 
5393 	default:
5394 		panic("unknown kqwl thread qos update operation: %d", op);
5395 	}
5396 
5397 	thread_t kqwl_owner = kqwl->kqwl_owner;
5398 	thread_t servicer = kqr_thread(kqr);
5399 	boolean_t qos_changed = FALSE;
5400 	kq_index_t new_override = kqworkloop_override(kqwl);
5401 
5402 	/*
5403 	 * Apply the diffs to the owner if applicable
5404 	 */
5405 	if (kqwl_owner) {
5406 #if 0
5407 		/* JMM - need new trace hooks for owner overrides */
5408 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
5409 		    kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
5410 		    (kqr->tr_kq_override_index << 16) | kqwl->kqwl_wakeup_qos);
5411 #endif
5412 		if (new_override == old_override) {
5413 			// nothing to do
5414 		} else if (old_override == THREAD_QOS_UNSPECIFIED) {
5415 			thread_add_kevent_override(kqwl_owner, new_override);
5416 		} else if (new_override == THREAD_QOS_UNSPECIFIED) {
5417 			thread_drop_kevent_override(kqwl_owner);
5418 		} else { /*  old_override != new_override */
5419 			thread_update_kevent_override(kqwl_owner, new_override);
5420 		}
5421 	}
5422 
5423 	/*
5424 	 * apply the diffs to the servicer
5425 	 */
5426 
5427 	if (!kqr_thread_requested(kqr)) {
5428 		/*
5429 		 * No servicer, nor thread-request
5430 		 *
5431 		 * Make a new thread request, unless there is an owner (or the workloop
5432 		 * is suspended in userland) or if there is no asynchronous work in the
5433 		 * first place.
5434 		 */
5435 
5436 		if (kqwl_owner == NULL && kqwl->kqwl_wakeup_qos) {
5437 			int initiate_flags = 0;
5438 			if (op == KQWL_UTQ_UNBINDING) {
5439 				initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
5440 			}
5441 
5442 			/* kqueue_threadreq_initiate handles the acknowledgement of the TG
5443 			 * if needed */
5444 			kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
5445 		}
5446 	} else if (servicer) {
5447 		/*
5448 		 * Servicer in flight
5449 		 *
5450 		 * Just apply the diff to the servicer
5451 		 */
5452 
5453 #if CONFIG_PREADOPT_TG
5454 		/* When there's a servicer for the kqwl already, then the servicer will
5455 		 * adopt the thread group in the kqr, we don't need to poke the
5456 		 * workqueue subsystem to make different decisions due to the thread
5457 		 * group. Consider the current request ack-ed.
5458 		 */
5459 		os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_CLEAR_REDRIVE, relaxed);
5460 #endif
5461 
5462 		struct uthread *ut = get_bsdthread_info(servicer);
5463 		if (ut->uu_kqueue_override != new_override) {
5464 			if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
5465 				thread_add_servicer_override(servicer, new_override);
5466 			} else if (new_override == THREAD_QOS_UNSPECIFIED) {
5467 				thread_drop_servicer_override(servicer);
5468 			} else { /* ut->uu_kqueue_override != new_override */
5469 				thread_update_servicer_override(servicer, new_override);
5470 			}
5471 			ut->uu_kqueue_override = new_override;
5472 			qos_changed = TRUE;
5473 		}
5474 	} else if (new_override == THREAD_QOS_UNSPECIFIED) {
5475 		/*
5476 		 * No events to deliver anymore.
5477 		 *
5478 		 * However, canceling with turnstiles is challenging, so the fact that
5479 		 * the request isn't useful will be discovered by the servicer itself
5480 		 * later on.
5481 		 */
5482 	} else if (old_override != new_override) {
5483 		/*
5484 		 * Request is in flight
5485 		 *
5486 		 * Apply the diff to the thread request.
5487 		 */
5488 		kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
5489 		qos_changed = TRUE;
5490 	}
5491 
5492 	if (qos_changed) {
5493 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
5494 		    thread_tid(servicer), kqr->tr_kq_qos_index,
5495 		    (kqr->tr_kq_override_index << 16) | kqwl->kqwl_wakeup_qos);
5496 	}
5497 }
5498 
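/*
 * kqworkloop_update_iotier_override - propagate the workloop IO tier override
 *
 * Reads the current kqwl_iotier_override value and, when a servicer thread is
 * bound, applies it to that thread.  Called with the kqueue lock held.
 */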
5499 static void
5500 kqworkloop_update_iotier_override(struct kqworkloop *kqwl)
5501 {
5502 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5503 	thread_t servicer = kqr_thread(kqr);
5504 	uint8_t iotier = os_atomic_load(&kqwl->kqwl_iotier_override, relaxed);
5505 
5506 	kqlock_held(kqwl);
5507 
5508 	if (servicer) {
5509 		thread_update_servicer_iotier_override(servicer, iotier);
5510 	}
5511 }
5512 
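/*
 * kqworkloop_wakeup - note that a knote fired at the given QoS
 *
 * Ignores wakeups at or below the recorded wakeup QoS, and defers the QoS
 * recomputation to kqworkloop_end_processing() when called from the servicer
 * while it is processing.  Otherwise updates the wakeup QoS, which may fire
 * the R2K notification or initiate/modify a thread request.
 */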
5513 static void
5514 kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
5515 {
5516 	if (qos <= kqwl->kqwl_wakeup_qos) {
5517 		/*
5518 		 * Shortcut wakeups that really do nothing useful
5519 		 */
5520 		return;
5521 	}
5522 
5523 	if ((kqwl->kqwl_state & KQ_PROCESSING) &&
5524 	    kqr_thread(&kqwl->kqwl_request) == current_thread()) {
5525 		/*
5526 		 * kqworkloop_end_processing() will perform the required QoS
5527 		 * computations when it unsets the processing mode.
5528 		 */
5529 		return;
5530 	}
5531 
5532 	kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
5533 }
5534 
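/*
 * kqueue_get_suppressed_queue - return the suppression queue for a knote
 *
 * Workloops have a single suppression queue, workqs have one per QoS bucket,
 * and kqfiles have their own dedicated queue.
 */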
5535 static struct kqtailq *
5536 kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
5537 {
5538 	if (kq.kq->kq_state & KQ_WORKLOOP) {
5539 		return &kq.kqwl->kqwl_suppressed;
5540 	} else if (kq.kq->kq_state & KQ_WORKQ) {
5541 		return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index - 1];
5542 	} else {
5543 		return &kq.kqf->kqf_suppressed;
5544 	}
5545 }
5546 
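/*
 * kqueue_alloc_turnstile - lazily allocate the turnstile of a workloop kqueue
 *
 * Returns the existing turnstile when KQ_HAS_TURNSTILE is already set (paired
 * with the release barrier below), TURNSTILE_NULL for non-workloop kqueues,
 * and otherwise prepares a new turnstile and updates its inheritor through
 * either the workqueue subsystem or filt_wlupdate_inheritor().
 */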
5547 struct turnstile *
5548 kqueue_alloc_turnstile(kqueue_t kqu)
5549 {
5550 	struct kqworkloop *kqwl = kqu.kqwl;
5551 	kq_state_t kq_state;
5552 
5553 	kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
5554 	if (kq_state & KQ_HAS_TURNSTILE) {
5555 		/* force a dependency to pair with the atomic or with release below */
5556 		return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
5557 		           (uintptr_t)kq_state);
5558 	}
5559 
5560 	if (!(kq_state & KQ_WORKLOOP)) {
5561 		return TURNSTILE_NULL;
5562 	}
5563 
5564 	struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
5565 	bool workq_locked = false;
5566 
5567 	kqlock(kqu);
5568 
5569 	if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5570 		workq_locked = true;
5571 		workq_kern_threadreq_lock(kqwl->kqwl_p);
5572 	}
5573 
5574 	if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
5575 		free_ts = ts;
5576 		ts = kqwl->kqwl_turnstile;
5577 	} else {
5578 		ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
5579 		    ts, TURNSTILE_WORKLOOPS);
5580 
5581 		/* release-barrier to pair with the unlocked load of kqwl_turnstile above */
5582 		os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);
5583 
5584 		if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5585 			workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
5586 			    &kqwl->kqwl_request, kqwl->kqwl_owner,
5587 			    ts, TURNSTILE_IMMEDIATE_UPDATE);
5588 			/*
5589 			 * The workq may no longer be the interlock after this.
5590 			 * In which case the inheritor wasn't updated.
5591 			 */
5592 		}
5593 		if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
5594 			filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5595 		}
5596 	}
5597 
5598 	if (workq_locked) {
5599 		workq_kern_threadreq_unlock(kqwl->kqwl_p);
5600 	}
5601 
5602 	kqunlock(kqu);
5603 
5604 	if (free_ts) {
5605 		turnstile_deallocate(free_ts);
5606 	} else {
5607 		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
5608 	}
5609 	return ts;
5610 }
5611 
5612 __attribute__((always_inline))
5613 struct turnstile *
5614 kqueue_turnstile(kqueue_t kqu)
5615 {
5616 	kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
5617 	if (kq_state & KQ_WORKLOOP) {
5618 		return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
5619 	}
5620 	return TURNSTILE_NULL;
5621 }
5622 
5623 __attribute__((always_inline))
5624 struct turnstile *
5625 kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
5626 {
5627 	struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
5628 	if (kqwl) {
5629 		return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
5630 	}
5631 	return TURNSTILE_NULL;
5632 }
5633 
5634 static void
5635 kqworkloop_set_overcommit(struct kqworkloop *kqwl)
5636 {
5637 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5638 
5639 	/*
5640 	 * This test is racy, but since we never remove this bit,
5641 	 * it allows us to avoid taking a lock.
5642 	 */
5643 	if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
5644 		return;
5645 	}
5646 
5647 	kqlock_held(kqwl);
5648 
5649 	if (kqr_thread_requested_pending(kqr)) {
5650 		kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
5651 		    WORKQ_THREADREQ_MAKE_OVERCOMMIT);
5652 	} else {
5653 		kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
5654 	}
5655 }
5656 
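/*
 * kqworkq_update_override - apply a knote override to a kqworkq bucket
 *
 * Raises the override index of the bucket's thread request when a knote fires
 * above the bucket's QoS, and pushes the new override onto the bound servicer
 * thread if there is one.  Called with the kqueue lock held.
 */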
5657 static void
5658 kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
5659     kq_index_t override_index)
5660 {
5661 	workq_threadreq_t kqr;
5662 	kq_index_t old_override_index;
5663 	kq_index_t queue_index = kn->kn_qos_index;
5664 
5665 	if (override_index <= queue_index) {
5666 		return;
5667 	}
5668 
5669 	kqr = kqworkq_get_request(kqwq, queue_index);
5670 
5671 	kqlock_held(kqwq);
5672 
5673 	old_override_index = kqr->tr_kq_override_index;
5674 	if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
5675 		thread_t servicer = kqr_thread(kqr);
5676 		kqr->tr_kq_override_index = override_index;
5677 
5678 		/* apply the override to [incoming?] servicing thread */
5679 		if (servicer) {
5680 			if (old_override_index) {
5681 				thread_update_kevent_override(servicer, override_index);
5682 			} else {
5683 				thread_add_kevent_override(servicer, override_index);
5684 			}
5685 		}
5686 	}
5687 }
5688 
5689 static void
5690 kqueue_update_iotier_override(kqueue_t kqu)
5691 {
5692 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
5693 		kqworkloop_update_iotier_override(kqu.kqwl);
5694 	}
5695 }
5696 
5697 static void
5698 kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
5699 {
5700 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
5701 		kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
5702 		    qos);
5703 	} else {
5704 		kqworkq_update_override(kqu.kqwq, kn, qos);
5705 	}
5706 }
5707 
5708 static void
5709 kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
5710     enum kqwl_unbind_locked_mode how)
5711 {
5712 	struct uthread *ut = get_bsdthread_info(thread);
5713 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5714 
5715 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
5716 	    thread_tid(thread), 0, 0);
5717 
5718 	kqlock_held(kqwl);
5719 
5720 	assert(ut->uu_kqr_bound == kqr);
5721 	ut->uu_kqr_bound = NULL;
5722 	if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
5723 	    ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5724 		thread_drop_servicer_override(thread);
5725 		ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5726 	}
5727 
5728 	if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
5729 		turnstile_update_inheritor(kqwl->kqwl_turnstile,
5730 		    TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
5731 		turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
5732 		    TURNSTILE_INTERLOCK_HELD);
5733 	}
5734 
5735 #if CONFIG_PREADOPT_TG
5736 	/* The kqueue is able to adopt a thread group again */
5737 
5738 	thread_group_qos_t old_tg, new_tg = NULL;
5739 	int ret = os_atomic_rmw_loop(kqr_preadopt_thread_group_addr(kqr), old_tg, new_tg, relaxed, {
5740 		new_tg = old_tg;
5741 		if (old_tg == KQWL_PREADOPTED_TG_SENTINEL || old_tg == KQWL_PREADOPTED_TG_PROCESSED) {
5742 		        new_tg = KQWL_PREADOPTED_TG_NULL;
5743 		}
5744 	});
5745 	KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_SERVICER_UNBIND, old_tg, KQWL_PREADOPTED_TG_NULL);
5746 
5747 	if (ret) {
5748 		// Servicer can drop any preadopt thread group it has since it has
5749 		// unbound.
5750 		thread_set_preadopt_thread_group(thread, NULL);
5751 	}
5752 #endif
5753 	thread_update_servicer_iotier_override(thread, THROTTLE_LEVEL_END);
5754 
5755 	kqr->tr_thread = THREAD_NULL;
5756 	kqr->tr_state = WORKQ_TR_STATE_IDLE;
5757 	kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5758 }
5759 
5760 static void
5761 kqworkloop_unbind_delayed_override_drop(thread_t thread)
5762 {
5763 	struct uthread *ut = get_bsdthread_info(thread);
5764 	assert(ut->uu_kqr_bound == NULL);
5765 	if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5766 		thread_drop_servicer_override(thread);
5767 		ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5768 	}
5769 }
5770 
5771 /*
5772  *	kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
5773  *
5774  *	It will acknowledge events, and possibly request a new thread if:
5775  *	- there were active events left
5776  *	- we pended waitq hook callouts during processing
5777  *	- we pended wakeups while processing (or unsuppressing)
5778  *
5779  *	Called with the kqueue unlocked; takes and drops the kqueue lock.
5780  */
5781 static void
5782 kqworkloop_unbind(struct kqworkloop *kqwl)
5783 {
5784 	struct kqueue *kq = &kqwl->kqwl_kqueue;
5785 	workq_threadreq_t kqr = &kqwl->kqwl_request;
5786 	thread_t thread = kqr_thread_fast(kqr);
5787 	int op = KQWL_UTQ_PARKING;
5788 	kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;
5789 
5790 	assert(thread == current_thread());
5791 
5792 	kqlock(kqwl);
5793 
5794 	/*
5795 	 * Forcing the KQ_PROCESSING flag prevents the QoS updates caused by
5796 	 * unsuppressing knotes from being applied until the eventual call to
5797 	 * kqworkloop_update_threads_qos() below.
5798 	 */
5799 	assert((kq->kq_state & KQ_PROCESSING) == 0);
5800 	if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5801 		kq->kq_state |= KQ_PROCESSING;
5802 		qos_override = kqworkloop_acknowledge_events(kqwl);
5803 		kq->kq_state &= ~KQ_PROCESSING;
5804 	}
5805 
5806 	kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
5807 	kqworkloop_update_threads_qos(kqwl, op, qos_override);
5808 
5809 	kqunlock(kqwl);
5810 
5811 	/*
5812 	 * Drop the override on the current thread last, after the call to
5813 	 * kqworkloop_update_threads_qos above.
5814 	 */
5815 	kqworkloop_unbind_delayed_override_drop(thread);
5816 
5817 	/* If last reference, dealloc the workloop kq */
5818 	kqworkloop_release(kqwl);
5819 }
5820 
5821 static thread_qos_t
5822 kqworkq_unbind_locked(struct kqworkq *kqwq,
5823     workq_threadreq_t kqr, thread_t thread)
5824 {
5825 	struct uthread *ut = get_bsdthread_info(thread);
5826 	kq_index_t old_override = kqr->tr_kq_override_index;
5827 
5828 	KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
5829 	    thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);
5830 
5831 	kqlock_held(kqwq);
5832 
5833 	assert(ut->uu_kqr_bound == kqr);
5834 	ut->uu_kqr_bound = NULL;
5835 	kqr->tr_thread = THREAD_NULL;
5836 	kqr->tr_state = WORKQ_TR_STATE_IDLE;
5837 	kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5838 	kqwq->kqwq_state &= ~KQ_R2K_ARMED;
5839 
5840 	return old_override;
5841 }
5842 
5843 /*
5844  *	kqworkq_unbind - unbind of a workq kqueue from a thread
5845  *
5846  *	We may have to request new threads.
5847  *	This can happen when there are no waiting processing threads and:
5848  *	- there were active events we never got to (count > 0)
5849  *	- we pended waitq hook callouts during processing
5850  *	- we pended wakeups while processing (or unsuppressing)
5851  */
5852 static void
5853 kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
5854 {
5855 	struct kqworkq *kqwq = (struct kqworkq *)p->p_fd.fd_wqkqueue;
5856 	__assert_only int rc;
5857 
5858 	kqlock(kqwq);
5859 	rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
5860 	assert(rc == -1);
5861 	kqunlock(kqwq);
5862 }
5863 
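/*
 * kqworkq_get_request - return the thread request for a kqworkq QoS bucket
 */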
5864 workq_threadreq_t
5865 kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
5866 {
5867 	assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
5868 	return &kqwq->kqwq_request[qos_index - 1];
5869 }
5870 
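/*
 * knote_reset_priority - (re)set a knote's QoS from a registration priority
 *
 * Normalizes the pthread priority for the kqueue flavor (workloop, workq,
 * kqfile), records it in kn_qos, and refreshes kn_qos_override (never lowered
 * while the knote is in "Merge" mode) and, for non-suppressed knotes,
 * kn_qos_index.
 */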
5871 static void
5872 knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
5873 {
5874 	kq_index_t qos = _pthread_priority_thread_qos(pp);
5875 
5876 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
5877 		assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
5878 		pp = _pthread_priority_normalize(pp);
5879 	} else if (kqu.kq->kq_state & KQ_WORKQ) {
5880 		if (qos == THREAD_QOS_UNSPECIFIED) {
5881 			/* On workqueues, outside of QoS means MANAGER */
5882 			qos = KQWQ_QOS_MANAGER;
5883 			pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
5884 		} else {
5885 			pp = _pthread_priority_normalize(pp);
5886 		}
5887 	} else {
5888 		pp = _pthread_unspecified_priority();
5889 		qos = THREAD_QOS_UNSPECIFIED;
5890 	}
5891 
5892 	kn->kn_qos = (int32_t)pp;
5893 
5894 	if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
5895 		/* Never lower QoS when in "Merge" mode */
5896 		kn->kn_qos_override = qos;
5897 	}
5898 
5899 	/* only adjust in-use qos index when not suppressed */
5900 	if (kn->kn_status & KN_SUPPRESSED) {
5901 		kqueue_update_override(kqu, kn, qos);
5902 	} else if (kn->kn_qos_index != qos) {
5903 		knote_dequeue(kqu, kn);
5904 		kn->kn_qos_index = qos;
5905 	}
5906 }
5907 
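/*
 * knote_adjust_qos - apply a FILTER_ADJUST_EVENT_QOS result to a knote
 *
 * Computes the effective event QoS (using the registration QoS as a fallback
 * or as a floor), enters "Merge" mode when concurrent f_* calls race, and then
 * applies the new override: suppressed knotes push a manual override on the
 * kqueue, others are dequeued and get their kn_qos_index updated.
 */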
5908 static void
5909 knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
5910 {
5911 	thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
5912 
5913 	kqlock_held(kq);
5914 
5915 	assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
5916 	assert(qos_index < THREAD_QOS_LAST);
5917 
5918 	/*
5919 	 * Early exit for knotes that should not change QoS
5920 	 */
5921 	if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
5922 		panic("filter %d cannot change QoS", kn->kn_filtid);
5923 	} else if (__improbable(!knote_has_qos(kn))) {
5924 		return;
5925 	}
5926 
5927 	/*
5928 	 * knotes with the FALLBACK flag will only use their registration QoS if the
5929 	 * incoming event has no QoS, else, the registration QoS acts as a floor.
5930 	 */
5931 	thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
5932 	if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
5933 		if (qos_index == THREAD_QOS_UNSPECIFIED) {
5934 			qos_index = req_qos;
5935 		}
5936 	} else {
5937 		if (qos_index < req_qos) {
5938 			qos_index = req_qos;
5939 		}
5940 	}
5941 	if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
5942 		/* Never lower QoS when in "Merge" mode */
5943 		return;
5944 	}
5945 
5946 	if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
5947 		/*
5948 		 * When we're trying to update the QoS override while both an
5949 		 * f_event() and other f_* calls are running concurrently, any of these
5950 		 * in-flight calls may want to perform overrides that aren't properly
5951 		 * serialized with each other.
5952 		 *
5953 		 * The first update that observes this racy situation enters a "Merge"
5954 		 * mode which causes subsequent override requests to saturate the
5955 		 * override instead of replacing its value.
5956 		 *
5957 		 * This mode is left when knote_unlock() or knote_post()
5958 		 * observe that no other f_* routine is in flight.
5959 		 */
5960 		kn->kn_status |= KN_MERGE_QOS;
5961 	}
5962 
5963 	/*
5964 	 * Now apply the override if it changed.
5965 	 */
5966 
5967 	if (kn->kn_qos_override == qos_index) {
5968 		return;
5969 	}
5970 
5971 	kn->kn_qos_override = qos_index;
5972 
5973 	if (kn->kn_status & KN_SUPPRESSED) {
5974 		/*
5975 		 * For suppressed events, the kn_qos_index field cannot be touched as it
5976 		 * allows us to know on which suppress queue the knote is for a kqworkq.
5977 		 *
5978 		 * Also, there's no natural push applied on the kqueues when this field
5979 		 * changes anyway. We hence need to apply manual overrides in this case,
5980 		 * which will be cleared when the events are later acknowledged.
5981 		 */
5982 		kqueue_update_override(kq, kn, qos_index);
5983 	} else if (kn->kn_qos_index != qos_index) {
5984 		knote_dequeue(kq, kn);
5985 		kn->kn_qos_index = qos_index;
5986 	}
5987 }
5988 
5989 void
5990 klist_init(struct klist *list)
5991 {
5992 	SLIST_INIT(list);
5993 }
5994 
5995 
5996 /*
5997  * Query/Post each knote in the object's list
5998  *
5999  *	The object lock protects the list. It is assumed
6000  *	that the filter/event routine for the object can
6001  *	determine that the object is already locked (via
6002  *	the hint) and not deadlock itself.
6003  *
6004  *	The object lock should also hold off pending
6005  *	detach/drop operations.
6006  */
6007 void
6008 knote(struct klist *list, long hint)
6009 {
6010 	struct knote *kn;
6011 
6012 	SLIST_FOREACH(kn, list, kn_selnext) {
6013 		knote_post(kn, hint);
6014 	}
6015 }
6016 
6017 /*
6018  * attach a knote to the specified list.  Return true if this is the first entry.
6019  * The list is protected by whatever lock the object it is associated with uses.
6020  */
6021 int
6022 knote_attach(struct klist *list, struct knote *kn)
6023 {
6024 	int ret = SLIST_EMPTY(list);
6025 	SLIST_INSERT_HEAD(list, kn, kn_selnext);
6026 	return ret;
6027 }
6028 
6029 /*
6030  * detach a knote from the specified list.  Return true if that was the last entry.
6031  * The list is protected by whatever lock the object it is associated with uses.
6032  */
6033 int
6034 knote_detach(struct klist *list, struct knote *kn)
6035 {
6036 	SLIST_REMOVE(list, kn, knote, kn_selnext);
6037 	return SLIST_EMPTY(list);
6038 }
6039 
6040 /*
6041  * knote_vanish - Indicate that the source has vanished
6042  *
6043  * If the knote has requested EV_VANISHED delivery,
6044  * arrange for that. Otherwise, deliver a NOTE_REVOKE
6045  * event for backward compatibility.
6046  *
6047  * The knote is marked as having vanished, but is not
6048  * actually detached from the source in this instance.
6049  * The actual detach is deferred until the knote drop.
6050  *
6051  * Our caller already has the object lock held. Calling
6052  * the detach routine would try to take that lock
6053  * recursively - which likely is not supported.
6054  */
6055 void
6056 knote_vanish(struct klist *list, bool make_active)
6057 {
6058 	struct knote *kn;
6059 	struct knote *kn_next;
6060 
6061 	SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
6062 		struct kqueue *kq = knote_get_kq(kn);
6063 
6064 		kqlock(kq);
6065 		if (__probable(kn->kn_status & KN_REQVANISH)) {
6066 			/*
6067 			 * If EV_VANISHED supported - prepare to deliver one
6068 			 */
6069 			kn->kn_status |= KN_VANISHED;
6070 		} else {
6071 			/*
6072 			 * Handle the legacy way to indicate that the port/portset was
6073 			 * deallocated or left the current Mach portspace (modern technique
6074 			 * is with an EV_VANISHED protocol).
6075 			 *
6076 			 * Deliver an EV_EOF event for these changes (hopefully it will get
6077 			 * delivered before the port name recycles to the same generation
6078 			 * count and someone tries to re-register a kevent for it or the
6079 			 * events are udata-specific - avoiding a conflict).
6080 			 */
6081 			kn->kn_flags |= EV_EOF | EV_ONESHOT;
6082 		}
6083 		if (make_active) {
6084 			knote_activate(kq, kn, FILTER_ACTIVE);
6085 		}
6086 		kqunlock(kq);
6087 	}
6088 }
6089 
6090 /*
6091  * remove all knotes referencing a specified fd
6092  *
6093  * Entered with the proc_fd lock already held.
6094  * It returns the same way, but may drop it temporarily.
6095  */
6096 void
6097 knote_fdclose(struct proc *p, int fd)
6098 {
6099 	struct filedesc *fdt = &p->p_fd;
6100 	struct klist *list;
6101 	struct knote *kn;
6102 	KNOTE_LOCK_CTX(knlc);
6103 
6104 restart:
6105 	list = &fdt->fd_knlist[fd];
6106 	SLIST_FOREACH(kn, list, kn_link) {
6107 		struct kqueue *kq = knote_get_kq(kn);
6108 
6109 		kqlock(kq);
6110 
6111 		if (kq->kq_p != p) {
6112 			panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
6113 			    __func__, kq->kq_p, p);
6114 		}
6115 
6116 		/*
6117 		 * If the knote supports EV_VANISHED delivery,
6118 		 * transition it to vanished mode (or skip over
6119 		 * it if already vanished).
6120 		 */
6121 		if (kn->kn_status & KN_VANISHED) {
6122 			kqunlock(kq);
6123 			continue;
6124 		}
6125 
6126 		proc_fdunlock(p);
6127 		if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
6128 			/* the knote was dropped by someone, nothing to do */
6129 		} else if (kn->kn_status & KN_REQVANISH) {
6130 			kn->kn_status |= KN_VANISHED;
6131 
6132 			kqunlock(kq);
6133 			knote_fops(kn)->f_detach(kn);
6134 			if (kn->kn_is_fd) {
6135 				fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6136 			}
6137 			kn->kn_filtid = EVFILTID_DETACHED;
6138 			kqlock(kq);
6139 
6140 			knote_activate(kq, kn, FILTER_ACTIVE);
6141 			knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
6142 		} else {
6143 			knote_drop(kq, kn, &knlc);
6144 		}
6145 
6146 		proc_fdlock(p);
6147 		goto restart;
6148 	}
6149 }
6150 
6151 /*
6152  * knote_fdfind - lookup a knote in the fd table for process
6153  *
6154  * If the filter is file-based, lookup based on fd index.
6155  * Otherwise use a hash based on the ident.
6156  *
6157  * Matching is based on kq, filter, and ident. Optionally,
6158  * it may also be based on the udata field in the kevent -
6159  * allowing multiple event registration for the file object
6160  * per kqueue.
6161  *
6162  * fd_knhashlock or fdlock held on entry (and exit)
6163  */
6164 static struct knote *
6165 knote_fdfind(struct kqueue *kq,
6166     const struct kevent_internal_s *kev,
6167     bool is_fd,
6168     struct proc *p)
6169 {
6170 	struct filedesc *fdp = &p->p_fd;
6171 	struct klist *list = NULL;
6172 	struct knote *kn = NULL;
6173 
6174 	/*
6175 	 * determine where to look for the knote
6176 	 */
6177 	if (is_fd) {
6178 		/* fd-based knotes are linked off the fd table */
6179 		if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
6180 			list = &fdp->fd_knlist[kev->kei_ident];
6181 		}
6182 	} else if (fdp->fd_knhashmask != 0) {
6183 		/* hash non-fd knotes here too */
6184 		list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
6185 	}
6186 
6187 	/*
6188 	 * scan the selected list looking for a match
6189 	 */
6190 	if (list != NULL) {
6191 		SLIST_FOREACH(kn, list, kn_link) {
6192 			if (kq == knote_get_kq(kn) &&
6193 			    kev->kei_ident == kn->kn_id &&
6194 			    kev->kei_filter == kn->kn_filter) {
6195 				if (kev->kei_flags & EV_UDATA_SPECIFIC) {
6196 					if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
6197 					    kev->kei_udata == kn->kn_udata) {
6198 						break; /* matching udata-specific knote */
6199 					}
6200 				} else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
6201 					break; /* matching non-udata-specific knote */
6202 				}
6203 			}
6204 		}
6205 	}
6206 	return kn;
6207 }
6208 
6209 /*
6210  * kq_add_knote - Add knote to the fd table for the process
6211  * while checking for duplicates.
6212  *
6213  * All file-based filters associate a list of knotes by file
6214  * descriptor index. All other filters hash the knote by ident.
6215  *
6216  * May have to grow the table of knote lists to cover the
6217  * file descriptor index presented.
6218  *
6219  * fd_knhashlock and fdlock unheld on entry (and exit).
6220  *
6221  * Takes a rwlock boost if inserting the knote is successful.
6222  */
6223 static int
6224 kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
6225     struct proc *p)
6226 {
6227 	struct filedesc *fdp = &p->p_fd;
6228 	struct klist *list = NULL;
6229 	int ret = 0;
6230 	bool is_fd = kn->kn_is_fd;
6231 
6232 	if (is_fd) {
6233 		proc_fdlock(p);
6234 	} else {
6235 		knhash_lock(fdp);
6236 	}
6237 
6238 	if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
6239 		/* found an existing knote: we can't add this one */
6240 		ret = ERESTART;
6241 		goto out_locked;
6242 	}
6243 
6244 	/* knote was not found: add it now */
6245 	if (!is_fd) {
6246 		if (fdp->fd_knhashmask == 0) {
6247 			u_long size = 0;
6248 
6249 			list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
6250 			if (list == NULL) {
6251 				ret = ENOMEM;
6252 				goto out_locked;
6253 			}
6254 
6255 			fdp->fd_knhash = list;
6256 			fdp->fd_knhashmask = size;
6257 		}
6258 
6259 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6260 		SLIST_INSERT_HEAD(list, kn, kn_link);
6261 		ret = 0;
6262 		goto out_locked;
6263 	} else {
6264 		/* knote is fd based */
6265 
6266 		if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
6267 			u_int size = 0;
6268 
6269 			/* Make sure that fd stays below current process's soft limit AND system allowed per-process limits */
6270 			if (kn->kn_id >= (uint64_t)proc_limitgetcur_nofile(p)) {
6271 				ret = EINVAL;
6272 				goto out_locked;
6273 			}
6274 			/* have to grow the fd_knlist */
6275 			size = fdp->fd_knlistsize;
6276 			while (size <= kn->kn_id) {
6277 				size += KQEXTENT;
6278 			}
6279 
6280 			if (size >= (UINT_MAX / sizeof(struct klist))) {
6281 				ret = EINVAL;
6282 				goto out_locked;
6283 			}
6284 
6285 			list = kalloc_type(struct klist, size, Z_WAITOK | Z_ZERO);
6286 			if (list == NULL) {
6287 				ret = ENOMEM;
6288 				goto out_locked;
6289 			}
6290 
6291 			bcopy(fdp->fd_knlist, list,
6292 			    fdp->fd_knlistsize * sizeof(struct klist));
6293 			kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist);
6294 			fdp->fd_knlist = list;
6295 			fdp->fd_knlistsize = size;
6296 		}
6297 
6298 		list = &fdp->fd_knlist[kn->kn_id];
6299 		SLIST_INSERT_HEAD(list, kn, kn_link);
6300 		ret = 0;
6301 		goto out_locked;
6302 	}
6303 
6304 out_locked:
6305 	if (ret == 0) {
6306 		kqlock(kq);
6307 		assert((kn->kn_status & KN_LOCKED) == 0);
6308 		(void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
6309 		kqueue_retain(kq); /* retain a kq ref */
6310 	}
6311 	if (is_fd) {
6312 		proc_fdunlock(p);
6313 	} else {
6314 		knhash_unlock(fdp);
6315 	}
6316 
6317 	return ret;
6318 }
6319 
6320 /*
6321  * kq_remove_knote - remove a knote from the fd table for process
6322  *
6323  * If the filter is file-based, remove based on fd index.
6324  * Otherwise remove from the hash based on the ident.
6325  *
6326  * fd_knhashlock and fdlock unheld on entry (and exit).
6327  */
6328 static void
6329 kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
6330     struct knote_lock_ctx *knlc)
6331 {
6332 	struct filedesc *fdp = &p->p_fd;
6333 	struct klist *list = NULL;
6334 	uint16_t kq_state;
6335 	bool is_fd = kn->kn_is_fd;
6336 
6337 	if (is_fd) {
6338 		proc_fdlock(p);
6339 	} else {
6340 		knhash_lock(fdp);
6341 	}
6342 
6343 	if (is_fd) {
6344 		assert((u_int)fdp->fd_knlistsize > kn->kn_id);
6345 		list = &fdp->fd_knlist[kn->kn_id];
6346 	} else {
6347 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6348 	}
6349 	SLIST_REMOVE(list, kn, knote, kn_link);
6350 
6351 	kqlock(kq);
6352 
6353 	/* Update the servicer iotier override */
6354 	kqueue_update_iotier_override(kq);
6355 
6356 	kq_state = kq->kq_state;
6357 	if (knlc) {
6358 		knote_unlock_cancel(kq, kn, knlc);
6359 	} else {
6360 		kqunlock(kq);
6361 	}
6362 	if (is_fd) {
6363 		proc_fdunlock(p);
6364 	} else {
6365 		knhash_unlock(fdp);
6366 	}
6367 
6368 	if (kq_state & KQ_DYNAMIC) {
6369 		kqworkloop_release((struct kqworkloop *)kq);
6370 	}
6371 }
6372 
6373 /*
6374  * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
6375  * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
6376  *
6377  * fd_knhashlock or fdlock unheld on entry (and exit)
6378  */
6379 
6380 static struct knote *
6381 kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
6382     bool is_fd, struct proc *p)
6383 {
6384 	struct filedesc *fdp = &p->p_fd;
6385 	struct knote *kn;
6386 
6387 	if (is_fd) {
6388 		proc_fdlock(p);
6389 	} else {
6390 		knhash_lock(fdp);
6391 	}
6392 
6393 	/*
6394 	 * Temporary horrible hack:
6395 	 * this cast is gross and will go away in a future change.
6396 	 * It is OK to do because we don't look at xflags/s_fflags,
6397 	 * and because when we cast down the kev this way,
6398 	 * the truncated filter field still works.
6399 	 */
6400 	kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);
6401 
6402 	if (kn) {
6403 		kqlock(kq);
6404 		assert(knote_get_kq(kn) == kq);
6405 	}
6406 
6407 	if (is_fd) {
6408 		proc_fdunlock(p);
6409 	} else {
6410 		knhash_unlock(fdp);
6411 	}
6412 
6413 	return kn;
6414 }
6415 
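/*
 * knote_get_tailq - return the event queue a knote belongs to
 *
 * Selects the per-QoS bucket for workloop and workq kqueues, or the single
 * queue of a kqfile, based on the knote's kn_qos_index.
 */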
6416 static struct kqtailq *
6417 knote_get_tailq(kqueue_t kqu, struct knote *kn)
6418 {
6419 	kq_index_t qos_index = kn->kn_qos_index;
6420 
6421 	if (kqu.kq->kq_state & KQ_WORKLOOP) {
6422 		assert(qos_index > 0 && qos_index <= KQWL_NBUCKETS);
6423 		return &kqu.kqwl->kqwl_queue[qos_index - 1];
6424 	} else if (kqu.kq->kq_state & KQ_WORKQ) {
6425 		assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
6426 		return &kqu.kqwq->kqwq_queue[qos_index - 1];
6427 	} else {
6428 		assert(qos_index == QOS_INDEX_KQFILE);
6429 		return &kqu.kqf->kqf_queue;
6430 	}
6431 }
6432 
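/*
 * knote_enqueue - queue an active knote for delivery
 *
 * No-op if the knote is inactive, disabled, suppressed, dropping or already
 * queued.  Otherwise appends it to its QoS queue and, if that queue was empty,
 * wakes up the kqueue (workloop, workq or kqfile flavor).
 */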
6433 static void
6434 knote_enqueue(kqueue_t kqu, struct knote *kn)
6435 {
6436 	kqlock_held(kqu);
6437 
6438 	if ((kn->kn_status & KN_ACTIVE) == 0) {
6439 		return;
6440 	}
6441 
6442 	if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING | KN_QUEUED)) {
6443 		return;
6444 	}
6445 
6446 	struct kqtailq *queue = knote_get_tailq(kqu, kn);
6447 	bool wakeup = TAILQ_EMPTY(queue);
6448 
6449 	TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
6450 	kn->kn_status |= KN_QUEUED;
6451 	kqu.kq->kq_count++;
6452 
6453 	if (wakeup) {
6454 		if (kqu.kq->kq_state & KQ_WORKLOOP) {
6455 			kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
6456 		} else if (kqu.kq->kq_state & KQ_WORKQ) {
6457 			kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
6458 		} else {
6459 			kqfile_wakeup(kqu.kqf, 0, THREAD_AWAKENED);
6460 		}
6461 	}
6462 }
6463 
6464 __attribute__((always_inline))
6465 static inline void
6466 knote_dequeue(kqueue_t kqu, struct knote *kn)
6467 {
6468 	if (kn->kn_status & KN_QUEUED) {
6469 		struct kqtailq *queue = knote_get_tailq(kqu, kn);
6470 
6471 		// attaching the knote calls knote_reset_priority() without
6472 		// the kqlock held, which is fine, so we only assert kqlock_held()
6473 		// when the knote is actually queued.
6474 		kqlock_held(kqu);
6475 
6476 		TAILQ_REMOVE(queue, kn, kn_tqe);
6477 		kn->kn_status &= ~KN_QUEUED;
6478 		kqu.kq->kq_count--;
6479 		if ((kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
6480 			assert((kqu.kq->kq_count == 0) ==
6481 			    (bool)TAILQ_EMPTY(queue));
6482 		}
6483 	}
6484 }
6485 
6486 /* called with kqueue lock held */
6487 static void
6488 knote_suppress(kqueue_t kqu, struct knote *kn)
6489 {
6490 	struct kqtailq *suppressq;
6491 
6492 	kqlock_held(kqu);
6493 
6494 	assert((kn->kn_status & KN_SUPPRESSED) == 0);
6495 	assert(kn->kn_status & KN_QUEUED);
6496 
6497 	knote_dequeue(kqu, kn);
6498 	/* deactivate - so new activations indicate a wakeup */
6499 	kn->kn_status &= ~KN_ACTIVE;
6500 	kn->kn_status |= KN_SUPPRESSED;
6501 	suppressq = kqueue_get_suppressed_queue(kqu, kn);
6502 	TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
6503 }
6504 
6505 __attribute__((always_inline))
6506 static inline void
6507 knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
6508 {
6509 	struct kqtailq *suppressq;
6510 
6511 	kqlock_held(kqu);
6512 
6513 	assert(kn->kn_status & KN_SUPPRESSED);
6514 
6515 	kn->kn_status &= ~KN_SUPPRESSED;
6516 	suppressq = kqueue_get_suppressed_queue(kqu, kn);
6517 	TAILQ_REMOVE(suppressq, kn, kn_tqe);
6518 
6519 	/*
6520 	 * If the knote is no longer active, reset its push,
6521 	 * and resynchronize kn_qos_index with kn_qos_override
6522 	 * for knotes with a real qos.
6523 	 */
6524 	if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
6525 		kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
6526 	}
6527 	kn->kn_qos_index = kn->kn_qos_override;
6528 }
6529 
6530 /* called with kqueue lock held */
6531 static void
6532 knote_unsuppress(kqueue_t kqu, struct knote *kn)
6533 {
6534 	knote_unsuppress_noqueue(kqu, kn);
6535 	knote_enqueue(kqu, kn);
6536 }
6537 
6538 __attribute__((always_inline))
6539 static inline void
6540 knote_mark_active(struct knote *kn)
6541 {
6542 	if ((kn->kn_status & KN_ACTIVE) == 0) {
6543 		KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
6544 		    kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
6545 		    kn->kn_filtid);
6546 	}
6547 
6548 	kn->kn_status |= KN_ACTIVE;
6549 }
6550 
6551 /* called with kqueue lock held */
6552 static void
6553 knote_activate(kqueue_t kqu, struct knote *kn, int result)
6554 {
6555 	assert(result & FILTER_ACTIVE);
6556 	if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
6557 		// may dequeue the knote
6558 		knote_adjust_qos(kqu.kq, kn, result);
6559 	}
6560 	knote_mark_active(kn);
6561 	knote_enqueue(kqu, kn);
6562 }
6563 
6564 /*
6565  * This function applies changes requested by f_attach or f_touch for
6566  * a given filter. It proceeds in a carefully chosen order to help
6567  * every single transition do the minimal amount of work possible.
6568  */
6569 static void
6570 knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
6571     int result)
6572 {
6573 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
6574 		kn->kn_status &= ~KN_DISABLED;
6575 
6576 		/*
6577 		 * it is possible for userland to have knotes registered for a given
6578 		 * workloop `wl_orig` but really handled on another workloop `wl_new`.
6579 		 *
6580 		 * In that case, rearming will happen from the servicer thread of
6581 		 * `wl_new` which if `wl_orig` is no longer being serviced, would cause
6582 		 * this knote to stay suppressed forever if we only relied on
6583 		 * kqworkloop_acknowledge_events to be called by `wl_orig`.
6584 		 *
6585 		 * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
6586 		 * unsuppress because that would mess with the processing phase of
6587 		 * `wl_orig`, however it also means kqworkloop_acknowledge_events()
6588 		 * will be called.
6589 		 */
6590 		if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
6591 			if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
6592 				knote_unsuppress_noqueue(kqu, kn);
6593 			}
6594 		}
6595 	}
6596 
6597 	if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) {
6598 		kqueue_update_iotier_override(kqu);
6599 	}
6600 
6601 	if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
6602 		// may dequeue the knote
6603 		knote_reset_priority(kqu, kn, kev->qos);
6604 	}
6605 
6606 	/*
6607 	 * When we unsuppress above, or because of knote_reset_priority(),
6608 	 * the knote may have been dequeued, we need to restore the invariant
6609 	 * that if the knote is active it needs to be queued now that
6610 	 * we're done applying changes.
6611 	 */
6612 	if (result & FILTER_ACTIVE) {
6613 		knote_activate(kqu, kn, result);
6614 	} else {
6615 		knote_enqueue(kqu, kn);
6616 	}
6617 
6618 	if ((result & FILTER_THREADREQ_NODEFEER) &&
6619 	    act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
6620 		workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
6621 	}
6622 }
6623 
6624 /*
6625  * knote_drop - disconnect and drop the knote
6626  *
6627  * Called with the kqueue locked, returns with the kqueue unlocked.
6628  *
6629  * If a knote locking context is passed, it is canceled.
6630  *
6631  * The knote may have already been detached from
6632  * (or not yet attached to) its source object.
6633  */
6634 static void
6635 knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
6636 {
6637 	struct proc *p = kq->kq_p;
6638 
6639 	kqlock_held(kq);
6640 
6641 	assert((kn->kn_status & KN_DROPPING) == 0);
6642 	if (knlc == NULL) {
6643 		assert((kn->kn_status & KN_LOCKED) == 0);
6644 	}
6645 	kn->kn_status |= KN_DROPPING;
6646 
6647 	if (kn->kn_status & KN_SUPPRESSED) {
6648 		knote_unsuppress_noqueue(kq, kn);
6649 	} else {
6650 		knote_dequeue(kq, kn);
6651 	}
6652 	knote_wait_for_post(kq, kn);
6653 
6654 	knote_fops(kn)->f_detach(kn);
6655 
6656 	/* kq may be freed when kq_remove_knote() returns */
6657 	kq_remove_knote(kq, kn, p, knlc);
6658 	if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
6659 		fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6660 	}
6661 
6662 	knote_free(kn);
6663 }
6664 
6665 void
6666 knote_init(void)
6667 {
6668 #if CONFIG_MEMORYSTATUS
6669 	/* Initialize the memorystatus list lock */
6670 	memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL);
6671 #endif
6672 }
6673 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
6674 
6675 const struct filterops *
6676 knote_fops(struct knote *kn)
6677 {
6678 	return sysfilt_ops[kn->kn_filtid];
6679 }
6680 
6681 static struct knote *
6682 knote_alloc(void)
6683 {
6684 	return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
6685 }
6686 
6687 static void
6688 knote_free(struct knote *kn)
6689 {
6690 	assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
6691 	zfree(knote_zone, kn);
6692 }
6693 
6694 #pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
6695 
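/*
 * kevent_get_context - return the per-thread kevent context
 *
 * The context is stashed in the uthread save area (uu_save.uus_kevent).
 */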
6696 kevent_ctx_t
6697 kevent_get_context(thread_t thread)
6698 {
6699 	uthread_t ut = get_bsdthread_info(thread);
6700 	return &ut->uu_save.uus_kevent;
6701 }
6702 
6703 static inline bool
6704 kevent_args_requesting_events(unsigned int flags, int nevents)
6705 {
6706 	return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
6707 }
6708 
6709 static inline int
6710 kevent_adjust_flags_for_proc(proc_t p, int flags)
6711 {
6712 	__builtin_assume(p);
6713 	return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
6714 }
6715 
6716 /*!
6717  * @function kevent_get_kqfile
6718  *
6719  * @brief
6720  * Lookup a kqfile by fd.
6721  *
6722  * @discussion
6723  * Callers: kevent, kevent64, kevent_qos
6724  *
6725  * This is not assumed to be a fastpath (kqfile interfaces are legacy)
6726  */
6727 OS_NOINLINE
6728 static int
6729 kevent_get_kqfile(struct proc *p, int fd, int flags,
6730     struct fileproc **fpp, struct kqueue **kqp)
6731 {
6732 	int error = 0;
6733 	struct kqueue *kq;
6734 
6735 	error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp);
6736 	if (__improbable(error)) {
6737 		return error;
6738 	}
6739 	kq = (struct kqueue *)fp_get_data((*fpp));
6740 
6741 	uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
6742 	if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
6743 		kqlock(kq);
6744 		kq_state = kq->kq_state;
6745 		if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
6746 			if (flags & KEVENT_FLAG_LEGACY32) {
6747 				kq_state |= KQ_KEV32;
6748 			} else if (flags & KEVENT_FLAG_LEGACY64) {
6749 				kq_state |= KQ_KEV64;
6750 			} else {
6751 				kq_state |= KQ_KEV_QOS;
6752 			}
6753 			kq->kq_state = kq_state;
6754 		}
6755 		kqunlock(kq);
6756 	}
6757 
6758 	/*
6759 	 * kqfiles can't be used through the legacy kevent()
6760 	 * and other interfaces at the same time.
6761 	 */
6762 	if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
6763 	    (bool)(kq_state & KQ_KEV32))) {
6764 		fp_drop(p, fd, *fpp, 0);
6765 		return EINVAL;
6766 	}
6767 
6768 	*kqp = kq;
6769 	return 0;
6770 }
6771 
6772 /*!
6773  * @function kevent_get_kqwq
6774  *
6775  * @brief
6776  * Lookup or create the process kqwq (fastpath).
6777  *
6778  * @discussion
6779  * Callers: kevent64, kevent_qos
6780  */
6781 OS_ALWAYS_INLINE
6782 static int
6783 kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
6784 {
6785 	struct kqworkq *kqwq = p->p_fd.fd_wqkqueue;
6786 
6787 	if (__improbable(kevent_args_requesting_events(flags, nevents))) {
6788 		return EINVAL;
6789 	}
6790 	if (__improbable(kqwq == NULL)) {
6791 		kqwq = kqworkq_alloc(p, flags);
6792 		if (__improbable(kqwq == NULL)) {
6793 			return ENOMEM;
6794 		}
6795 	}
6796 
6797 	*kqp = &kqwq->kqwq_kqueue;
6798 	return 0;
6799 }
6800 
6801 #pragma mark kevent copyio
6802 
6803 /*!
6804  * @function kevent_get_data_size
6805  *
6806  * @brief
6807  * Copies in the extra data size from user-space.
6808  */
6809 static int
6810 kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
6811     kevent_ctx_t kectx)
6812 {
6813 	if (!data_avail || !data_out) {
6814 		kectx->kec_data_size  = 0;
6815 		kectx->kec_data_resid = 0;
6816 	} else if (flags & KEVENT_FLAG_PROC64) {
6817 		user64_size_t usize = 0;
6818 		int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6819 		if (__improbable(error)) {
6820 			return error;
6821 		}
6822 		kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6823 	} else {
6824 		user32_size_t usize = 0;
6825 		int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6826 		if (__improbable(error)) {
6827 			return error;
6828 		}
6829 		kectx->kec_data_avail = data_avail;
6830 		kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6831 	}
6832 	kectx->kec_data_out   = data_out;
6833 	kectx->kec_data_avail = data_avail;
6834 	return 0;
6835 }
6836 
6837 /*!
6838  * @function kevent_put_data_size
6839  *
6840  * @brief
6841  * Copies out the residual data size to user-space if any has been used.
6842  */
6843 static int
6844 kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
6845 {
6846 	if (kectx->kec_data_resid == kectx->kec_data_size) {
6847 		return 0;
6848 	}
6849 	if (flags & KEVENT_FLAG_KERNEL) {
6850 		*(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
6851 		return 0;
6852 	}
6853 	if (flags & KEVENT_FLAG_PROC64) {
6854 		user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
6855 		return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6856 	} else {
6857 		user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
6858 		return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6859 	}
6860 }
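/*
 * Illustrative sketch (editor's addition): with KEVENT_FLAG_STACK_DATA the
 * caller passes an out-of-band data buffer (data_out) plus a size_t holding
 * its length (data_available).  kevent_get_data_size() copies that length in;
 * kevent_put_data_size() writes the residual (unused) byte count back, but
 * only when some of the buffer was actually consumed.  Assuming the private
 * kevent_qos() wrapper prototype and a hypothetical kqueue fd "kq":
 *
 *     char buf[512];
 *     size_t avail = sizeof(buf);
 *     struct kevent_qos_s out[8];
 *     int n = kevent_qos(kq, NULL, 0, out, 8, buf, &avail,
 *         KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE);
 *     // if a filter copied data into buf, avail now holds the residual size
 */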
6861 
6862 /*!
6863  * @function kevent_legacy_copyin
6864  *
6865  * @brief
6866  * Handles the copyin of a kevent/kevent64 event.
6867  */
6868 static int
6869 kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
6870 {
6871 	int error;
6872 
6873 	assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
6874 
6875 	if (flags & KEVENT_FLAG_LEGACY64) {
6876 		struct kevent64_s kev64;
6877 
6878 		error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6879 		if (__improbable(error)) {
6880 			return error;
6881 		}
6882 		*addrp += sizeof(kev64);
6883 		*kevp = (struct kevent_qos_s){
6884 			.ident  = kev64.ident,
6885 			.filter = kev64.filter,
6886 			/* Make sure user doesn't pass in any system flags */
6887 			.flags  = kev64.flags & ~EV_SYSFLAGS,
6888 			.udata  = kev64.udata,
6889 			.fflags = kev64.fflags,
6890 			.data   = kev64.data,
6891 			.ext[0] = kev64.ext[0],
6892 			.ext[1] = kev64.ext[1],
6893 		};
6894 	} else if (flags & KEVENT_FLAG_PROC64) {
6895 		struct user64_kevent kev64;
6896 
6897 		error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6898 		if (__improbable(error)) {
6899 			return error;
6900 		}
6901 		*addrp += sizeof(kev64);
6902 		*kevp = (struct kevent_qos_s){
6903 			.ident  = kev64.ident,
6904 			.filter = kev64.filter,
6905 			/* Make sure user doesn't pass in any system flags */
6906 			.flags  = kev64.flags & ~EV_SYSFLAGS,
6907 			.udata  = kev64.udata,
6908 			.fflags = kev64.fflags,
6909 			.data   = kev64.data,
6910 		};
6911 	} else {
6912 		struct user32_kevent kev32;
6913 
6914 		error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
6915 		if (__improbable(error)) {
6916 			return error;
6917 		}
6918 		*addrp += sizeof(kev32);
6919 		*kevp = (struct kevent_qos_s){
6920 			.ident  = (uintptr_t)kev32.ident,
6921 			.filter = kev32.filter,
6922 			/* Make sure user doesn't pass in any system flags */
6923 			.flags  = kev32.flags & ~EV_SYSFLAGS,
6924 			.udata  = CAST_USER_ADDR_T(kev32.udata),
6925 			.fflags = kev32.fflags,
6926 			.data   = (intptr_t)kev32.data,
6927 		};
6928 	}
6929 
6930 	return 0;
6931 }
6932 
6933 /*!
6934  * @function kevent_modern_copyin
6935  *
6936  * @brief
6937  * Handles the copyin of a kevent_qos/kevent_id event.
6938  */
6939 static int
6940 kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
6941 {
6942 	int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
6943 	if (__probable(!error)) {
6944 		/* Make sure user doesn't pass in any system flags */
6945 		*addrp += sizeof(struct kevent_qos_s);
6946 		kevp->flags &= ~EV_SYSFLAGS;
6947 	}
6948 	return error;
6949 }
6950 
6951 /*!
6952  * @function kevent_legacy_copyout
6953  *
6954  * @brief
6955  * Handles the copyout of a kevent/kevent64 event.
6956  */
6957 static int
6958 kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
6959 {
6960 	int advance;
6961 	int error;
6962 
6963 	assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
6964 
6965 	/*
6966 	 * fully initialize the different output event structure
6967 	 * types from the internal kevent (and some universal
6968 	 * defaults for fields not represented in the internal
6969 	 * form).
6970 	 *
6971 	 * Note: these structures have no padding hence the C99
6972 	 *       initializers below do not leak kernel info.
6973 	 */
6974 	if (flags & KEVENT_FLAG_LEGACY64) {
6975 		struct kevent64_s kev64 = {
6976 			.ident  = kevp->ident,
6977 			.filter = kevp->filter,
6978 			.flags  = kevp->flags,
6979 			.fflags = kevp->fflags,
6980 			.data   = (int64_t)kevp->data,
6981 			.udata  = kevp->udata,
6982 			.ext[0] = kevp->ext[0],
6983 			.ext[1] = kevp->ext[1],
6984 		};
6985 		advance = sizeof(struct kevent64_s);
6986 		error = copyout((caddr_t)&kev64, *addrp, advance);
6987 	} else if (flags & KEVENT_FLAG_PROC64) {
6988 		/*
6989 		 * deal with the special case of a user-supplied
6990 		 * value of (uintptr_t)-1.
6991 		 */
6992 		uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
6993 		    (uint64_t)-1LL : (uint64_t)kevp->ident;
6994 		struct user64_kevent kev64 = {
6995 			.ident  = ident,
6996 			.filter = kevp->filter,
6997 			.flags  = kevp->flags,
6998 			.fflags = kevp->fflags,
6999 			.data   = (int64_t) kevp->data,
7000 			.udata  = (user_addr_t) kevp->udata,
7001 		};
7002 		advance = sizeof(kev64);
7003 		error = copyout((caddr_t)&kev64, *addrp, advance);
7004 	} else {
7005 		struct user32_kevent kev32 = {
7006 			.ident  = (uint32_t)kevp->ident,
7007 			.filter = kevp->filter,
7008 			.flags  = kevp->flags,
7009 			.fflags = kevp->fflags,
7010 			.data   = (int32_t)kevp->data,
7011 			.udata  = (uint32_t)kevp->udata,
7012 		};
7013 		advance = sizeof(kev32);
7014 		error = copyout((caddr_t)&kev32, *addrp, advance);
7015 	}
7016 	if (__probable(!error)) {
7017 		*addrp += advance;
7018 	}
7019 	return error;
7020 }
7021 
7022 /*!
7023  * @function kevent_modern_copyout
7024  *
7025  * @brief
7026  * Handles the copyout of a kevent_qos/kevent_id event.
7027  */
7028 OS_ALWAYS_INLINE
7029 static inline int
7030 kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
7031 {
7032 	int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
7033 	if (__probable(!error)) {
7034 		*addrp += sizeof(struct kevent_qos_s);
7035 	}
7036 	return error;
7037 }
7038 
7039 #pragma mark kevent core implementation
7040 
7041 /*!
7042  * @function kevent_callback_inline
7043  *
7044  * @brief
7045  * Callback for each individual event
7046  *
7047  * @discussion
7048  * This is meant to be inlined in kevent_modern_callback and
7049  * kevent_legacy_callback.
7050  */
7051 OS_ALWAYS_INLINE
7052 static inline int
7053 kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
7054 {
7055 	int error;
7056 
7057 	assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);
7058 
7059 	/*
7060 	 * Copy out the appropriate amount of event data for this user.
7061 	 */
7062 	if (legacy) {
7063 		error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
7064 		    kectx->kec_process_flags);
7065 	} else {
7066 		error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
7067 	}
7068 
7069 	/*
7070 	 * If there isn't space for additional events, return
7071 	 * a harmless error to stop the processing here
7072 	 */
7073 	if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
7074 		error = EWOULDBLOCK;
7075 	}
7076 	return error;
7077 }
7078 
7079 /*!
7080  * @function kevent_modern_callback
7081  *
7082  * @brief
7083  * Callback for each individual modern event.
7084  *
7085  * @discussion
7086  * This callback handles kevent_qos/kevent_id events.
7087  */
7088 static int
7089 kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7090 {
7091 	return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
7092 }
7093 
7094 /*!
7095  * @function kevent_legacy_callback
7096  *
7097  * @brief
7098  * Callback for each individual legacy event.
7099  *
7100  * @discussion
7101  * This callback handles kevent/kevent64 events.
7102  */
7103 static int
7104 kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7105 {
7106 	return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
7107 }
7108 
7109 /*!
7110  * @function kevent_cleanup
7111  *
7112  * @brief
7113  * Handles the cleanup returning from a kevent call.
7114  *
7115  * @discussion
7116  * kevent entry points will take a reference on workloops,
7117  * and a usecount on the fileglob of kqfiles.
7118  *
7119  * This function undoes this on the exit paths of kevents.
7120  *
7121  * @returns
7122  * The error to return to userspace.
7123  */
7124 static int
7125 kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
7126 {
7127 	// poll should not call any codepath leading to this
7128 	assert((flags & KEVENT_FLAG_POLL) == 0);
7129 
7130 	if (flags & KEVENT_FLAG_WORKLOOP) {
7131 		kqworkloop_release(kqu.kqwl);
7132 	} else if (flags & KEVENT_FLAG_WORKQ) {
7133 		/* nothing held */
7134 	} else {
7135 		fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
7136 	}
7137 
7138 	/* don't restart after signals... */
7139 	if (error == ERESTART) {
7140 		error = EINTR;
7141 	} else if (error == 0) {
7142 		/* don't abandon other output just because of residual copyout failures */
7143 		(void)kevent_put_data_size(flags, kectx);
7144 	}
7145 
7146 	if (flags & KEVENT_FLAG_PARKING) {
7147 		thread_t th = current_thread();
7148 		struct uthread *uth = get_bsdthread_info(th);
7149 		if (uth->uu_kqr_bound) {
7150 			thread_unfreeze_base_pri(th);
7151 		}
7152 	}
7153 	return error;
7154 }
7155 
7156 /*!
7157  * @function kqueue_process
7158  *
7159  * @brief
7160  * Process the triggered events in a kqueue.
7161  *
7162  * @discussion
7163  * Walk the queued knotes and validate that they are really still triggered
7164  * events by calling the filter routines (if necessary).
7165  *
7166  * For each event that is still considered triggered, invoke the callback
7167  * routine provided.
7168  *
7169  * caller holds a reference on the kqueue.
7170  * kqueue locked on entry and exit - but may be dropped
7171  * kqueue list locked (held for duration of call)
7172  *
7173  * This is only called by kqueue_scan() so that the compiler can inline it.
7174  *
7175  * @returns
7176  * - 0:            no event was returned, no other error occurred
7177  * - EBADF:        the kqueue is being destroyed (KQ_DRAIN is set)
7178  * - EWOULDBLOCK:  (not an error) events have been found and we should return
7179  * - EFAULT:       copyout failed
7180  * - filter specific errors
7181  */
7182 static int
7183 kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7184     kevent_callback_t callback)
7185 {
7186 	workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
7187 	struct knote *kn;
7188 	int error = 0, rc = 0;
7189 	struct kqtailq *base_queue, *queue;
7190 #if DEBUG || DEVELOPMENT
7191 	int retries = 64;
7192 #endif
7193 	uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
7194 
7195 	if (kq_type & KQ_WORKQ) {
7196 		rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
7197 	} else if (kq_type & KQ_WORKLOOP) {
7198 		rc = kqworkloop_begin_processing(kqu.kqwl, flags);
7199 	} else {
7200 kqfile_retry:
7201 		rc = kqfile_begin_processing(kqu.kqf);
7202 		if (rc == EBADF) {
7203 			return EBADF;
7204 		}
7205 	}
7206 
7207 	if (rc == -1) {
7208 		/* Nothing to process */
7209 		return 0;
7210 	}
7211 
7212 	/*
7213 	 * loop through the enqueued knotes associated with this request,
7214 	 * processing each one. Each request may have several queues
7215 	 * of knotes to process (depending on the type of kqueue) so we
7216 	 * have to loop through all the queues as long as we have additional
7217 	 * space.
7218 	 */
7219 
7220 process_again:
7221 	if (kq_type & KQ_WORKQ) {
7222 		base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1];
7223 	} else if (kq_type & KQ_WORKLOOP) {
7224 		base_queue = &kqu.kqwl->kqwl_queue[0];
7225 		queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
7226 	} else {
7227 		base_queue = queue = &kqu.kqf->kqf_queue;
7228 	}
7229 
7230 	do {
7231 		while ((kn = TAILQ_FIRST(queue)) != NULL) {
7232 			error = knote_process(kn, kectx, callback);
7233 			if (error == EJUSTRETURN) {
7234 				error = 0;
7235 			} else if (__improbable(error)) {
7236 				/* error is EWOULDBLOCK when the out event array is full */
7237 				goto stop_processing;
7238 			}
7239 		}
7240 	} while (queue-- > base_queue);
7241 
7242 	if (kectx->kec_process_noutputs) {
7243 		/* callers will transform this into no error */
7244 		error = EWOULDBLOCK;
7245 	}
7246 
7247 stop_processing:
7248 	/*
7249 	 * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
7250 	 * we want to unbind the kqrequest from the thread.
7251 	 *
7252 	 * However, because the kq locks are dropped several times during process,
7253 	 * However, because the kq locks are dropped several times during processing,
7254 	 * processing and process again, until it converges.
7255 	 *
7256 	 * If we have an error or returned events, end processing never fails.
7257 	 */
7258 	if (error) {
7259 		flags &= ~KEVENT_FLAG_PARKING;
7260 	}
7261 	if (kq_type & KQ_WORKQ) {
7262 		rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
7263 	} else if (kq_type & KQ_WORKLOOP) {
7264 		rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
7265 	} else {
7266 		rc = kqfile_end_processing(kqu.kqf);
7267 	}
7268 
7269 	if (__probable(error)) {
7270 		return error;
7271 	}
7272 
7273 	if (__probable(rc >= 0)) {
7274 		assert(rc == 0 || rc == EBADF);
7275 		return rc;
7276 	}
7277 
7278 #if DEBUG || DEVELOPMENT
7279 	if (retries-- == 0) {
7280 		panic("kevent: way too many knote_process retries, kq: %p (0x%04x)",
7281 		    kqu.kq, kqu.kq->kq_state);
7282 	}
7283 #endif
7284 	if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
7285 		assert(flags & KEVENT_FLAG_PARKING);
7286 		goto process_again;
7287 	} else {
7288 		goto kqfile_retry;
7289 	}
7290 }
7291 
7292 /*!
7293  * @function kqueue_scan_continue
7294  *
7295  * @brief
7296  * The continuation used by kqueue_scan for kevent entry points.
7297  *
7298  * @discussion
7299  * Assumes we inherit a use/ref count on the kq or its fileglob.
7300  *
7301  * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
7302  * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
7303  */
7304 OS_NORETURN OS_NOINLINE
7305 static void
7306 kqueue_scan_continue(void *data, wait_result_t wait_result)
7307 {
7308 	uthread_t ut = current_uthread();
7309 	kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
7310 	int error = 0, flags = kectx->kec_process_flags;
7311 	struct kqueue *kq = data;
7312 
7313 	/*
7314 	 * only kevent variants call in here, so we know the callback is
7315 	 * kevent_legacy_callback or kevent_modern_callback.
7316 	 */
7317 	assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);
7318 
7319 	switch (wait_result) {
7320 	case THREAD_AWAKENED:
7321 		if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
7322 			error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
7323 		} else {
7324 			error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
7325 		}
7326 		break;
7327 	case THREAD_TIMED_OUT:
7328 		error = 0;
7329 		break;
7330 	case THREAD_INTERRUPTED:
7331 		error = EINTR;
7332 		break;
7333 	case THREAD_RESTART:
7334 		error = EBADF;
7335 		break;
7336 	default:
7337 		panic("%s: - invalid wait_result (%d)", __func__, wait_result);
7338 	}
7339 
7340 
7341 	error = kevent_cleanup(kq, flags, error, kectx);
7342 	*(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
7343 	unix_syscall_return(error);
7344 }
7345 
7346 /*!
7347  * @function kqueue_scan
7348  *
7349  * @brief
7350  * Scan and wait for events in a kqueue (used by poll & kevent).
7351  *
7352  * @discussion
7353  * Process the triggered events in a kqueue.
7354  *
7355  * If there are no events triggered arrange to wait for them:
7356  * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
7357  * - possibly until kectx->kec_deadline expires
7358  *
7359  * When it waits and neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
7360  * is set, it will wait in the kqueue_scan_continue continuation.
7361  *
7362  * poll() will block in place, and KEVENT_FLAG_KERNEL calls
7363  * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
7364  *
7365  * @param kqu
7366  * The kqueue being scanned.
7367  *
7368  * @param flags
7369  * The KEVENT_FLAG_* flags for this call.
7370  *
7371  * @param kectx
7372  * The context used for this scan.
7373  * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
7374  *
7375  * @param callback
7376  * The callback to be called on events successfully processed.
7377  * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
7378  */
7379 int
7380 kqueue_scan(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7381     kevent_callback_t callback)
7382 {
7383 	int error;
7384 
7385 	for (;;) {
7386 		kqlock(kqu);
7387 		error = kqueue_process(kqu, flags, kectx, callback);
7388 
7389 		/*
7390 		 * If we got an error, events returned (EWOULDBLOCK)
7391 		 * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
7392 		 * just return.
7393 		 */
7394 		if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
7395 			kqunlock(kqu);
7396 			return error == EWOULDBLOCK ? 0 : error;
7397 		}
7398 
7399 		assert((kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
7400 
7401 		kqu.kqf->kqf_state |= KQ_SLEEP;
7402 		assert_wait_deadline(&kqu.kqf->kqf_count, THREAD_ABORTSAFE,
7403 		    kectx->kec_deadline);
7404 		kqunlock(kqu);
7405 
7406 		if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
7407 			thread_block_parameter(kqueue_scan_continue, kqu.kqf);
7408 			__builtin_unreachable();
7409 		}
7410 
7411 		wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
7412 		switch (wr) {
7413 		case THREAD_AWAKENED:
7414 			break;
7415 		case THREAD_TIMED_OUT:
7416 			return 0;
7417 		case THREAD_INTERRUPTED:
7418 			return EINTR;
7419 		case THREAD_RESTART:
7420 			return EBADF;
7421 		default:
7422 			panic("%s: - bad wait_result (%d)", __func__, wr);
7423 		}
7424 	}
7425 }
7426 
7427 /*!
7428  * @function kevent_internal
7429  *
7430  * @brief
7431  * Common kevent code.
7432  *
7433  * @discussion
7434  * Needs to be inlined to specialize for legacy or modern and
7435  * eliminate dead code.
7436  *
7437  * This is the core logic of kevent entry points, that will:
7438  * - register kevents
7439  * - optionally scan the kqueue for events
7440  *
7441  * The caller is giving kevent_internal a reference on the kqueue
7442  * or its fileproc that needs to be cleaned up by kevent_cleanup().
7443  */
7444 OS_ALWAYS_INLINE
7445 static inline int
7446 kevent_internal(kqueue_t kqu,
7447     user_addr_t changelist, int nchanges,
7448     user_addr_t ueventlist, int nevents,
7449     int flags, kevent_ctx_t kectx, int32_t *retval,
7450     bool legacy)
7451 {
7452 	int error = 0, noutputs = 0, register_rc;
7453 
7454 	/* only bound threads can receive events on workloops */
7455 	if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
7456 #if CONFIG_WORKLOOP_DEBUG
7457 		UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
7458 			.uu_kqid = kqu.kqwl->kqwl_dynamicid,
7459 			.uu_kq = error ? NULL : kqu.kq,
7460 			.uu_error = error,
7461 			.uu_nchanges = nchanges,
7462 			.uu_nevents = nevents,
7463 			.uu_flags = flags,
7464 		});
7465 #endif // CONFIG_WORKLOOP_DEBUG
7466 
7467 		if (flags & KEVENT_FLAG_KERNEL) {
7468 			/* see kevent_workq_internal */
7469 			error = copyout(&kqu.kqwl->kqwl_dynamicid,
7470 			    ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
7471 			kectx->kec_data_resid -= sizeof(kqueue_id_t);
7472 			if (__improbable(error)) {
7473 				goto out;
7474 			}
7475 		}
7476 
7477 		if (kevent_args_requesting_events(flags, nevents)) {
7478 			/*
7479 			 * Disable the R2K notification while doing a register, if the
7480 			 * caller wants events too, we don't want the AST to be set if we
7481 			 * will process these events soon.
7482 			 */
7483 			kqlock(kqu);
7484 			kqu.kq->kq_state &= ~KQ_R2K_ARMED;
7485 			kqunlock(kqu);
7486 			flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
7487 		}
7488 	}
7489 
7490 	/* register all the change requests the user provided... */
7491 	while (nchanges > 0 && error == 0) {
7492 		struct kevent_qos_s kev;
7493 		struct knote *kn = NULL;
7494 
7495 		if (legacy) {
7496 			error = kevent_legacy_copyin(&changelist, &kev, flags);
7497 		} else {
7498 			error = kevent_modern_copyin(&changelist, &kev);
7499 		}
7500 		if (error) {
7501 			break;
7502 		}
7503 
7504 		register_rc = kevent_register(kqu.kq, &kev, &kn);
7505 		if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
7506 			thread_t thread = current_thread();
7507 
7508 			kqlock_held(kqu);
7509 
7510 			if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
7511 				workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
7512 			}
7513 
7514 			// f_post_register_wait is meant to call a continuation and not to
7515 			// return, which is why we don't support FILTER_REGISTER_WAIT if
7516 			// KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
7517 			// waits isn't the last.
7518 			//
7519 			// It is implementable, but not used by any userspace code at the
7520 			// moment, so for now return ENOTSUP if someone tries to do it.
7521 			if (nchanges == 1 && noutputs < nevents &&
7522 			    (flags & KEVENT_FLAG_KERNEL) == 0 &&
7523 			    (flags & KEVENT_FLAG_PARKING) == 0 &&
7524 			    (flags & KEVENT_FLAG_ERROR_EVENTS) &&
7525 			    (flags & KEVENT_FLAG_WORKLOOP)) {
7526 				uthread_t ut = get_bsdthread_info(thread);
7527 
7528 				/*
7529 				 * store the continuation/completion data in the uthread
7530 				 *
7531 				 * Note: the kectx aliases with this,
7532 				 * and is destroyed in the process.
7533 				 */
7534 				ut->uu_save.uus_kevent_register = (struct _kevent_register){
7535 					.kev        = kev,
7536 					.kqwl       = kqu.kqwl,
7537 					.eventout   = noutputs,
7538 					.ueventlist = ueventlist,
7539 				};
7540 				knote_fops(kn)->f_post_register_wait(ut, kn,
7541 				    &ut->uu_save.uus_kevent_register);
7542 				__builtin_unreachable();
7543 			}
7544 			kqunlock(kqu);
7545 
7546 			kev.flags |= EV_ERROR;
7547 			kev.data = ENOTSUP;
7548 		} else {
7549 			assert((register_rc & FILTER_REGISTER_WAIT) == 0);
7550 		}
7551 
7552 		// keep in sync with kevent_register_wait_return()
7553 		if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
7554 			if ((kev.flags & EV_ERROR) == 0) {
7555 				kev.flags |= EV_ERROR;
7556 				kev.data = 0;
7557 			}
7558 			if (legacy) {
7559 				error = kevent_legacy_copyout(&kev, &ueventlist, flags);
7560 			} else {
7561 				error = kevent_modern_copyout(&kev, &ueventlist);
7562 			}
7563 			if (error == 0) {
7564 				noutputs++;
7565 			}
7566 		} else if (kev.flags & EV_ERROR) {
7567 			error = (int)kev.data;
7568 		}
7569 		nchanges--;
7570 	}
7571 
7572 	if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
7573 	    nevents > 0 && noutputs == 0 && error == 0) {
7574 		kectx->kec_process_flags = flags;
7575 		kectx->kec_process_nevents = nevents;
7576 		kectx->kec_process_noutputs = 0;
7577 		kectx->kec_process_eventlist = ueventlist;
7578 
7579 		if (legacy) {
7580 			error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
7581 		} else {
7582 			error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
7583 		}
7584 
7585 		noutputs = kectx->kec_process_noutputs;
7586 	} else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
7587 		/*
7588 		 * If we didn't go through kqworkloop_end_processing(),
7589 		 * we need to do it here.
7590 		 *
7591 		 * kqueue_scan will call kqworkloop_end_processing(),
7592 		 * so we only need to do it if we didn't scan.
7593 		 */
7594 		kqlock(kqu);
7595 		kqworkloop_end_processing(kqu.kqwl, 0, 0);
7596 		kqunlock(kqu);
7597 	}
7598 
7599 	*retval = noutputs;
7600 out:
7601 	return kevent_cleanup(kqu.kq, flags, error, kectx);
7602 }
7603 
7604 #pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
7605 
7606 /*!
7607  * @function kevent_modern_internal
7608  *
7609  * @brief
7610  * The backend of the kevent_id and kevent_workq_internal entry points.
7611  *
7612  * @discussion
7613  * Needs to be noinline due to the number of arguments.
7614  */
7615 OS_NOINLINE
7616 static int
7617 kevent_modern_internal(kqueue_t kqu,
7618     user_addr_t changelist, int nchanges,
7619     user_addr_t ueventlist, int nevents,
7620     int flags, kevent_ctx_t kectx, int32_t *retval)
7621 {
7622 	return kevent_internal(kqu.kq, changelist, nchanges,
7623 	           ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
7624 }
7625 
7626 /*!
7627  * @function kevent_id
7628  *
7629  * @brief
7630  * The kevent_id() syscall.
7631  */
7632 int
7633 kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
7634 {
7635 	int error, flags = uap->flags & KEVENT_FLAG_USER;
7636 	uthread_t uth = current_uthread();
7637 	workq_threadreq_t kqr = uth->uu_kqr_bound;
7638 	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7639 	kqueue_t kqu;
7640 
7641 	flags = kevent_adjust_flags_for_proc(p, flags);
7642 	flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;
7643 
7644 	if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
7645 	    KEVENT_FLAG_WORKLOOP)) {
7646 		return EINVAL;
7647 	}
7648 
7649 	error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7650 	if (__improbable(error)) {
7651 		return error;
7652 	}
7653 
7654 	kectx->kec_deadline = 0;
7655 	kectx->kec_fp       = NULL;
7656 	kectx->kec_fd       = -1;
7657 	/* the kec_process_* fields are only filled if kqueue_scan is called */
7658 
7659 	/*
7660 	 * Get the kq we are going to be working on
7661 	 * As a fastpath, look at the currently bound workloop.
7662 	 */
7663 	kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
7664 	if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
7665 		if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
7666 			return EEXIST;
7667 		}
7668 		kqworkloop_retain(kqu.kqwl);
7669 	} else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
7670 		return EXDEV;
7671 	} else {
7672 		error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
7673 		if (__improbable(error)) {
7674 			return error;
7675 		}
7676 	}
7677 
7678 	return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
7679 	           uap->eventlist, uap->nevents, flags, kectx, retval);
7680 }
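/*
 * Illustrative sketch (editor's addition, hypothetical): kevent_id() names a
 * dynamic workloop kqueue by a caller-chosen 64-bit identifier instead of a
 * file descriptor, and KEVENT_FLAG_WORKLOOP is mandatory.  Assuming the
 * private userspace wrapper prototype:
 *
 *     uint64_t id = 0x1234;                 // caller-chosen workloop id
 *     struct kevent_qos_s ch = {
 *         .ident = STDIN_FILENO, .filter = EVFILT_READ, .flags = EV_ADD,
 *     };
 *     // register only; a thread not bound to this workloop that also asks
 *     // for events (nevents > 0, no KEVENT_FLAG_ERROR_EVENTS) gets EXDEV
 *     int r = kevent_id(id, &ch, 1, NULL, 0, NULL, NULL,
 *         KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
 */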
7681 
7682 /*!
7683  * @function kevent_workq_internal
7684  *
7685  * @discussion
7686  * This function is exported for the sake of the workqueue subsystem.
7687  *
7688  * It is called in two ways:
7689  * - when a thread is about to go to userspace to ask for pending events
7690  * - when a thread is returning from userspace with events back
7691  *
7692  * the workqueue subsystem will only use the following flags:
7693  * - KEVENT_FLAG_STACK_DATA (always)
7694  * - KEVENT_FLAG_IMMEDIATE (always)
7695  * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
7696  *   userspace).
7697  *
7698  * It implicitly acts on the bound kqueue, and for the case of workloops
7699  * will copyout the kqueue ID before anything else.
7700  *
7701  *
7702  * Pthread will have set up the various arguments to fit this stack layout:
7703  *
7704  * +-------....----+--------------+-----------+--------------------+
7705  * |  user stack   |  data avail  |  nevents  |   pthread_self()   |
7706  * +-------....----+--------------+-----------+--------------------+
7707  *                 ^              ^
7708  *             data_out       eventlist
7709  *
7710  * When a workloop is used, the workloop ID is copied out right before
7711  * the eventlist and is taken from the data buffer.
7712  *
7713  * @warning
7714  * This function is carefully tailored to not make any call except the final tail
7715  * call into kevent_modern_internal. (LTO inlines current_uthread()).
7716  *
7717  * This function is performance sensitive due to the workq subsystem.
7718  */
7719 int
7720 kevent_workq_internal(struct proc *p,
7721     user_addr_t changelist, int nchanges,
7722     user_addr_t eventlist, int nevents,
7723     user_addr_t data_out, user_size_t *data_available,
7724     unsigned int flags, int32_t *retval)
7725 {
7726 	uthread_t uth = current_uthread();
7727 	workq_threadreq_t kqr = uth->uu_kqr_bound;
7728 	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7729 	kqueue_t kqu;
7730 
7731 	assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
7732 	    flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));
7733 
7734 	kectx->kec_data_out   = data_out;
7735 	kectx->kec_data_avail = (uint64_t)data_available;
7736 	kectx->kec_data_size  = *data_available;
7737 	kectx->kec_data_resid = *data_available;
7738 	kectx->kec_deadline   = 0;
7739 	kectx->kec_fp         = NULL;
7740 	kectx->kec_fd         = -1;
7741 	/* the kec_process_* fields are only filled if kqueue_scan is called */
7742 
7743 	flags = kevent_adjust_flags_for_proc(p, flags);
7744 
7745 	if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
7746 		kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
7747 		kqworkloop_retain(kqu.kqwl);
7748 
7749 		flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
7750 		    KEVENT_FLAG_KERNEL;
7751 	} else {
7752 		kqu.kqwq = p->p_fd.fd_wqkqueue;
7753 
7754 		flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
7755 	}
7756 
7757 	return kevent_modern_internal(kqu, changelist, nchanges,
7758 	           eventlist, nevents, flags, kectx, retval);
7759 }
7760 
7761 /*!
7762  * @function kevent_qos
7763  *
7764  * @brief
7765  * The kevent_qos() syscall.
7766  */
7767 int
7768 kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
7769 {
7770 	uthread_t uth = current_uthread();
7771 	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7772 	int error, flags = uap->flags & KEVENT_FLAG_USER;
7773 	struct kqueue *kq;
7774 
7775 	if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
7776 		return EINVAL;
7777 	}
7778 
7779 	flags = kevent_adjust_flags_for_proc(p, flags);
7780 
7781 	error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7782 	if (__improbable(error)) {
7783 		return error;
7784 	}
7785 
7786 	kectx->kec_deadline = 0;
7787 	kectx->kec_fp       = NULL;
7788 	kectx->kec_fd       = uap->fd;
7789 	/* the kec_process_* fields are only filled if kqueue_scan is called */
7790 
7791 	/* get the kq we are going to be working on */
7792 	if (__probable(flags & KEVENT_FLAG_WORKQ)) {
7793 		error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7794 	} else {
7795 		error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7796 	}
7797 	if (__improbable(error)) {
7798 		return error;
7799 	}
7800 
7801 	return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
7802 	           uap->eventlist, uap->nevents, flags, kectx, retval);
7803 }
7804 
7805 #pragma mark legacy syscalls: kevent, kevent64
7806 
7807 /*!
7808  * @function kevent_legacy_get_deadline
7809  *
7810  * @brief
7811  * Compute the deadline for the legacy kevent syscalls.
7812  *
7813  * @discussion
7814  * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
7815  * as this takes precedence over the deadline.
7816  *
7817  * This function will fail if utimeout is USER_ADDR_NULL
7818  * (the caller should check).
7819  */
7820 static int
7821 kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
7822 {
7823 	struct timespec ts;
7824 
7825 	if (flags & KEVENT_FLAG_PROC64) {
7826 		struct user64_timespec ts64;
7827 		int error = copyin(utimeout, &ts64, sizeof(ts64));
7828 		if (__improbable(error)) {
7829 			return error;
7830 		}
7831 		ts.tv_sec = (unsigned long)ts64.tv_sec;
7832 		ts.tv_nsec = (long)ts64.tv_nsec;
7833 	} else {
7834 		struct user32_timespec ts32;
7835 		int error = copyin(utimeout, &ts32, sizeof(ts32));
7836 		if (__improbable(error)) {
7837 			return error;
7838 		}
7839 		ts.tv_sec = ts32.tv_sec;
7840 		ts.tv_nsec = ts32.tv_nsec;
7841 	}
7842 	if (!timespec_is_valid(&ts)) {
7843 		return EINVAL;
7844 	}
7845 
7846 	clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
7847 	return 0;
7848 }
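/*
 * Worked example (editor's addition): a relative timeout of 1.5s copied in
 * from userspace, e.g. { .tv_sec = 1, .tv_nsec = 500000000 }, is validated,
 * converted to mach absolute-time units, and anchored to "now":
 *
 *     struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
 *     uint64_t deadline;
 *     clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &deadline);
 *
 * A deadline left at 0 (no timeout supplied) makes kqueue_scan() wait
 * without bound.
 */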
7849 
7850 /*!
7851  * @function kevent_legacy_internal
7852  *
7853  * @brief
7854  * The core implementation for kevent and kevent64
7855  */
7856 OS_NOINLINE
7857 static int
7858 kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
7859     int32_t *retval, int flags)
7860 {
7861 	uthread_t uth = current_uthread();
7862 	kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7863 	struct kqueue *kq;
7864 	int error;
7865 
7866 	if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
7867 		return EINVAL;
7868 	}
7869 
7870 	flags = kevent_adjust_flags_for_proc(p, flags);
7871 
7872 	kectx->kec_data_out   = 0;
7873 	kectx->kec_data_avail = 0;
7874 	kectx->kec_data_size  = 0;
7875 	kectx->kec_data_resid = 0;
7876 	kectx->kec_deadline   = 0;
7877 	kectx->kec_fp         = NULL;
7878 	kectx->kec_fd         = uap->fd;
7879 	/* the kec_process_* fields are only filled if kqueue_scan is called */
7880 
7881 	/* convert timeout to absolute - if we have one (and not immediate) */
7882 	if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
7883 		error = kevent_legacy_get_deadline(flags, uap->timeout,
7884 		    &kectx->kec_deadline);
7885 		if (__improbable(error)) {
7886 			return error;
7887 		}
7888 	}
7889 
7890 	/* get the kq we are going to be working on */
7891 	if (flags & KEVENT_FLAG_WORKQ) {
7892 		error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7893 	} else {
7894 		error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7895 	}
7896 	if (__improbable(error)) {
7897 		return error;
7898 	}
7899 
7900 	return kevent_internal(kq, uap->changelist, uap->nchanges,
7901 	           uap->eventlist, uap->nevents, flags, kectx, retval,
7902 	           /*legacy*/ true);
7903 }
7904 
7905 /*!
7906  * @function kevent
7907  *
7908  * @brief
7909  * The legacy kevent() syscall.
7910  */
7911 int
7912 kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
7913 {
7914 	struct kevent64_args args = {
7915 		.fd         = uap->fd,
7916 		.changelist = uap->changelist,
7917 		.nchanges   = uap->nchanges,
7918 		.eventlist  = uap->eventlist,
7919 		.nevents    = uap->nevents,
7920 		.timeout    = uap->timeout,
7921 	};
7922 
7923 	return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
7924 }
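/*
 * Illustrative userspace sketch (editor's addition): the classic public
 * kevent(2) flow that ends up in kevent_legacy_internal() above with
 * KEVENT_FLAG_LEGACY32.  (sock_fd is a hypothetical, already-open fd.)
 *
 *     int kq = kqueue();
 *     struct kevent ch, ev;
 *     EV_SET(&ch, sock_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *     struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *     int n = kevent(kq, &ch, 1, &ev, 1, &ts);  // register, then wait up to 5s
 *     if (n > 0 && ev.filter == EVFILT_READ) {
 *         // ev.data holds the number of bytes available to read
 *     }
 */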
7925 
7926 /*!
7927  * @function kevent64
7928  *
7929  * @brief
7930  * The legacy kevent64() syscall.
7931  */
7932 int
7933 kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
7934 {
7935 	int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
7936 	return kevent_legacy_internal(p, uap, retval, flags);
7937 }
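/*
 * Editor's note (illustrative): unlike kevent(2), kevent64(2) takes the
 * KEVENT_FLAG_* flags directly, e.g. a non-blocking drain of already
 * triggered events on a hypothetical kqueue fd "kq":
 *
 *     struct kevent64_s evs[8];
 *     int n = kevent64(kq, NULL, 0, evs, 8, KEVENT_FLAG_IMMEDIATE, NULL);
 */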
7938 
7939 #pragma mark - socket interface
7940 
7941 #if SOCKETS
7942 #include <sys/param.h>
7943 #include <sys/socket.h>
7944 #include <sys/protosw.h>
7945 #include <sys/domain.h>
7946 #include <sys/mbuf.h>
7947 #include <sys/kern_event.h>
7948 #include <sys/malloc.h>
7949 #include <sys/sys_domain.h>
7950 #include <sys/syslog.h>
7951 
7952 #ifndef ROUNDUP64
7953 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
7954 #endif
7955 
7956 #ifndef ADVANCE64
7957 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
7958 #endif
7959 
7960 static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol");
7961 static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp);
7962 
7963 static int kev_attach(struct socket *so, int proto, struct proc *p);
7964 static int kev_detach(struct socket *so);
7965 static int kev_control(struct socket *so, u_long cmd, caddr_t data,
7966     struct ifnet *ifp, struct proc *p);
7967 static lck_mtx_t * event_getlock(struct socket *, int);
7968 static int event_lock(struct socket *, int, void *);
7969 static int event_unlock(struct socket *, int, void *);
7970 
7971 static int event_sofreelastref(struct socket *);
7972 static void kev_delete(struct kern_event_pcb *);
7973 
7974 static struct pr_usrreqs event_usrreqs = {
7975 	.pru_attach =           kev_attach,
7976 	.pru_control =          kev_control,
7977 	.pru_detach =           kev_detach,
7978 	.pru_soreceive =        soreceive,
7979 };
7980 
7981 static struct protosw eventsw[] = {
7982 	{
7983 		.pr_type =              SOCK_RAW,
7984 		.pr_protocol =          SYSPROTO_EVENT,
7985 		.pr_flags =             PR_ATOMIC,
7986 		.pr_usrreqs =           &event_usrreqs,
7987 		.pr_lock =              event_lock,
7988 		.pr_unlock =            event_unlock,
7989 		.pr_getlock =           event_getlock,
7990 	}
7991 };
7992 
7993 __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
7994 __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
7995 
7996 SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
7997     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");
7998 
7999 struct kevtstat kevtstat;
8000 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
8001     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8002     kevt_getstat, "S,kevtstat", "");
8003 
8004 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
8005     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8006     kevt_pcblist, "S,xkevtpcb", "");
8007 
8008 static lck_mtx_t *
8009 event_getlock(struct socket *so, int flags)
8010 {
8011 #pragma unused(flags)
8012 	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8013 
8014 	if (so->so_pcb != NULL) {
8015 		if (so->so_usecount < 0) {
8016 			panic("%s: so=%p usecount=%d lrh= %s", __func__,
8017 			    so, so->so_usecount, solockhistory_nr(so));
8018 		}
8019 		/* NOTREACHED */
8020 	} else {
8021 		panic("%s: so=%p NULL NO so_pcb %s", __func__,
8022 		    so, solockhistory_nr(so));
8023 		/* NOTREACHED */
8024 	}
8025 	return &ev_pcb->evp_mtx;
8026 }
8027 
8028 static int
8029 event_lock(struct socket *so, int refcount, void *lr)
8030 {
8031 	void *lr_saved;
8032 
8033 	if (lr == NULL) {
8034 		lr_saved = __builtin_return_address(0);
8035 	} else {
8036 		lr_saved = lr;
8037 	}
8038 
8039 	if (so->so_pcb != NULL) {
8040 		lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8041 	} else {
8042 		panic("%s: so=%p NO PCB! lr=%p lrh= %s", __func__,
8043 		    so, lr_saved, solockhistory_nr(so));
8044 		/* NOTREACHED */
8045 	}
8046 
8047 	if (so->so_usecount < 0) {
8048 		panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s", __func__,
8049 		    so, so->so_pcb, lr_saved, so->so_usecount,
8050 		    solockhistory_nr(so));
8051 		/* NOTREACHED */
8052 	}
8053 
8054 	if (refcount) {
8055 		so->so_usecount++;
8056 	}
8057 
8058 	so->lock_lr[so->next_lock_lr] = lr_saved;
8059 	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
8060 	return 0;
8061 }
8062 
8063 static int
8064 event_unlock(struct socket *so, int refcount, void *lr)
8065 {
8066 	void *lr_saved;
8067 	lck_mtx_t *mutex_held;
8068 
8069 	if (lr == NULL) {
8070 		lr_saved = __builtin_return_address(0);
8071 	} else {
8072 		lr_saved = lr;
8073 	}
8074 
8075 	if (refcount) {
8076 		so->so_usecount--;
8077 	}
8078 	if (so->so_usecount < 0) {
8079 		panic("%s: so=%p usecount=%d lrh= %s", __func__,
8080 		    so, so->so_usecount, solockhistory_nr(so));
8081 		/* NOTREACHED */
8082 	}
8083 	if (so->so_pcb == NULL) {
8084 		panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s", __func__,
8085 		    so, so->so_usecount, (void *)lr_saved,
8086 		    solockhistory_nr(so));
8087 		/* NOTREACHED */
8088 	}
8089 	mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8090 
8091 	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
8092 	so->unlock_lr[so->next_unlock_lr] = lr_saved;
8093 	so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
8094 
8095 	if (so->so_usecount == 0) {
8096 		VERIFY(so->so_flags & SOF_PCBCLEARING);
8097 		event_sofreelastref(so);
8098 	} else {
8099 		lck_mtx_unlock(mutex_held);
8100 	}
8101 
8102 	return 0;
8103 }
8104 
8105 static int
8106 event_sofreelastref(struct socket *so)
8107 {
8108 	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8109 
8110 	LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
8111 
8112 	so->so_pcb = NULL;
8113 
8114 	/*
8115 	 * Disable upcall in the event another thread is in kev_post_msg()
8116 	 * appending record to the receive socket buffer, since sbwakeup()
8117 	 * may release the socket lock otherwise.
8118 	 */
8119 	so->so_rcv.sb_flags &= ~SB_UPCALL;
8120 	so->so_snd.sb_flags &= ~SB_UPCALL;
8121 	so->so_event = sonullevent;
8122 	lck_mtx_unlock(&(ev_pcb->evp_mtx));
8123 
8124 	LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
8125 	lck_rw_lock_exclusive(&kev_rwlock);
8126 	LIST_REMOVE(ev_pcb, evp_link);
8127 	kevtstat.kes_pcbcount--;
8128 	kevtstat.kes_gencnt++;
8129 	lck_rw_done(&kev_rwlock);
8130 	kev_delete(ev_pcb);
8131 
8132 	sofreelastref(so, 1);
8133 	return 0;
8134 }
8135 
8136 static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));
8137 
8138 static
8139 struct kern_event_head kern_event_head;
8140 
8141 static u_int32_t static_event_id = 0;
8142 
8143 static ZONE_DEFINE(ev_pcb_zone, "kerneventpcb",
8144     sizeof(struct kern_event_pcb), ZC_ZFREE_CLEARMEM);
8145 
8146 /*
8147  * Install the protosw's for the NKE manager.  Invoked at extension load time
8148  */
8149 void
8150 kern_event_init(struct domain *dp)
8151 {
8152 	struct protosw *pr;
8153 	int i;
8154 
8155 	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
8156 	VERIFY(dp == systemdomain);
8157 
8158 	for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
8159 		net_add_proto(pr, dp, 1);
8160 	}
8161 }
8162 
8163 static int
8164 kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
8165 {
8166 	int error = 0;
8167 	struct kern_event_pcb *ev_pcb;
8168 
8169 	error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
8170 	if (error != 0) {
8171 		return error;
8172 	}
8173 
8174 	ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO);
8175 	lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL);
8176 
8177 	ev_pcb->evp_socket = so;
8178 	ev_pcb->evp_vendor_code_filter = 0xffffffff;
8179 
8180 	so->so_pcb = (caddr_t) ev_pcb;
8181 	lck_rw_lock_exclusive(&kev_rwlock);
8182 	LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
8183 	kevtstat.kes_pcbcount++;
8184 	kevtstat.kes_gencnt++;
8185 	lck_rw_done(&kev_rwlock);
8186 
8187 	return error;
8188 }
8189 
8190 static void
8191 kev_delete(struct kern_event_pcb *ev_pcb)
8192 {
8193 	VERIFY(ev_pcb != NULL);
8194 	lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp);
8195 	zfree(ev_pcb_zone, ev_pcb);
8196 }
8197 
8198 static int
8199 kev_detach(struct socket *so)
8200 {
8201 	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8202 
8203 	if (ev_pcb != NULL) {
8204 		soisdisconnected(so);
8205 		so->so_flags |= SOF_PCBCLEARING;
8206 	}
8207 
8208 	return 0;
8209 }
8210 
8211 /*
8212  * For now, kev_vendor_code and mbuf_tags use the same
8213  * mechanism.
8214  */
8215 errno_t
8216 kev_vendor_code_find(
8217 	const char      *string,
8218 	u_int32_t       *out_vendor_code)
8219 {
8220 	if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
8221 		return EINVAL;
8222 	}
8223 	return net_str_id_find_internal(string, out_vendor_code,
8224 	           NSI_VENDOR_CODE, 1);
8225 }
8226 
8227 errno_t
8228 kev_msg_post(struct kev_msg *event_msg)
8229 {
8230 	mbuf_tag_id_t min_vendor, max_vendor;
8231 
8232 	net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
8233 
8234 	if (event_msg == NULL) {
8235 		return EINVAL;
8236 	}
8237 
8238 	/*
8239 	 * Limit third parties to posting events for registered vendor codes
8240 	 * only
8241 	 */
8242 	if (event_msg->vendor_code < min_vendor ||
8243 	    event_msg->vendor_code > max_vendor) {
8244 		os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
8245 		return EINVAL;
8246 	}
8247 	return kev_post_msg(event_msg);
8248 }
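/*
 * Illustrative KPI sketch (editor's addition): a kernel extension posting a
 * vendor-specific event through kev_msg_post().  Field names follow the
 * <sys/kern_event.h> KPI; "my_vendor_code" is assumed to have been obtained
 * earlier from kev_vendor_code_find(), and the class/subclass/event values
 * are hypothetical.
 *
 *     struct { uint32_t state; } payload = { .state = 1 };
 *     struct kev_msg msg = {
 *         .vendor_code  = my_vendor_code,
 *         .kev_class    = 1,
 *         .kev_subclass = 2,
 *         .event_code   = 3,
 *     };
 *     msg.dv[0].data_length = sizeof(payload);
 *     msg.dv[0].data_ptr    = &payload;
 *     errno_t err = kev_msg_post(&msg);   // EINVAL if vendor_code is outside
 *                                         // the registered vendor range
 */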
8249 
8250 static int
8251 kev_post_msg_internal(struct kev_msg *event_msg, int wait)
8252 {
8253 	struct mbuf *m, *m2;
8254 	struct kern_event_pcb *ev_pcb;
8255 	struct kern_event_msg *ev;
8256 	char *tmp;
8257 	u_int32_t total_size;
8258 	int i;
8259 
8260 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
8261 	/*
8262 	 * Special hook for ALF state updates
8263 	 */
8264 	if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
8265 	    event_msg->kev_class == KEV_NKE_CLASS &&
8266 	    event_msg->kev_subclass == KEV_NKE_ALF_SUBCLASS &&
8267 	    event_msg->event_code == KEV_NKE_ALF_STATE_CHANGED) {
8268 #if (DEBUG || DEVELOPMENT)
8269 		os_log_info(OS_LOG_DEFAULT, "KEV_NKE_ALF_STATE_CHANGED posted");
8270 #endif /* DEBUG || DEVELOPMENT */
8271 		net_filter_event_mark(NET_FILTER_EVENT_ALF,
8272 		    net_check_compatible_alf());
8273 	}
8274 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
8275 
8276 	/* Verify the message is small enough to fit in one mbuf w/o cluster */
8277 	total_size = KEV_MSG_HEADER_SIZE;
8278 
8279 	for (i = 0; i < 5; i++) {
8280 		if (event_msg->dv[i].data_length == 0) {
8281 			break;
8282 		}
8283 		total_size += event_msg->dv[i].data_length;
8284 	}
8285 
8286 	if (total_size > MLEN) {
8287 		os_atomic_inc(&kevtstat.kes_toobig, relaxed);
8288 		return EMSGSIZE;
8289 	}
8290 
8291 	m = m_get(wait, MT_DATA);
8292 	if (m == 0) {
8293 		os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8294 		return ENOMEM;
8295 	}
8296 	ev = mtod(m, struct kern_event_msg *);
8297 	total_size = KEV_MSG_HEADER_SIZE;
8298 
8299 	tmp = (char *) &ev->event_data[0];
8300 	for (i = 0; i < 5; i++) {
8301 		if (event_msg->dv[i].data_length == 0) {
8302 			break;
8303 		}
8304 
8305 		total_size += event_msg->dv[i].data_length;
8306 		bcopy(event_msg->dv[i].data_ptr, tmp,
8307 		    event_msg->dv[i].data_length);
8308 		tmp += event_msg->dv[i].data_length;
8309 	}
8310 
8311 	ev->id = ++static_event_id;
8312 	ev->total_size   = total_size;
8313 	ev->vendor_code  = event_msg->vendor_code;
8314 	ev->kev_class    = event_msg->kev_class;
8315 	ev->kev_subclass = event_msg->kev_subclass;
8316 	ev->event_code   = event_msg->event_code;
8317 
8318 	m->m_len = total_size;
8319 	lck_rw_lock_shared(&kev_rwlock);
8320 	for (ev_pcb = LIST_FIRST(&kern_event_head);
8321 	    ev_pcb;
8322 	    ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8323 		lck_mtx_lock(&ev_pcb->evp_mtx);
8324 		if (ev_pcb->evp_socket->so_pcb == NULL) {
8325 			lck_mtx_unlock(&ev_pcb->evp_mtx);
8326 			continue;
8327 		}
8328 		if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
8329 			if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
8330 				lck_mtx_unlock(&ev_pcb->evp_mtx);
8331 				continue;
8332 			}
8333 
8334 			if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
8335 				if (ev_pcb->evp_class_filter != ev->kev_class) {
8336 					lck_mtx_unlock(&ev_pcb->evp_mtx);
8337 					continue;
8338 				}
8339 
8340 				if ((ev_pcb->evp_subclass_filter !=
8341 				    KEV_ANY_SUBCLASS) &&
8342 				    (ev_pcb->evp_subclass_filter !=
8343 				    ev->kev_subclass)) {
8344 					lck_mtx_unlock(&ev_pcb->evp_mtx);
8345 					continue;
8346 				}
8347 			}
8348 		}
8349 
8350 		m2 = m_copym(m, 0, m->m_len, wait);
8351 		if (m2 == 0) {
8352 			os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8353 			m_free(m);
8354 			lck_mtx_unlock(&ev_pcb->evp_mtx);
8355 			lck_rw_done(&kev_rwlock);
8356 			return ENOMEM;
8357 		}
8358 		if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
8359 			/*
8360 			 * We use "m" for the socket stats as it would be
8361 			 * unsafe to use "m2"
8362 			 */
8363 			so_inc_recv_data_stat(ev_pcb->evp_socket,
8364 			    1, m->m_len, MBUF_TC_BE);
8365 
8366 			sorwakeup(ev_pcb->evp_socket);
8367 			os_atomic_inc(&kevtstat.kes_posted, relaxed);
8368 		} else {
8369 			os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
8370 		}
8371 		lck_mtx_unlock(&ev_pcb->evp_mtx);
8372 	}
8373 	m_free(m);
8374 	lck_rw_done(&kev_rwlock);
8375 
8376 	return 0;
8377 }
8378 
8379 int
8380 kev_post_msg(struct kev_msg *event_msg)
8381 {
8382 	return kev_post_msg_internal(event_msg, M_WAIT);
8383 }
8384 
8385 int
8386 kev_post_msg_nowait(struct kev_msg *event_msg)
8387 {
8388 	return kev_post_msg_internal(event_msg, M_NOWAIT);
8389 }
8390 
8391 static int
8392 kev_control(struct socket *so,
8393     u_long cmd,
8394     caddr_t data,
8395     __unused struct ifnet *ifp,
8396     __unused struct proc *p)
8397 {
8398 	struct kev_request *kev_req = (struct kev_request *) data;
8399 	struct kern_event_pcb  *ev_pcb;
8400 	struct kev_vendor_code *kev_vendor;
8401 	u_int32_t  *id_value = (u_int32_t *) data;
8402 
8403 	switch (cmd) {
8404 	case SIOCGKEVID:
8405 		*id_value = static_event_id;
8406 		break;
8407 	case SIOCSKEVFILT:
8408 		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8409 		ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
8410 		ev_pcb->evp_class_filter = kev_req->kev_class;
8411 		ev_pcb->evp_subclass_filter  = kev_req->kev_subclass;
8412 		break;
8413 	case SIOCGKEVFILT:
8414 		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8415 		kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
8416 		kev_req->kev_class   = ev_pcb->evp_class_filter;
8417 		kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
8418 		break;
8419 	case SIOCGKEVVENDOR:
8420 		kev_vendor = (struct kev_vendor_code *)data;
8421 		/* Make sure string is NULL terminated */
8422 		kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
8423 		return net_str_id_find_internal(kev_vendor->vendor_string,
8424 		           &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
8425 	default:
8426 		return ENOTSUP;
8427 	}
8428 
8429 	return 0;
8430 }
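/*
 * Illustrative userspace sketch (editor's addition): consuming kernel events
 * over a PF_SYSTEM/SYSPROTO_EVENT socket, using the SIOCSKEVFILT ioctl
 * handled above to restrict delivery to Apple network-class events.
 *
 *     int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *     struct kev_request req = {
 *         .vendor_code  = KEV_VENDOR_APPLE,
 *         .kev_class    = KEV_NETWORK_CLASS,
 *         .kev_subclass = KEV_ANY_SUBCLASS,
 *     };
 *     ioctl(fd, SIOCSKEVFILT, &req);
 *     char buf[1024];
 *     ssize_t len = recv(fd, buf, sizeof(buf), 0);
 *     struct kern_event_msg *ev = (struct kern_event_msg *)buf;
 *     // ev->event_code, ev->kev_subclass, ev->event_data[] describe the event
 */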
8431 
8432 int
8433 kevt_getstat SYSCTL_HANDLER_ARGS
8434 {
8435 #pragma unused(oidp, arg1, arg2)
8436 	int error = 0;
8437 
8438 	lck_rw_lock_shared(&kev_rwlock);
8439 
8440 	if (req->newptr != USER_ADDR_NULL) {
8441 		error = EPERM;
8442 		goto done;
8443 	}
8444 	if (req->oldptr == USER_ADDR_NULL) {
8445 		req->oldidx = sizeof(struct kevtstat);
8446 		goto done;
8447 	}
8448 
8449 	error = SYSCTL_OUT(req, &kevtstat,
8450 	    MIN(sizeof(struct kevtstat), req->oldlen));
8451 done:
8452 	lck_rw_done(&kev_rwlock);
8453 
8454 	return error;
8455 }
8456 
8457 __private_extern__ int
8458 kevt_pcblist SYSCTL_HANDLER_ARGS
8459 {
8460 #pragma unused(oidp, arg1, arg2)
8461 	int error = 0;
8462 	uint64_t n, i;
8463 	struct xsystmgen xsg;
8464 	void *buf = NULL;
8465 	size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
8466 	    ROUNDUP64(sizeof(struct xsocket_n)) +
8467 	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
8468 	    ROUNDUP64(sizeof(struct xsockstat_n));
8469 	struct kern_event_pcb  *ev_pcb;
8470 
8471 	buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO);
8472 	if (buf == NULL) {
8473 		return ENOMEM;
8474 	}
8475 
8476 	lck_rw_lock_shared(&kev_rwlock);
8477 
8478 	n = kevtstat.kes_pcbcount;
8479 
8480 	if (req->oldptr == USER_ADDR_NULL) {
8481 		req->oldidx = (size_t) ((n + n / 8) * item_size);
8482 		goto done;
8483 	}
8484 	if (req->newptr != USER_ADDR_NULL) {
8485 		error = EPERM;
8486 		goto done;
8487 	}
8488 	bzero(&xsg, sizeof(xsg));
8489 	xsg.xg_len = sizeof(xsg);
8490 	xsg.xg_count = n;
8491 	xsg.xg_gen = kevtstat.kes_gencnt;
8492 	xsg.xg_sogen = so_gencnt;
8493 	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8494 	if (error) {
8495 		goto done;
8496 	}
8497 	/*
8498 	 * We are done if there is no pcb
8499 	 */
8500 	if (n == 0) {
8501 		goto done;
8502 	}
8503 
8504 	i = 0;
8505 	for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
8506 	    i < n && ev_pcb != NULL;
8507 	    i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8508 		struct xkevtpcb *xk = (struct xkevtpcb *)buf;
8509 		struct xsocket_n *xso = (struct xsocket_n *)
8510 		    ADVANCE64(xk, sizeof(*xk));
8511 		struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
8512 		    ADVANCE64(xso, sizeof(*xso));
8513 		struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
8514 		    ADVANCE64(xsbrcv, sizeof(*xsbrcv));
8515 		struct xsockstat_n *xsostats = (struct xsockstat_n *)
8516 		    ADVANCE64(xsbsnd, sizeof(*xsbsnd));
8517 
8518 		bzero(buf, item_size);
8519 
8520 		lck_mtx_lock(&ev_pcb->evp_mtx);
8521 
8522 		xk->kep_len = sizeof(struct xkevtpcb);
8523 		xk->kep_kind = XSO_EVT;
8524 		xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
8525 		xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
8526 		xk->kep_class_filter = ev_pcb->evp_class_filter;
8527 		xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
8528 
8529 		sotoxsocket_n(ev_pcb->evp_socket, xso);
8530 		sbtoxsockbuf_n(ev_pcb->evp_socket ?
8531 		    &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
8532 		sbtoxsockbuf_n(ev_pcb->evp_socket ?
8533 		    &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
8534 		sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
8535 
8536 		lck_mtx_unlock(&ev_pcb->evp_mtx);
8537 
8538 		error = SYSCTL_OUT(req, buf, item_size);
8539 	}
8540 
8541 	if (error == 0) {
8542 		/*
8543 		 * Give the user an updated idea of our state.
8544 		 * If the generation differs from what we told
8545 		 * her before, she knows that something happened
8546 		 * while we were processing this request, and it
8547 		 * might be necessary to retry.
8548 		 */
8549 		bzero(&xsg, sizeof(xsg));
8550 		xsg.xg_len = sizeof(xsg);
8551 		xsg.xg_count = n;
8552 		xsg.xg_gen = kevtstat.kes_gencnt;
8553 		xsg.xg_sogen = so_gencnt;
8554 		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8555 		if (error) {
8556 			goto done;
8557 		}
8558 	}
8559 
8560 done:
8561 	lck_rw_done(&kev_rwlock);
8562 
8563 	kfree_data(buf, item_size);
8564 	return error;
8565 }
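
/*
 * Note on the xsystmgen header/trailer written above: this is the common
 * sysctl "generation count" pattern, where the generation is emitted once
 * before and once after the PCB records so a consumer can tell that the
 * list changed while it was being walked.  A minimal userspace sketch
 * (the buffer layout and retry policy here are illustrative only, not
 * part of this file):
 *
 *	struct xsystmgen head, tail;
 *	// parse head, the per-pcb records, then tail out of the sysctl buffer
 *	if (head.xg_gen != tail.xg_gen) {
 *		// the pcb list changed mid-walk; re-issue the sysctl
 *	}
 */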
8566 
8567 #endif /* SOCKETS */
8568 
8569 
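/*
 * fill_kqueueinfo:
 * Summarize a kqueue into the exported struct kqueue_info: vinfo_stat-style
 * fields (count, kevent record size, dynamic id) plus the PROC_KQUEUE_*
 * state flags consumed by libproc.
 */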
8570 int
8571 fill_kqueueinfo(kqueue_t kqu, struct kqueue_info * kinfo)
8572 {
8573 	struct vinfo_stat * st;
8574 
8575 	st = &kinfo->kq_stat;
8576 
8577 	st->vst_size = kqu.kq->kq_count;
8578 	if (kqu.kq->kq_state & KQ_KEV_QOS) {
8579 		st->vst_blksize = sizeof(struct kevent_qos_s);
8580 	} else if (kqu.kq->kq_state & KQ_KEV64) {
8581 		st->vst_blksize = sizeof(struct kevent64_s);
8582 	} else {
8583 		st->vst_blksize = sizeof(struct kevent);
8584 	}
8585 	st->vst_mode = S_IFIFO;
8586 	st->vst_ino = (kqu.kq->kq_state & KQ_DYNAMIC) ?
8587 	    kqu.kqwl->kqwl_dynamicid : 0;
8588 
8589 	/* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
8590 #define PROC_KQUEUE_MASK (KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
8591 	static_assert(PROC_KQUEUE_SLEEP == KQ_SLEEP);
8592 	static_assert(PROC_KQUEUE_32 == KQ_KEV32);
8593 	static_assert(PROC_KQUEUE_64 == KQ_KEV64);
8594 	static_assert(PROC_KQUEUE_QOS == KQ_KEV_QOS);
8595 	static_assert(PROC_KQUEUE_WORKQ == KQ_WORKQ);
8596 	static_assert(PROC_KQUEUE_WORKLOOP == KQ_WORKLOOP);
8597 	kinfo->kq_state = kqu.kq->kq_state & PROC_KQUEUE_MASK;
8598 	if ((kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0) {
8599 		if (kqu.kqf->kqf_sel.si_flags & SI_RECORDED) {
8600 			kinfo->kq_state |= PROC_KQUEUE_SELECT;
8601 		}
8602 	}
8603 
8604 	return 0;
8605 }
8606 
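/*
 * fill_kqueue_dyninfo:
 * Extend fill_kqueueinfo() for dynamic workloops: report the servicer and
 * owner thread ids, the thread request state and QoS, and any scheduling
 * parameters (priority, policy, CPU percent) encoded in kqwl_params.
 * Returns EINVAL if the kqueue is not a workloop.
 */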
8607 static int
8608 fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
8609 {
8610 	workq_threadreq_t kqr = &kqwl->kqwl_request;
8611 	workq_threadreq_param_t trp = {};
8612 	int err;
8613 
8614 	if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
8615 		return EINVAL;
8616 	}
8617 
8618 	if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
8619 		return err;
8620 	}
8621 
8622 	kqlock(kqwl);
8623 
8624 	kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
8625 	kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
8626 	kqdi->kqdi_request_state = kqr->tr_state;
8627 	kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
8628 	kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
8629 	kqdi->kqdi_sync_waiters = 0;
8630 	kqdi->kqdi_sync_waiter_qos = 0;
8631 
8632 	trp.trp_value = kqwl->kqwl_params;
8633 	if (trp.trp_flags & TRP_PRIORITY) {
8634 		kqdi->kqdi_pri = trp.trp_pri;
8635 	} else {
8636 		kqdi->kqdi_pri = 0;
8637 	}
8638 
8639 	if (trp.trp_flags & TRP_POLICY) {
8640 		kqdi->kqdi_pol = trp.trp_pol;
8641 	} else {
8642 		kqdi->kqdi_pol = 0;
8643 	}
8644 
8645 	if (trp.trp_flags & TRP_CPUPERCENT) {
8646 		kqdi->kqdi_cpupercent = trp.trp_cpupercent;
8647 	} else {
8648 		kqdi->kqdi_cpupercent = 0;
8649 	}
8650 
8651 	kqunlock(kqwl);
8652 
8653 	return 0;
8654 }
8655 
8656 
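/*
 * kevent_extinfo_emit:
 * Walk a knote list and, for knotes attached to the given kqueue, fill
 * struct kevent_extinfo records into buf (up to buflen entries).  The
 * running count nknotes keeps growing past buflen so the caller can
 * report how many matching knotes exist in total.
 */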
8657 static unsigned long
8658 kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
8659     unsigned long buflen, unsigned long nknotes)
8660 {
8661 	for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
8662 		if (kq == knote_get_kq(kn)) {
8663 			if (nknotes < buflen) {
8664 				struct kevent_extinfo *info = &buf[nknotes];
8665 
8666 				kqlock(kq);
8667 
8668 				info->kqext_kev         = *(struct kevent_qos_s *)&kn->kn_kevent;
8669 				if (knote_has_qos(kn)) {
8670 					info->kqext_kev.qos =
8671 					    _pthread_priority_thread_qos_fast(kn->kn_qos);
8672 				} else {
8673 					info->kqext_kev.qos = kn->kn_qos_override;
8674 				}
8675 				info->kqext_kev.filter |= 0xff00; /* sign extend filter */
8676 				info->kqext_kev.xflags  = 0; /* this is where sfflags lives */
8677 				info->kqext_kev.data    = 0; /* this is where sdata lives */
8678 				info->kqext_sdata       = kn->kn_sdata;
8679 				info->kqext_status      = kn->kn_status;
8680 				info->kqext_sfflags     = kn->kn_sfflags;
8681 
8682 				kqunlock(kq);
8683 			}
8684 
8685 			/* we return total number of knotes, which may be more than requested */
8686 			nknotes++;
8687 		}
8688 	}
8689 
8690 	return nknotes;
8691 }
8692 
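/*
 * kevent_copyout_proc_dynkqids:
 * Copy out the dynamic kqueue (workloop) ids of a process, walking the
 * per-process kqueue hash under kqhash_lock.  At most PROC_PIDDYNKQUEUES_MAX
 * ids (and no more than fit in the caller's buffer) are copied out;
 * *nkqueues_out reports the number of workloops seen, capped to the same
 * limit.
 */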
8693 int
8694 kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
8695     int32_t *nkqueues_out)
8696 {
8697 	proc_t p = (proc_t)proc;
8698 	struct filedesc *fdp = &p->p_fd;
8699 	unsigned int nkqueues = 0;
8700 	unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
8701 	size_t buflen, bufsize;
8702 	kqueue_id_t *kq_ids = NULL;
8703 	int err = 0;
8704 
8705 	assert(p != NULL);
8706 
8707 	if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
8708 		err = EINVAL;
8709 		goto out;
8710 	}
8711 
8712 	buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX);
8713 
8714 	if (ubuflen != 0) {
8715 		if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
8716 			err = ERANGE;
8717 			goto out;
8718 		}
8719 		kq_ids = (kqueue_id_t *)kalloc_data(bufsize, Z_WAITOK | Z_ZERO);
8720 		if (!kq_ids) {
8721 			err = ENOMEM;
8722 			goto out;
8723 		}
8724 	}
8725 
8726 	kqhash_lock(fdp);
8727 
8728 	if (fdp->fd_kqhashmask > 0) {
8729 		for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
8730 			struct kqworkloop *kqwl;
8731 
8732 			LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
8733 				/* report the number of kqueues, even if they don't all fit */
8734 				if (nkqueues < buflen) {
8735 					kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
8736 				}
8737 				nkqueues++;
8738 			}
8739 		}
8740 	}
8741 
8742 	kqhash_unlock(fdp);
8743 
8744 	if (kq_ids) {
8745 		size_t copysize;
8746 		if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), &copysize)) {
8747 			err = ERANGE;
8748 			goto out;
8749 		}
8750 
8751 		assert(ubufsize >= copysize);
8752 		err = copyout(kq_ids, ubuf, copysize);
8753 	}
8754 
8755 out:
8756 	if (kq_ids) {
8757 		kfree_data(kq_ids, bufsize);
8758 	}
8759 
8760 	if (!err) {
8761 		*nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
8762 	}
8763 	return err;
8764 }
8765 
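/*
 * kevent_copyout_dynkqinfo:
 * Look up a dynamic kqueue by id and copy out either a full struct
 * kqueue_dyninfo or, for older callers with a smaller buffer, just the
 * embedded struct kqueue_info.
 */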
8766 int
8767 kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8768     uint32_t ubufsize, int32_t *size_out)
8769 {
8770 	proc_t p = (proc_t)proc;
8771 	struct kqworkloop *kqwl;
8772 	int err = 0;
8773 	struct kqueue_dyninfo kqdi = { };
8774 
8775 	assert(p != NULL);
8776 
8777 	if (ubufsize < sizeof(struct kqueue_info)) {
8778 		return ENOBUFS;
8779 	}
8780 
8781 	kqwl = kqworkloop_hash_lookup_and_retain(&p->p_fd, kq_id);
8782 	if (!kqwl) {
8783 		return ESRCH;
8784 	}
8785 
8786 	/*
8787 	 * backward compatibility: allow the argument to this call to be only
8788 	 * a struct kqueue_info
8789 	 */
8790 	if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
8791 		ubufsize = sizeof(struct kqueue_dyninfo);
8792 		err = fill_kqueue_dyninfo(kqwl, &kqdi);
8793 	} else {
8794 		ubufsize = sizeof(struct kqueue_info);
8795 		err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
8796 	}
8797 	if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
8798 		*size_out = ubufsize;
8799 	}
8800 	kqworkloop_release(kqwl);
8801 	return err;
8802 }
8803 
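/*
 * kevent_copyout_dynkqextinfo:
 * Look up a dynamic kqueue by id and copy out its knote details via
 * pid_kqueue_extinfo().
 */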
8804 int
8805 kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8806     uint32_t ubufsize, int32_t *nknotes_out)
8807 {
8808 	proc_t p = (proc_t)proc;
8809 	struct kqworkloop *kqwl;
8810 	int err;
8811 
8812 	kqwl = kqworkloop_hash_lookup_and_retain(&p->p_fd, kq_id);
8813 	if (!kqwl) {
8814 		return ESRCH;
8815 	}
8816 
8817 	err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
8818 	kqworkloop_release(kqwl);
8819 	return err;
8820 }
8821 
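/*
 * pid_kqueue_extinfo:
 * Gather struct kevent_extinfo records for every knote of a process that is
 * attached to the given kqueue, scanning both the fd-indexed knote lists and
 * the knote hash, then copy them out.  *retval is the total number of
 * matching knotes (capped to PROC_PIDFDKQUEUE_KNOTES_MAX), which may exceed
 * what fit in the caller's buffer.
 */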
8822 int
8823 pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
8824     uint32_t bufsize, int32_t *retval)
8825 {
8826 	struct knote *kn;
8827 	int i;
8828 	int err = 0;
8829 	struct filedesc *fdp = &p->p_fd;
8830 	unsigned long nknotes = 0;
8831 	unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
8832 	struct kevent_extinfo *kqext = NULL;
8833 
8834 	/* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
8835 	buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
8836 
8837 	kqext = (struct kevent_extinfo *)kalloc_data(buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO);
8838 	if (kqext == NULL) {
8839 		err = ENOMEM;
8840 		goto out;
8841 	}
8842 
8843 	proc_fdlock(p);
8844 	for (i = 0; i < fdp->fd_knlistsize; i++) {
8845 		kn = SLIST_FIRST(&fdp->fd_knlist[i]);
8846 		nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8847 	}
8848 	proc_fdunlock(p);
8849 
8850 	if (fdp->fd_knhashmask != 0) {
8851 		for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
8852 			knhash_lock(fdp);
8853 			kn = SLIST_FIRST(&fdp->fd_knhash[i]);
8854 			nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8855 			knhash_unlock(fdp);
8856 		}
8857 	}
8858 
8859 	assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8860 	err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8861 
8862 out:
8863 	kfree_data(kqext, buflen * sizeof(struct kevent_extinfo));
8864 
8865 	if (!err) {
8866 		*retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
8867 	}
8868 	return err;
8869 }
8870 
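/*
 * klist_copy_udata:
 * Copy the kn_udata value of each knote on the list into buf (up to buflen
 * entries), returning the running total so callers can learn how many
 * knotes exist even when the buffer is too small.
 */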
8871 static unsigned int
8872 klist_copy_udata(struct klist *list, uint64_t *buf,
8873     unsigned int buflen, unsigned int nknotes)
8874 {
8875 	struct knote *kn;
8876 	SLIST_FOREACH(kn, list, kn_link) {
8877 		if (nknotes < buflen) {
8878 			/*
8879 			 * kevent_register will always set kn_udata atomically
8880 			 * so that we don't have to take any kqlock here.
8881 			 */
8882 			buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
8883 		}
8884 		/* we return total number of knotes, which may be more than requested */
8885 		nknotes++;
8886 	}
8887 
8888 	return nknotes;
8889 }
8890 
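/*
 * kevent_proc_copy_uptrs:
 * Collect the user-visible pointers a process has registered with kevent:
 * the udata of every knote (fd lists and knote hash) followed by the ids of
 * its dynamic kqueues.  Returns the total count; only the first
 * bufsize / sizeof(uint64_t) entries are written to buf.
 */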
8891 int
8892 kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize)
8893 {
8894 	proc_t p = (proc_t)proc;
8895 	struct filedesc *fdp = &p->p_fd;
8896 	unsigned int nuptrs = 0;
8897 	unsigned int buflen = bufsize / sizeof(uint64_t);
8898 	struct kqworkloop *kqwl;
8899 
8900 	if (buflen > 0) {
8901 		assert(buf != NULL);
8902 	}
8903 
8904 	proc_fdlock(p);
8905 	for (int i = 0; i < fdp->fd_knlistsize; i++) {
8906 		nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
8907 	}
8908 	proc_fdunlock(p);
8909 
8910 	knhash_lock(fdp);
8911 	if (fdp->fd_knhashmask != 0) {
8912 		for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
8913 			nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
8914 		}
8915 	}
8916 	knhash_unlock(fdp);
8917 
8918 	kqhash_lock(fdp);
8919 	if (fdp->fd_kqhashmask != 0) {
8920 		for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
8921 			LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
8922 				if (nuptrs < buflen) {
8923 					buf[nuptrs] = kqwl->kqwl_dynamicid;
8924 				}
8925 				nuptrs++;
8926 			}
8927 		}
8928 	}
8929 	kqhash_unlock(fdp);
8930 
8931 	return (int)nuptrs;
8932 }
8933 
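/*
 * kevent_set_return_to_kernel_user_tsd:
 * If the thread is bound to a kqueue thread request, publish the
 * R2K_WORKLOOP_PENDING_EVENTS flag into the thread's return-to-kernel TSD
 * slot (32- or 64-bit wide depending on the process) so userspace can
 * observe it on the way out of the kernel.
 */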
8934 static void
8935 kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
8936 {
8937 	uint64_t ast_addr;
8938 	bool proc_is_64bit = !!(p->p_flag & P_LP64);
8939 	size_t user_addr_size = proc_is_64bit ? 8 : 4;
8940 	uint32_t ast_flags32 = 0;
8941 	uint64_t ast_flags64 = 0;
8942 	struct uthread *ut = get_bsdthread_info(thread);
8943 
8944 	if (ut->uu_kqr_bound != NULL) {
8945 		ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
8946 	}
8947 
8948 	if (ast_flags64 == 0) {
8949 		return;
8950 	}
8951 
8952 	if (!(p->p_flag & P_LP64)) {
8953 		ast_flags32 = (uint32_t)ast_flags64;
8954 		assert(ast_flags64 < 0x100000000ull);
8955 	}
8956 
8957 	ast_addr = thread_rettokern_addr(thread);
8958 	if (ast_addr == 0) {
8959 		return;
8960 	}
8961 
8962 	if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
8963 	    (user_addr_t)ast_addr,
8964 	    user_addr_size) != 0) {
8965 		printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
8966 		    "ast_addr = %llu\n", proc_getpid(p), thread_tid(current_thread()), ast_addr);
8967 	}
8968 }
8969 
8970 /*
8971  * Semantics of writing to the TSD value:
8972  *
8973  * 1. It is written by the kernel and cleared by userspace.
8974  * 2. When userspace clears the TSD field, it takes responsibility for acting
8975  * on the quantum expiry conveyed by the kernel.
8976  * 3. The TSD value is always cleared upon entry into userspace and upon exit
8977  * from userspace back to the kernel, so that it is never leaked across
8978  * thread requests.
8979  */
8980 void
8981 kevent_set_workq_quantum_expiry_user_tsd(proc_t p, thread_t thread,
8982     uint64_t flags)
8983 {
8984 	uint64_t ast_addr;
8985 	bool proc_is_64bit = !!(p->p_flag & P_LP64);
8986 	uint32_t ast_flags32 = 0;
8987 	uint64_t ast_flags64 = flags;
8988 
8989 	if (ast_flags64 == 0) {
8990 		return;
8991 	}
8992 
8993 	if (!(p->p_flag & P_LP64)) {
8994 		ast_flags32 = (uint32_t)ast_flags64;
8995 		assert(ast_flags64 < 0x100000000ull);
8996 	}
8997 
8998 	ast_addr = thread_wqquantum_addr(thread);
8999 	assert(ast_addr != 0);
9000 
9001 	if (proc_is_64bit) {
9002 		if (copyout_atomic64(ast_flags64, (user_addr_t) ast_addr)) {
9003 #if DEBUG || DEVELOPMENT
9004 			printf("pid %d (tid:%llu): copyout of workq quantum ast flags failed with "
9005 			    "ast_addr = %llu\n", proc_getpid(p), thread_tid(thread), ast_addr);
9006 #endif
9007 		}
9008 	} else {
9009 		if (copyout_atomic32(ast_flags32, (user_addr_t) ast_addr)) {
9010 #if DEBUG || DEVELOPMENT
9011 			printf("pid %d (tid:%llu): copyout of workq quantum ast flags failed with "
9012 			    "ast_addr = %llu\n", proc_getpid(p), thread_tid(thread), ast_addr);
9013 #endif
9014 		}
9015 	}
9016 }
9017 
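/*
 * kevent_ast:
 * Handle the AST_KEVENT_* bits on the way back to userspace: redrive the
 * workqueue thread request, publish the return-to-kernel TSD flags, and/or
 * re-evaluate an expired workqueue quantum.
 */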
9018 void
9019 kevent_ast(thread_t thread, uint16_t bits)
9020 {
9021 	proc_t p = current_proc();
9022 
9023 
9024 	if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
9025 		workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
9026 	}
9027 	if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
9028 		kevent_set_return_to_kernel_user_tsd(p, thread);
9029 	}
9030 
9031 	if (bits & AST_KEVENT_WORKQ_QUANTUM_EXPIRED) {
9032 		workq_kern_quantum_expiry_reevaluate(p, thread);
9033 	}
9034 }
9035 
9036 #if DEVELOPMENT || DEBUG
9037 
9038 #define KEVENT_SYSCTL_BOUND_ID 1
9039 
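/*
 * kevent_sysctl:
 * Read-only debug sysctl (kern.kevent.bound_id) returning the dynamic id of
 * the workloop the current thread is bound to, -1 if it is bound to a
 * non-workloop thread request, or 0 if it is not bound at all.
 */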
9040 static int
9041 kevent_sysctl SYSCTL_HANDLER_ARGS
9042 {
9043 #pragma unused(oidp, arg2)
9044 	uintptr_t type = (uintptr_t)arg1;
9045 	uint64_t bound_id = 0;
9046 
9047 	if (type != KEVENT_SYSCTL_BOUND_ID) {
9048 		return EINVAL;
9049 	}
9050 
9051 	if (req->newptr) {
9052 		return EINVAL;
9053 	}
9054 
9055 	struct uthread *ut = current_uthread();
9056 	if (!ut) {
9057 		return EFAULT;
9058 	}
9059 
9060 	workq_threadreq_t kqr = ut->uu_kqr_bound;
9061 	if (kqr) {
9062 		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
9063 			bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
9064 		} else {
9065 			bound_id = -1;
9066 		}
9067 	}
9068 
9069 	return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
9070 }
9071 
9072 SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
9073     "kevent information");
9074 
9075 SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
9076     CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
9077     (void *)KEVENT_SYSCTL_BOUND_ID,
9078     sizeof(kqueue_id_t), kevent_sysctl, "Q",
9079     "get the ID of the bound kqueue");
9080 
9081 #endif /* DEVELOPMENT || DEBUG */
9082