1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 *
28 */
29 /*-
30 * Copyright (c) 1999,2000,2001 Jonathan Lemon <[email protected]>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54 /*
55 * @(#)kern_event.c 1.0 (3/31/2000)
56 */
57 #include <stdint.h>
58 #include <machine/atomic.h>
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/filedesc.h>
63 #include <sys/kernel.h>
64 #include <sys/proc_internal.h>
65 #include <sys/kauth.h>
66 #include <sys/malloc.h>
67 #include <sys/unistd.h>
68 #include <sys/file_internal.h>
69 #include <sys/fcntl.h>
70 #include <sys/select.h>
71 #include <sys/queue.h>
72 #include <sys/event.h>
73 #include <sys/eventvar.h>
74 #include <sys/protosw.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/stat.h>
78 #include <sys/syscall.h> // SYS_* constants
79 #include <sys/sysctl.h>
80 #include <sys/uio.h>
81 #include <sys/sysproto.h>
82 #include <sys/user.h>
83 #include <sys/vnode_internal.h>
84 #include <string.h>
85 #include <sys/proc_info.h>
86 #include <sys/codesign.h>
87 #include <sys/pthread_shims.h>
88 #include <sys/kdebug.h>
89 #include <os/base.h>
90 #include <pexpert/pexpert.h>
91
92 #include <kern/thread_group.h>
93 #include <kern/locks.h>
94 #include <kern/clock.h>
95 #include <kern/cpu_data.h>
96 #include <kern/policy_internal.h>
97 #include <kern/thread_call.h>
98 #include <kern/sched_prim.h>
99 #include <kern/waitq.h>
100 #include <kern/zalloc.h>
101 #include <kern/kalloc.h>
102 #include <kern/assert.h>
103 #include <kern/ast.h>
104 #include <kern/thread.h>
105 #include <kern/kcdata.h>
106
107 #include <pthread/priority_private.h>
108 #include <pthread/workqueue_syscalls.h>
109 #include <pthread/workqueue_internal.h>
110 #include <libkern/libkern.h>
111
112 #include <os/log.h>
113
114 #include "net/net_str_id.h"
115
116 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
117 #include <skywalk/lib/net_filter_event.h>
118
119 extern bool net_check_compatible_alf(void);
120 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
121
122 #include <mach/task.h>
123 #include <libkern/section_keywords.h>
124
125 #if CONFIG_MEMORYSTATUS
126 #include <sys/kern_memorystatus.h>
127 #endif
128
129 #if DEVELOPMENT || DEBUG
130 #define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0)
131 #define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1)
132 TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
133 #endif
134
135 static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
136 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params =
137 VM_PACKING_PARAMS(KNOTE_KQ_PACKED);
138
139 extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
140 extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */
141
142 #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))
143
144 static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
145 vfs_context_t ctx);
146 static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
147 static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
148 struct kevent_qos_s *kev);
149 static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
150
151 static const struct fileops kqueueops = {
152 .fo_type = DTYPE_KQUEUE,
153 .fo_read = fo_no_read,
154 .fo_write = fo_no_write,
155 .fo_ioctl = fo_no_ioctl,
156 .fo_select = kqueue_select,
157 .fo_close = kqueue_close,
158 .fo_drain = kqueue_drain,
159 .fo_kqfilter = kqueue_kqfilter,
160 };
161
162 static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
163 static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result);
164 static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
165 thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
166 static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
167 static void kevent_register_wait_cleanup(struct knote *kn);
168
169 static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
170 static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags);
171
172 static void kqworkq_unbind(proc_t p, workq_threadreq_t);
173 static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread);
174 static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
175 static void kqueue_update_iotier_override(kqueue_t kqu);
176
177 static void kqworkloop_unbind(struct kqworkloop *kwql);
178
179 enum kqwl_unbind_locked_mode {
180 KQWL_OVERRIDE_DROP_IMMEDIATELY,
181 KQWL_OVERRIDE_DROP_DELAYED,
182 };
183 static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
184 enum kqwl_unbind_locked_mode how);
185 static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
186 static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
187 static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
188 enum {
189 KQWL_UTQ_NONE,
190 /*
191 * The wakeup qos is the qos of QUEUED knotes.
192 *
193 * This QoS is accounted for with the events override in the
194 * kqr_override_index field. It is raised each time a new knote is queued at
195 * a given QoS. The kqwl_wakeup_qos field is a superset of the non empty
196 * knote buckets and is recomputed after each event delivery.
197 */
198 KQWL_UTQ_UPDATE_WAKEUP_QOS,
199 KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
200 KQWL_UTQ_UNBINDING, /* attempt to rebind */
201 KQWL_UTQ_PARKING,
202 /*
203 * The wakeup override is for suppressed knotes that have fired again at
204 * a higher QoS than the one for which they are suppressed already.
205 * This override is cleared when the knote suppressed list becomes empty.
206 */
207 KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
208 KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
209 /*
210 * The QoS is the maximum QoS of an event enqueued on this workloop in
211 * userland. It is copied from the only EVFILT_WORKLOOP knote with
212 * a NOTE_WL_THREAD_REQUEST bit set allowed on this workloop. If there is no
213 * such knote, this QoS is 0.
214 */
215 KQWL_UTQ_SET_QOS_INDEX,
216 KQWL_UTQ_REDRIVE_EVENTS,
217 };
218 static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
219 static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);
220
221 static struct knote *knote_alloc(void);
222 static void knote_free(struct knote *kn);
223 static int kq_add_knote(struct kqueue *kq, struct knote *kn,
224 struct knote_lock_ctx *knlc, struct proc *p);
225 static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
226 struct kevent_qos_s *kev, bool is_fd, struct proc *p);
227
228 static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
229 static void knote_dequeue(kqueue_t kqu, struct knote *kn);
230
231 static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
232 struct kevent_qos_s *kev, int result);
233 static void knote_suppress(kqueue_t kqu, struct knote *kn);
234 static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
235 static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);
236
237 // both these functions may dequeue the knote and it is up to the caller
238 // to enqueue the knote back
239 static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
240 static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);
241
242 static ZONE_DEFINE(knote_zone, "knote zone",
243 sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
244 static ZONE_DEFINE(kqfile_zone, "kqueue file zone",
245 sizeof(struct kqfile), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
246 static ZONE_DEFINE(kqworkq_zone, "kqueue workq zone",
247 sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
248 static ZONE_DEFINE(kqworkloop_zone, "kqueue workloop zone",
249 sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
250
251 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
252
253 static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
254 static void filt_no_detach(struct knote *kn);
255 static int filt_bad_event(struct knote *kn, long hint);
256 static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
257 static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);
258
259 SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
260 .f_attach = filt_no_attach,
261 .f_detach = filt_no_detach,
262 .f_event = filt_bad_event,
263 .f_touch = filt_bad_touch,
264 .f_process = filt_bad_process,
265 };
266
267 #if CONFIG_MEMORYSTATUS
268 extern const struct filterops memorystatus_filtops;
269 #endif /* CONFIG_MEMORYSTATUS */
270 extern const struct filterops fs_filtops;
271 extern const struct filterops sig_filtops;
272 extern const struct filterops machport_filtops;
273 extern const struct filterops pipe_nfiltops;
274 extern const struct filterops pipe_rfiltops;
275 extern const struct filterops pipe_wfiltops;
276 extern const struct filterops ptsd_kqops;
277 extern const struct filterops ptmx_kqops;
278 extern const struct filterops soread_filtops;
279 extern const struct filterops sowrite_filtops;
280 extern const struct filterops sock_filtops;
281 extern const struct filterops soexcept_filtops;
282 extern const struct filterops spec_filtops;
283 extern const struct filterops bpfread_filtops;
284 extern const struct filterops necp_fd_rfiltops;
285 #if SKYWALK
286 extern const struct filterops skywalk_channel_rfiltops;
287 extern const struct filterops skywalk_channel_wfiltops;
288 extern const struct filterops skywalk_channel_efiltops;
289 #endif /* SKYWALK */
290 extern const struct filterops fsevent_filtops;
291 extern const struct filterops vnode_filtops;
292 extern const struct filterops tty_filtops;
293
294 const static struct filterops file_filtops;
295 const static struct filterops kqread_filtops;
296 const static struct filterops proc_filtops;
297 const static struct filterops timer_filtops;
298 const static struct filterops user_filtops;
299 const static struct filterops workloop_filtops;
300
301 /*
302 *
303 * Rules for adding new filters to the system:
304 * Public filters:
305 * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
306 * in the exported section of the header
307 * - Update the EVFILT_SYSCOUNT value to reflect the new addition
308 * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
309 * of the Public Filters section in the array.
310 * Private filters:
311 * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
312 * in the XNU_KERNEL_PRIVATE section of the header
313 * - Update the EVFILTID_MAX value to reflect the new addition
314 * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
315 * the Private filters section of the array.
316 */
317 static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
318 static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
319 /* Public Filters */
320 [~EVFILT_READ] = &file_filtops,
321 [~EVFILT_WRITE] = &file_filtops,
322 [~EVFILT_AIO] = &bad_filtops,
323 [~EVFILT_VNODE] = &file_filtops,
324 [~EVFILT_PROC] = &proc_filtops,
325 [~EVFILT_SIGNAL] = &sig_filtops,
326 [~EVFILT_TIMER] = &timer_filtops,
327 [~EVFILT_MACHPORT] = &machport_filtops,
328 [~EVFILT_FS] = &fs_filtops,
329 [~EVFILT_USER] = &user_filtops,
330 [~EVFILT_UNUSED_11] = &bad_filtops,
331 [~EVFILT_VM] = &bad_filtops,
332 [~EVFILT_SOCK] = &file_filtops,
333 #if CONFIG_MEMORYSTATUS
334 [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
335 #else
336 [~EVFILT_MEMORYSTATUS] = &bad_filtops,
337 #endif
338 [~EVFILT_EXCEPT] = &file_filtops,
339 #if SKYWALK
340 [~EVFILT_NW_CHANNEL] = &file_filtops,
341 #else /* !SKYWALK */
342 [~EVFILT_NW_CHANNEL] = &bad_filtops,
343 #endif /* !SKYWALK */
344 [~EVFILT_WORKLOOP] = &workloop_filtops,
345
346 /* Private filters */
347 [EVFILTID_KQREAD] = &kqread_filtops,
348 [EVFILTID_PIPE_N] = &pipe_nfiltops,
349 [EVFILTID_PIPE_R] = &pipe_rfiltops,
350 [EVFILTID_PIPE_W] = &pipe_wfiltops,
351 [EVFILTID_PTSD] = &ptsd_kqops,
352 [EVFILTID_SOREAD] = &soread_filtops,
353 [EVFILTID_SOWRITE] = &sowrite_filtops,
354 [EVFILTID_SCK] = &sock_filtops,
355 [EVFILTID_SOEXCEPT] = &soexcept_filtops,
356 [EVFILTID_SPEC] = &spec_filtops,
357 [EVFILTID_BPFREAD] = &bpfread_filtops,
358 [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
359 #if SKYWALK
360 [EVFILTID_SKYWALK_CHANNEL_W] = &skywalk_channel_wfiltops,
361 [EVFILTID_SKYWALK_CHANNEL_R] = &skywalk_channel_rfiltops,
362 [EVFILTID_SKYWALK_CHANNEL_E] = &skywalk_channel_efiltops,
363 #else /* !SKYWALK */
364 [EVFILTID_SKYWALK_CHANNEL_W] = &bad_filtops,
365 [EVFILTID_SKYWALK_CHANNEL_R] = &bad_filtops,
366 [EVFILTID_SKYWALK_CHANNEL_E] = &bad_filtops,
367 #endif /* !SKYWALK */
368 [EVFILTID_FSEVENT] = &fsevent_filtops,
369 [EVFILTID_VN] = &vnode_filtops,
370 [EVFILTID_TTY] = &tty_filtops,
371 [EVFILTID_PTMX] = &ptmx_kqops,
372
373 /* fake filter for detached knotes, keep last */
374 [EVFILTID_DETACHED] = &bad_filtops,
375 };
376
377 static inline bool
kqr_thread_bound(workq_threadreq_t kqr)
379 {
380 return kqr->tr_state == WORKQ_TR_STATE_BOUND;
381 }
382
383 static inline bool
kqr_thread_requested_pending(workq_threadreq_t kqr)
385 {
386 workq_tr_state_t tr_state = kqr->tr_state;
387 return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
388 }
389
390 static inline bool
kqr_thread_requested(workq_threadreq_t kqr)
392 {
393 return kqr->tr_state != WORKQ_TR_STATE_IDLE;
394 }
395
396 static inline thread_t
kqr_thread_fast(workq_threadreq_t kqr)
398 {
399 assert(kqr_thread_bound(kqr));
400 return kqr->tr_thread;
401 }
402
403 static inline thread_t
kqr_thread(workq_threadreq_t kqr)
405 {
406 return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
407 }
408
409 static inline struct kqworkloop *
kqr_kqworkloop(workq_threadreq_t kqr)
411 {
412 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
413 return __container_of(kqr, struct kqworkloop, kqwl_request);
414 }
415 return NULL;
416 }
417
418 static inline kqueue_t
kqr_kqueue(proc_t p, workq_threadreq_t kqr)
420 {
421 kqueue_t kqu;
422 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
423 kqu.kqwl = kqr_kqworkloop(kqr);
424 } else {
425 kqu.kqwq = p->p_fd.fd_wqkqueue;
426 assert(kqr >= kqu.kqwq->kqwq_request &&
427 kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
428 }
429 return kqu;
430 }
431
432 #if CONFIG_PREADOPT_TG
433 /* There are no guarantees about which locks are held when this is called */
434 inline thread_group_qos_t
kqr_preadopt_thread_group(workq_threadreq_t req)
436 {
437 struct kqworkloop *kqwl = kqr_kqworkloop(req);
438 return kqwl ? os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed) : NULL;
439 }
440
441 /* There are no guarantees about which locks are held when this is called */
inline _Atomic(thread_group_qos_t) *
443 kqr_preadopt_thread_group_addr(workq_threadreq_t req)
444 {
445 struct kqworkloop *kqwl = kqr_kqworkloop(req);
446 return kqwl ? (&kqwl->kqwl_preadopt_tg) : NULL;
447 }
448 #endif
449
450 /*
451 * kqueue/note lock implementations
452 *
453 * The kqueue lock guards the kq state, the state of its queues,
454 * and the kqueue-aware status and locks of individual knotes.
455 *
456 * The kqueue workq lock is used to protect state guarding the
457 * interaction of the kqueue with the workq. This state cannot
458 * be guarded by the kq lock - as it needs to be taken when we
459 * already have the waitq set lock held (during the waitq hook
460 * callback). It might be better to use the waitq lock itself
 * for this, but the IRQ requirements make that difficult.
462 *
463 * Knote flags, filter flags, and associated data are protected
464 * by the underlying object lock - and are only ever looked at
465 * by calling the filter to get a [consistent] snapshot of that
466 * data.
467 */
468
469 static inline void
kqlock(kqueue_t kqu)
471 {
472 lck_spin_lock(&kqu.kq->kq_lock);
473 }
474
475 static inline void
kqlock_held(__assert_only kqueue_t kqu)
477 {
478 LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
479 }
480
481 static inline void
kqunlock(kqueue_t kqu)
483 {
484 lck_spin_unlock(&kqu.kq->kq_lock);
485 }
486
487 static inline void
knhash_lock(struct filedesc *fdp)
489 {
490 lck_mtx_lock(&fdp->fd_knhashlock);
491 }
492
493 static inline void
knhash_unlock(struct filedesc *fdp)
495 {
496 lck_mtx_unlock(&fdp->fd_knhashlock);
497 }
498
499 /* wait event for knote locks */
500 static inline event_t
knote_lock_wev(struct knote *kn)
502 {
503 return (event_t)(&kn->kn_hook);
504 }
505
506 /* wait event for kevent_register_wait_* */
507 static inline event64_t
knote_filt_wev64(struct knote *kn)
509 {
510 /* kdp_workloop_sync_wait_find_owner knows about this */
511 return CAST_EVENT64_T(kn);
512 }
513
514 /* wait event for knote_post/knote_drop */
515 static inline event_t
knote_post_wev(struct knote *kn)
517 {
518 return &kn->kn_kevent;
519 }
520
521 /*!
522 * @function knote_has_qos
523 *
524 * @brief
525 * Whether the knote has a regular QoS.
526 *
527 * @discussion
528 * kn_qos_override is:
529 * - 0 on kqfiles
530 * - THREAD_QOS_LAST for special buckets (manager)
531 *
 * Other values mean the knote participates in QoS propagation.
533 */
534 static inline bool
knote_has_qos(struct knote *kn)
536 {
537 return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
538 }
539
540 #pragma mark knote locks
541
542 /*
543 * Enum used by the knote_lock_* functions.
544 *
545 * KNOTE_KQ_LOCK_ALWAYS
546 * The function will always return with the kq lock held.
547 *
548 * KNOTE_KQ_LOCK_ON_SUCCESS
549 * The function will return with the kq lock held if it was successful
550 * (knote_lock() is the only function that can fail).
551 *
552 * KNOTE_KQ_LOCK_ON_FAILURE
553 * The function will return with the kq lock held if it was unsuccessful
554 * (knote_lock() is the only function that can fail).
555 *
556 * KNOTE_KQ_UNLOCK:
557 * The function returns with the kq unlocked.
558 */
559 enum kqlocking {
560 KNOTE_KQ_LOCK_ALWAYS,
561 KNOTE_KQ_LOCK_ON_SUCCESS,
562 KNOTE_KQ_LOCK_ON_FAILURE,
563 KNOTE_KQ_UNLOCK,
564 };
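
/*
 * A minimal usage sketch (illustrative only, not compiled): how callers in
 * this file typically pair knote_lock() and knote_unlock() under the modes
 * above. `kq`, `kn` and `knlc` are hypothetical locals.
 *
 *	struct knote_lock_ctx knlc;
 *
 *	kqlock(kq);
 *	if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
 *		// kq lock and knote lock are held; safe to work on the knote
 *		knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 *	}
 *	// on failure the knote was concurrently dropped, and with
 *	// KNOTE_KQ_LOCK_ON_SUCCESS the kq lock is not held here
 */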
565
566 static struct knote_lock_ctx *
knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
568 {
569 struct knote_lock_ctx *ctx;
570 LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
571 if (ctx->knlc_knote == kn) {
572 return ctx;
573 }
574 }
575 panic("knote lock context not found: %p", kn);
576 __builtin_trap();
577 }
578
579 /* slowpath of knote_lock() */
580 __attribute__((noinline))
581 static bool __result_use_check
knote_lock_slow(kqueue_t kqu, struct knote *kn,
583 struct knote_lock_ctx *knlc, int kqlocking)
584 {
585 struct knote_lock_ctx *owner_lc;
586 struct uthread *uth = current_uthread();
587 wait_result_t wr;
588
589 kqlock_held(kqu);
590
591 owner_lc = knote_lock_ctx_find(kqu, kn);
592 #if DEBUG || DEVELOPMENT
593 knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
594 #endif
595 owner_lc->knlc_waiters++;
596
597 /*
598 * Make our lock context visible to knote_unlock()
599 */
600 uth->uu_knlock = knlc;
601
602 wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
603 knote_lock_wev(kn), owner_lc->knlc_thread,
604 THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);
605
606 if (wr == THREAD_RESTART) {
607 /*
		 * We haven't been woken up by knote_unlock() but by knote_unlock_cancel().
		 * We need to clean up the state since no one else did.
610 */
611 uth->uu_knlock = NULL;
612 #if DEBUG || DEVELOPMENT
613 assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
614 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
615 #endif
616
617 if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
618 kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
619 kqlock(kqu);
620 }
621 return false;
622 } else {
623 if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
624 kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
625 kqlock(kqu);
626 #if DEBUG || DEVELOPMENT
627 /*
628 * This state is set under the lock so we can't
629 * really assert this unless we hold the lock.
630 */
631 assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
632 #endif
633 }
634 return true;
635 }
636 }
637
638 /*
639 * Attempts to take the "knote" lock.
640 *
641 * Called with the kqueue lock held.
642 *
643 * Returns true if the knote lock is acquired, false if it has been dropped
644 */
645 static bool __result_use_check
knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
647 enum kqlocking kqlocking)
648 {
649 kqlock_held(kqu);
650
651 #if DEBUG || DEVELOPMENT
652 assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
653 #endif
654 knlc->knlc_knote = kn;
655 knlc->knlc_thread = current_thread();
656 knlc->knlc_waiters = 0;
657
658 if (__improbable(kn->kn_status & KN_LOCKED)) {
659 return knote_lock_slow(kqu, kn, knlc, kqlocking);
660 }
661
662 /*
663 * When the knote will be dropped, the knote lock is taken before
664 * KN_DROPPING is set, and then the knote will be removed from any
665 * hash table that references it before the lock is canceled.
666 */
667 assert((kn->kn_status & KN_DROPPING) == 0);
668 LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
669 kn->kn_status |= KN_LOCKED;
670 #if DEBUG || DEVELOPMENT
671 knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
672 #endif
673
674 if (kqlocking == KNOTE_KQ_UNLOCK ||
675 kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
676 kqunlock(kqu);
677 }
678 return true;
679 }
680
681 /*
682 * Unlocks a knote successfully locked with knote_lock().
683 *
684 * Called with the kqueue lock held.
685 *
686 * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
687 */
688 static void
knote_unlock(kqueue_t kqu, struct knote *kn,
690 struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
691 {
692 kqlock_held(kqu);
693
694 assert(knlc->knlc_knote == kn);
695 assert(kn->kn_status & KN_LOCKED);
696 #if DEBUG || DEVELOPMENT
697 assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
698 #endif
699
700 LIST_REMOVE(knlc, knlc_link);
701
702 if (knlc->knlc_waiters) {
703 thread_t thread = THREAD_NULL;
704
705 wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
706 LCK_WAKE_DEFAULT, &thread);
707
708 /*
709 * knote_lock_slow() publishes the lock context of waiters
710 * in uthread::uu_knlock.
711 *
712 * Reach out and make this context the new owner.
713 */
714 struct uthread *ut = get_bsdthread_info(thread);
715 struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;
716
717 assert(next_owner_lc->knlc_knote == kn);
718 next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
719 LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
720 #if DEBUG || DEVELOPMENT
721 next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
722 #endif
723 ut->uu_knlock = NULL;
724 thread_deallocate_safe(thread);
725 } else {
726 kn->kn_status &= ~KN_LOCKED;
727 }
728
729 if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
730 /*
731 * No f_event() in flight anymore, we can leave QoS "Merge" mode
732 *
733 * See knote_adjust_qos()
734 */
735 kn->kn_status &= ~KN_MERGE_QOS;
736 }
737 if (kqlocking == KNOTE_KQ_UNLOCK) {
738 kqunlock(kqu);
739 }
740 #if DEBUG || DEVELOPMENT
741 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
742 #endif
743 }
744
745 /*
746 * Aborts all waiters for a knote lock, and unlock the knote.
747 *
748 * Called with the kqueue lock held.
749 *
750 * Returns with the kqueue unlocked.
751 */
752 static void
knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
754 struct knote_lock_ctx *knlc)
755 {
756 kqlock_held(kq);
757
758 assert(knlc->knlc_knote == kn);
759 assert(kn->kn_status & KN_LOCKED);
760 assert(kn->kn_status & KN_DROPPING);
761
762 LIST_REMOVE(knlc, knlc_link);
763 kn->kn_status &= ~KN_LOCKED;
764 kqunlock(kq);
765
766 if (knlc->knlc_waiters) {
767 wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
768 }
769 #if DEBUG || DEVELOPMENT
770 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
771 #endif
772 }
773
774 /*
775 * Call the f_event hook of a given filter.
776 *
777 * Takes a use count to protect against concurrent drops.
778 * Called with the object lock held.
779 */
780 static void
knote_post(struct knote *kn, long hint)
782 {
783 struct kqueue *kq = knote_get_kq(kn);
784 int dropping, result;
785
786 kqlock(kq);
787
788 if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
789 return kqunlock(kq);
790 }
791
792 if (__improbable(kn->kn_status & KN_POSTING)) {
793 panic("KNOTE() called concurrently on knote %p", kn);
794 }
795
796 kn->kn_status |= KN_POSTING;
797
798 kqunlock(kq);
799 result = filter_call(knote_fops(kn), f_event(kn, hint));
800 kqlock(kq);
801
	/*
	 * Someone dropped the knote, or the monitored object vanished, while
	 * we were in f_event; swallow the side effects of the post.
	 */
805 dropping = (kn->kn_status & (KN_DROPPING | KN_VANISHED));
806
807 if (!dropping && (result & FILTER_ADJUST_EVENT_IOTIER_BIT)) {
808 kqueue_update_iotier_override(kq);
809 }
810
811 if (!dropping && (result & FILTER_ACTIVE)) {
812 knote_activate(kq, kn, result);
813 }
814
815 if ((kn->kn_status & KN_LOCKED) == 0) {
816 /*
817 * There's no other f_* call in flight, we can leave QoS "Merge" mode.
818 *
819 * See knote_adjust_qos()
820 */
821 kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
822 } else {
823 kn->kn_status &= ~KN_POSTING;
824 }
825
826 if (__improbable(dropping)) {
827 thread_wakeup(knote_post_wev(kn));
828 }
829
830 kqunlock(kq);
831 }
832
833 /*
834 * Called by knote_drop() and knote_fdclose() to wait for the last f_event()
835 * caller to be done.
836 *
837 * - kq locked at entry
838 * - kq unlocked at exit
839 */
840 static void
knote_wait_for_post(struct kqueue *kq, struct knote *kn)
842 {
843 kqlock_held(kq);
844
845 assert(kn->kn_status & (KN_DROPPING | KN_VANISHED));
846
847 if (kn->kn_status & KN_POSTING) {
848 lck_spin_sleep(&kq->kq_lock, LCK_SLEEP_UNLOCK, knote_post_wev(kn),
849 THREAD_UNINT | THREAD_WAIT_NOREPORT);
850 } else {
851 kqunlock(kq);
852 }
853 }
854
855 #pragma mark knote helpers for filters
856
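/*
 * Accessors for kn->kn_hook. On platforms with pointer authentication the
 * stored hook pointer is signed with a per-knote discriminator derived from
 * kn_filter and kn_filtid, so filters must go through these helpers rather
 * than touching kn_hook directly.
 */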
857 OS_ALWAYS_INLINE
858 void *
knote_kn_hook_get_raw(struct knote *kn)
860 {
861 uintptr_t *addr = &kn->kn_hook;
862
863 void *hook = (void *) *addr;
864 #if __has_feature(ptrauth_calls)
865 if (hook) {
866 uint16_t blend = kn->kn_filter;
867 blend |= (kn->kn_filtid << 8);
868 blend ^= OS_PTRAUTH_DISCRIMINATOR("kn.kn_hook");
869
870 hook = ptrauth_auth_data(hook, ptrauth_key_process_independent_data,
871 ptrauth_blend_discriminator(addr, blend));
872 }
873 #endif
874
875 return hook;
876 }
877
878 OS_ALWAYS_INLINE void
knote_kn_hook_set_raw(struct knote *kn, void *kn_hook)
880 {
881 uintptr_t *addr = &kn->kn_hook;
882 #if __has_feature(ptrauth_calls)
883 if (kn_hook) {
884 uint16_t blend = kn->kn_filter;
885 blend |= (kn->kn_filtid << 8);
886 blend ^= OS_PTRAUTH_DISCRIMINATOR("kn.kn_hook");
887
888 kn_hook = ptrauth_sign_unauthenticated(kn_hook,
889 ptrauth_key_process_independent_data,
890 ptrauth_blend_discriminator(addr, blend));
891 }
892 #endif
893 *addr = (uintptr_t) kn_hook;
894 }
895
896 OS_ALWAYS_INLINE
897 void
knote_set_error(struct knote *kn, int error)
899 {
900 kn->kn_flags |= EV_ERROR;
901 kn->kn_sdata = error;
902 }
903
904 OS_ALWAYS_INLINE
905 int64_t
knote_low_watermark(const struct knote *kn)
907 {
908 return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1;
909 }
910
911 /*!
912 * @function knote_fill_kevent_with_sdata
913 *
914 * @brief
915 * Fills in a kevent from the current content of a knote.
916 *
917 * @discussion
918 * This is meant to be called from filter's f_event hooks.
919 * The kevent data is filled with kn->kn_sdata.
920 *
921 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
922 *
923 * Using knote_fill_kevent is typically preferred.
924 */
925 OS_ALWAYS_INLINE
926 void
knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
928 {
929 #define knote_assert_aliases(name1, offs1, name2) \
930 static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
931 offsetof(struct kevent_internal_s, name2), \
	    "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 " need to alias")
933 /*
934 * All the code makes assumptions on these aliasing,
935 * so make sure we fail the build if we ever ever ever break them.
936 */
937 knote_assert_aliases(ident, 0, kei_ident);
938 #ifdef __LITTLE_ENDIAN__
939 knote_assert_aliases(filter, 0, kei_filter); // non trivial overlap
940 knote_assert_aliases(filter, 1, kei_filtid); // non trivial overlap
941 #else
942 knote_assert_aliases(filter, 0, kei_filtid); // non trivial overlap
943 knote_assert_aliases(filter, 1, kei_filter); // non trivial overlap
944 #endif
945 knote_assert_aliases(flags, 0, kei_flags);
946 knote_assert_aliases(qos, 0, kei_qos);
947 knote_assert_aliases(udata, 0, kei_udata);
948 knote_assert_aliases(fflags, 0, kei_fflags);
949 knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
950 knote_assert_aliases(data, 0, kei_sdata); // non trivial overlap
951 knote_assert_aliases(ext, 0, kei_ext);
952 #undef knote_assert_aliases
953
954 /*
955 * Fix the differences between kevent_qos_s and kevent_internal_s:
956 * - xflags is where kn_sfflags lives, we need to zero it
957 * - fixup the high bits of `filter` where kn_filtid lives
958 */
959 *kev = *(struct kevent_qos_s *)&kn->kn_kevent;
960 kev->xflags = 0;
961 kev->filter |= 0xff00;
962 if (kn->kn_flags & EV_CLEAR) {
963 kn->kn_fflags = 0;
964 }
965 }
966
967 /*!
968 * @function knote_fill_kevent
969 *
970 * @brief
971 * Fills in a kevent from the current content of a knote.
972 *
973 * @discussion
974 * This is meant to be called from filter's f_event hooks.
975 * The kevent data is filled with the passed in data.
976 *
977 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
978 */
979 OS_ALWAYS_INLINE
980 void
knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
982 {
983 knote_fill_kevent_with_sdata(kn, kev);
984 kev->filter = kn->kn_filter;
985 kev->data = data;
986 }
987
988
989 #pragma mark file_filtops
990
991 static int
filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
993 {
994 return fo_kqfilter(kn->kn_fp, kn, kev);
995 }
996
997 SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
998 .f_isfd = 1,
999 .f_attach = filt_fileattach,
1000 };
1001
1002 #pragma mark kqread_filtops
1003
1004 #define f_flag fp_glob->fg_flag
1005 #define f_ops fp_glob->fg_ops
1006 #define f_lflags fp_glob->fg_lflags
1007
1008 static void
filt_kqdetach(struct knote *kn)
1010 {
1011 struct kqfile *kqf = (struct kqfile *)fp_get_data(kn->kn_fp);
1012 struct kqueue *kq = &kqf->kqf_kqueue;
1013
1014 kqlock(kq);
1015 KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
1016 kqunlock(kq);
1017 }
1018
1019 static int
filt_kqueue(struct knote *kn, __unused long hint)
1021 {
1022 struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1023
1024 return kq->kq_count > 0;
1025 }
1026
1027 static int
filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
1029 {
1030 #pragma unused(kev)
1031 struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1032 int res;
1033
1034 kqlock(kq);
1035 res = (kq->kq_count > 0);
1036 kqunlock(kq);
1037
1038 return res;
1039 }
1040
1041 static int
filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
1043 {
1044 struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1045 int res = 0;
1046
1047 kqlock(kq);
1048 if (kq->kq_count) {
1049 knote_fill_kevent(kn, kev, kq->kq_count);
1050 res = 1;
1051 }
1052 kqunlock(kq);
1053
1054 return res;
1055 }
1056
1057 SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
1058 .f_isfd = 1,
1059 .f_detach = filt_kqdetach,
1060 .f_event = filt_kqueue,
1061 .f_touch = filt_kqtouch,
1062 .f_process = filt_kqprocess,
1063 };
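
/*
 * Illustrative userspace sketch (not compiled here): a kqueue file
 * descriptor can itself be monitored with EVFILT_READ, which the filter
 * above implements; the event is active while kq_count is non-zero and
 * reports that count. `inner_kq` and `outer_kq` are hypothetical.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, inner_kq, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer_kq, &kev, 1, NULL, 0, NULL);
 */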
1064
1065 #pragma mark proc_filtops
1066
1067 static int
filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1069 {
1070 struct proc *p;
1071
1072 assert(PID_MAX < NOTE_PDATAMASK);
1073
1074 if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
1075 knote_set_error(kn, ENOTSUP);
1076 return 0;
1077 }
1078
1079 p = proc_find((int)kn->kn_id);
1080 if (p == NULL) {
1081 knote_set_error(kn, ESRCH);
1082 return 0;
1083 }
1084
1085 const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
1086
1087 if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
1088 do {
1089 pid_t selfpid = proc_selfpid();
1090
1091 if (p->p_ppid == selfpid) {
1092 break; /* parent => ok */
1093 }
1094 if ((p->p_lflag & P_LTRACED) != 0 &&
1095 (p->p_oppid == selfpid)) {
1096 break; /* parent-in-waiting => ok */
1097 }
1098 if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) {
1099 break; /* allowed to signal => ok */
1100 }
1101 proc_rele(p);
1102 knote_set_error(kn, EACCES);
1103 return 0;
1104 } while (0);
1105 }
1106
1107 kn->kn_proc = p;
1108 kn->kn_flags |= EV_CLEAR; /* automatically set */
1109 kn->kn_sdata = 0; /* incoming data is ignored */
1110
1111 proc_klist_lock();
1112
1113 KNOTE_ATTACH(&p->p_klist, kn);
1114
1115 proc_klist_unlock();
1116
1117 proc_rele(p);
1118
1119 /*
1120 * only captures edge-triggered events after this point
1121 * so it can't already be fired.
1122 */
1123 return 0;
1124 }
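
/*
 * Illustrative userspace sketch (not compiled here): registering for a
 * child's exit, which exercises the NOTE_EXITSTATUS permission checks
 * above (parent, tracing parent, or allowed to signal). `kq` and `child`
 * are hypothetical.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, child, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// on delivery, fflags contains NOTE_EXIT | NOTE_EXITSTATUS and
 *	// data carries the exit status bits collected in filt_procevent()
 */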
1125
1126
1127 /*
1128 * The knote may be attached to a different process, which may exit,
1129 * leaving nothing for the knote to be attached to. In that case,
1130 * the pointer to the process will have already been nulled out.
1131 */
1132 static void
filt_procdetach(struct knote *kn)
1134 {
1135 struct proc *p;
1136
1137 proc_klist_lock();
1138
1139 p = kn->kn_proc;
1140 if (p != PROC_NULL) {
1141 kn->kn_proc = PROC_NULL;
1142 KNOTE_DETACH(&p->p_klist, kn);
1143 }
1144
1145 proc_klist_unlock();
1146 }
1147
1148 static int
filt_procevent(struct knote *kn, long hint)
1150 {
1151 u_int event;
1152
1153 /* ALWAYS CALLED WITH proc_klist_lock */
1154
1155 /*
1156 * Note: a lot of bits in hint may be obtained from the knote
1157 * To free some of those bits, see <rdar://problem/12592988> Freeing up
1158 * bits in hint for filt_procevent
1159 *
1160 * mask off extra data
1161 */
1162 event = (u_int)hint & NOTE_PCTRLMASK;
1163
1164 /*
1165 * termination lifecycle events can happen while a debugger
1166 * has reparented a process, in which case notifications
1167 * should be quashed except to the tracing parent. When
1168 * the debugger reaps the child (either via wait4(2) or
1169 * process exit), the child will be reparented to the original
1170 * parent and these knotes re-fired.
1171 */
1172 if (event & NOTE_EXIT) {
1173 if ((kn->kn_proc->p_oppid != 0)
1174 && (proc_getpid(knote_get_kq(kn)->kq_p) != kn->kn_proc->p_ppid)) {
1175 /*
1176 * This knote is not for the current ptrace(2) parent, ignore.
1177 */
1178 return 0;
1179 }
1180 }
1181
1182 /*
1183 * if the user is interested in this event, record it.
1184 */
1185 if (kn->kn_sfflags & event) {
1186 kn->kn_fflags |= event;
1187 }
1188
1189 #pragma clang diagnostic push
1190 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1191 if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
1192 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1193 }
1194 #pragma clang diagnostic pop
1195
1196
1197 /*
1198 * The kernel has a wrapper in place that returns the same data
1199 * as is collected here, in kn_hook32. Any changes to how
1200 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
1201 * should also be reflected in the proc_pidnoteexit() wrapper.
1202 */
1203 if (event == NOTE_EXIT) {
1204 kn->kn_hook32 = 0;
1205 if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
1206 kn->kn_fflags |= NOTE_EXITSTATUS;
1207 kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
1208 }
1209 if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
1210 kn->kn_fflags |= NOTE_EXIT_DETAIL;
1211 if ((kn->kn_proc->p_lflag &
1212 P_LTERM_DECRYPTFAIL) != 0) {
1213 kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
1214 }
1215 if ((kn->kn_proc->p_lflag &
1216 P_LTERM_JETSAM) != 0) {
1217 kn->kn_hook32 |= NOTE_EXIT_MEMORY;
1218 switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
1219 case P_JETSAM_VMPAGESHORTAGE:
1220 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
1221 break;
1222 case P_JETSAM_VMTHRASHING:
1223 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
1224 break;
1225 case P_JETSAM_FCTHRASHING:
1226 kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
1227 break;
1228 case P_JETSAM_VNODE:
1229 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
1230 break;
1231 case P_JETSAM_HIWAT:
1232 kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
1233 break;
1234 case P_JETSAM_PID:
1235 kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
1236 break;
1237 case P_JETSAM_IDLEEXIT:
1238 kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
1239 break;
1240 }
1241 }
1242 if ((proc_getcsflags(kn->kn_proc) &
1243 CS_KILLED) != 0) {
1244 kn->kn_hook32 |= NOTE_EXIT_CSERROR;
1245 }
1246 }
1247 }
1248
1249 /* if we have any matching state, activate the knote */
1250 return kn->kn_fflags != 0;
1251 }
1252
1253 static int
filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
1255 {
1256 int res;
1257
1258 proc_klist_lock();
1259
	/* accept new filter flags and mask off output events no longer interesting */
1261 kn->kn_sfflags = kev->fflags;
1262
1263 /* restrict the current results to the (smaller?) set of new interest */
1264 /*
1265 * For compatibility with previous implementations, we leave kn_fflags
1266 * as they were before.
1267 */
1268 //kn->kn_fflags &= kn->kn_sfflags;
1269
1270 res = (kn->kn_fflags != 0);
1271
1272 proc_klist_unlock();
1273
1274 return res;
1275 }
1276
1277 static int
filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
1279 {
1280 int res = 0;
1281
1282 proc_klist_lock();
1283 if (kn->kn_fflags) {
1284 knote_fill_kevent(kn, kev, kn->kn_hook32);
1285 kn->kn_hook32 = 0;
1286 res = 1;
1287 }
1288 proc_klist_unlock();
1289 return res;
1290 }
1291
1292 SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
1293 .f_attach = filt_procattach,
1294 .f_detach = filt_procdetach,
1295 .f_event = filt_procevent,
1296 .f_touch = filt_proctouch,
1297 .f_process = filt_procprocess,
1298 };
1299
1300 #pragma mark timer_filtops
1301
1302 struct filt_timer_params {
1303 uint64_t deadline; /* deadline in abs/cont time
1304 * (or 0 if NOTE_ABSOLUTE and deadline is in past) */
1305 uint64_t leeway; /* leeway in abstime, or 0 if none */
1306 uint64_t interval; /* interval in abstime or 0 if non-repeating timer */
1307 };
1308
1309 /*
1310 * Values stored in the knote at rest (using Mach absolute time units)
1311 *
1312 * kn->kn_thcall where the thread_call object is stored
1313 * kn->kn_ext[0] next deadline or 0 if immediate expiration
1314 * kn->kn_ext[1] leeway value
1315 * kn->kn_sdata interval timer: the interval
1316 * absolute/deadline timer: 0
1317 * kn->kn_hook32 timer state (with gencount)
1318 *
1319 * TIMER_IDLE:
1320 * The timer has either never been scheduled or been cancelled.
1321 * It is safe to schedule a new one in this state.
1322 *
1323 * TIMER_ARMED:
1324 * The timer has been scheduled
1325 *
1326 * TIMER_FIRED
1327 * The timer has fired and an event needs to be delivered.
1328 * When in this state, the callout may still be running.
1329 *
1330 * TIMER_IMMEDIATE
1331 * The timer has fired at registration time, and the callout was never
1332 * dispatched.
1333 */
1334 #define TIMER_IDLE 0x0
1335 #define TIMER_ARMED 0x1
1336 #define TIMER_FIRED 0x2
1337 #define TIMER_IMMEDIATE 0x3
1338 #define TIMER_STATE_MASK 0x3
1339 #define TIMER_GEN_INC 0x4
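
/*
 * Illustrative userspace sketch (not compiled here): a repeating 5 second
 * timer serviced by the code below. The interval is converted by
 * filt_timervalidate() and lands in kn_sdata; on delivery, data reports how
 * many intervals expired (see filt_timerprocess). `kq` is hypothetical.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */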
1340
1341 static void
filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
1343 {
1344 kn->kn_ext[0] = params->deadline;
1345 kn->kn_ext[1] = params->leeway;
1346 kn->kn_sdata = params->interval;
1347 }
1348
1349 /*
1350 * filt_timervalidate - process data from user
1351 *
1352 * Sets up the deadline, interval, and leeway from the provided user data
1353 *
1354 * Input:
1355 * kn_sdata timer deadline or interval time
1356 * kn_sfflags style of timer, unit of measurement
1357 *
1358 * Output:
 *	struct filt_timer_params to apply to the filter with
 *	filt_timer_set_params when changes are ready to be committed.
1361 *
1362 * Returns:
1363 * EINVAL Invalid user data parameters
1364 * ERANGE Various overflows with the parameters
1365 *
1366 * Called with timer filter lock held.
1367 */
1368 static int
filt_timervalidate(const struct kevent_qos_s *kev,
1370 struct filt_timer_params *params)
1371 {
1372 /*
1373 * There are 5 knobs that need to be chosen for a timer registration:
1374 *
1375 * A) Units of time (what is the time duration of the specified number)
1376 * Absolute and interval take:
1377 * NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME
1378 * Defaults to milliseconds if not specified
1379 *
1380 * B) Clock epoch (what is the zero point of the specified number)
1381 * For interval, there is none
1382 * For absolute, defaults to the gettimeofday/calendar epoch
1383 * With NOTE_MACHTIME, uses mach_absolute_time()
1384 * With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time()
1385 *
1386 * C) The knote's behavior on delivery
1387 * Interval timer causes the knote to arm for the next interval unless one-shot is set
1388 * Absolute is a forced one-shot timer which deletes on delivery
1389 * TODO: Add a way for absolute to be not forced one-shot
1390 *
1391 * D) Whether the time duration is relative to now or absolute
1392 * Interval fires at now + duration when it is set up
1393 * Absolute fires at now + difference between now walltime and passed in walltime
1394 * With NOTE_MACHTIME it fires at an absolute MAT or MCT.
1395 *
1396 * E) Whether the timer continues to tick across sleep
1397 * By default all three do not.
1398 * For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep
1399 * With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME:
1400 * expires when mach_continuous_time() is > the passed in value.
1401 */
1402
1403 uint64_t multiplier;
1404
1405 boolean_t use_abstime = FALSE;
1406
1407 switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
1408 case NOTE_SECONDS:
1409 multiplier = NSEC_PER_SEC;
1410 break;
1411 case NOTE_USECONDS:
1412 multiplier = NSEC_PER_USEC;
1413 break;
1414 case NOTE_NSECONDS:
1415 multiplier = 1;
1416 break;
1417 case NOTE_MACHTIME:
1418 multiplier = 0;
1419 use_abstime = TRUE;
1420 break;
1421 case 0: /* milliseconds (default) */
1422 multiplier = NSEC_PER_SEC / 1000;
1423 break;
1424 default:
1425 return EINVAL;
1426 }
1427
1428 /* transform the leeway in kn_ext[1] to same time scale */
1429 if (kev->fflags & NOTE_LEEWAY) {
1430 uint64_t leeway_abs;
1431
1432 if (use_abstime) {
1433 leeway_abs = (uint64_t)kev->ext[1];
1434 } else {
1435 uint64_t leeway_ns;
1436 if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
1437 return ERANGE;
1438 }
1439
1440 nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
1441 }
1442
1443 params->leeway = leeway_abs;
1444 } else {
1445 params->leeway = 0;
1446 }
1447
1448 if (kev->fflags & NOTE_ABSOLUTE) {
1449 uint64_t deadline_abs;
1450
1451 if (use_abstime) {
1452 deadline_abs = (uint64_t)kev->data;
1453 } else {
1454 uint64_t calendar_deadline_ns;
1455
1456 if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
1457 return ERANGE;
1458 }
1459
1460 /* calendar_deadline_ns is in nanoseconds since the epoch */
1461
1462 clock_sec_t seconds;
1463 clock_nsec_t nanoseconds;
1464
1465 /*
1466 * Note that the conversion through wall-time is only done once.
1467 *
1468 * If the relationship between MAT and gettimeofday changes,
1469 * the underlying timer does not update.
1470 *
1471 * TODO: build a wall-time denominated timer_call queue
1472 * and a flag to request DTRTing with wall-time timers
1473 */
1474 clock_get_calendar_nanotime(&seconds, &nanoseconds);
1475
1476 uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;
1477
1478 /* if deadline is in the future */
1479 if (calendar_now_ns < calendar_deadline_ns) {
1480 uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
1481 uint64_t interval_abs;
1482
1483 nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1484
1485 /*
1486 * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only
1487 * causes the timer to keep ticking across sleep, but
1488 * it does not change the calendar timebase.
1489 */
1490
1491 if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1492 clock_continuoustime_interval_to_deadline(interval_abs,
1493 &deadline_abs);
1494 } else {
1495 clock_absolutetime_interval_to_deadline(interval_abs,
1496 &deadline_abs);
1497 }
1498 } else {
1499 deadline_abs = 0; /* cause immediate expiration */
1500 }
1501 }
1502
1503 params->deadline = deadline_abs;
1504 params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
1505 } else if (kev->data < 0) {
1506 /*
1507 * Negative interval timers fire immediately, once.
1508 *
1509 * Ideally a negative interval would be an error, but certain clients
	 * pass negative values by accident, and expect an event back.
1511 *
1512 * In the old implementation the timer would repeat with no delay
1513 * N times until mach_absolute_time() + (N * interval) underflowed,
1514 * then it would wait ~forever by accidentally arming a timer for the far future.
1515 *
1516 * We now skip the power-wasting hot spin phase and go straight to the idle phase.
1517 */
1518
1519 params->deadline = 0; /* expire immediately */
1520 params->interval = 0; /* non-repeating */
1521 } else {
1522 uint64_t interval_abs = 0;
1523
1524 if (use_abstime) {
1525 interval_abs = (uint64_t)kev->data;
1526 } else {
1527 uint64_t interval_ns;
1528 if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
1529 return ERANGE;
1530 }
1531
1532 nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1533 }
1534
1535 uint64_t deadline = 0;
1536
1537 if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1538 clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
1539 } else {
1540 clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
1541 }
1542
1543 params->deadline = deadline;
1544 params->interval = interval_abs;
1545 }
1546
1547 return 0;
1548 }
1549
1550 /*
1551 * filt_timerexpire - the timer callout routine
1552 */
1553 static void
filt_timerexpire(void *knx, void *state_on_arm)
1555 {
1556 struct knote *kn = knx;
1557
1558 uint32_t state = (uint32_t)(uintptr_t)state_on_arm;
1559 uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED;
1560
1561 if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) {
1562 // our f_event always would say FILTER_ACTIVE,
1563 // so be leaner and just do it.
1564 struct kqueue *kq = knote_get_kq(kn);
1565 kqlock(kq);
1566 knote_activate(kq, kn, FILTER_ACTIVE);
1567 kqunlock(kq);
1568 } else {
1569 /*
1570 * The timer has been reprogrammed or canceled since it was armed,
1571 * and this is a late firing for the timer, just ignore it.
1572 */
1573 }
1574 }
1575
1576 /*
 * Does this deadline need a timer armed for it, or has it expired?
1578 */
1579 static bool
filt_timer_is_ready(struct knote *kn)
1581 {
1582 uint64_t now, deadline = kn->kn_ext[0];
1583
1584 if (deadline == 0) {
1585 return true;
1586 }
1587
1588 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1589 now = mach_continuous_time();
1590 } else {
1591 now = mach_absolute_time();
1592 }
1593 return deadline <= now;
1594 }
1595
1596 /*
1597 * Arm a timer
1598 *
1599 * It is the responsibility of the caller to make sure the timer call
1600 * has completed or been cancelled properly prior to arming it.
1601 */
1602 static void
filt_timerarm(struct knote *kn)
1604 {
1605 uint64_t deadline = kn->kn_ext[0];
1606 uint64_t leeway = kn->kn_ext[1];
1607 uint32_t state;
1608
1609 int filter_flags = kn->kn_sfflags;
1610 unsigned int timer_flags = 0;
1611
1612 if (filter_flags & NOTE_CRITICAL) {
1613 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1614 } else if (filter_flags & NOTE_BACKGROUND) {
1615 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1616 } else {
1617 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1618 }
1619
1620 if (filter_flags & NOTE_LEEWAY) {
1621 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1622 }
1623
1624 if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
1625 timer_flags |= THREAD_CALL_CONTINUOUS;
1626 }
1627
1628 /*
1629 * Move to ARMED.
1630 *
1631 * We increase the gencount, and setup the thread call with this expected
1632 * state. It means that if there was a previous generation of the timer in
1633 * flight that needs to be ignored, then 3 things are possible:
1634 *
	 *   - the timer fires first, filt_timerexpire() runs and sets the state to FIRED
1636 * but we clobber it with ARMED and a new gencount. The knote will still
1637 * be activated, but filt_timerprocess() which is serialized with this
1638 * call will not see the FIRED bit set and will not deliver an event.
1639 *
1640 * - this code runs first, but filt_timerexpire() comes second. Because it
1641 * knows an old gencount, it will debounce and not activate the knote.
1642 *
1643 * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below
1644 * will just cancel it properly.
1645 *
1646 * This is important as userspace expects to never be woken up for past
1647 * timers after filt_timertouch ran.
1648 */
1649 state = os_atomic_load(&kn->kn_hook32, relaxed);
1650 state &= ~TIMER_STATE_MASK;
1651 state += TIMER_GEN_INC + TIMER_ARMED;
1652 os_atomic_store(&kn->kn_hook32, state, relaxed);
1653
1654 thread_call_enter_delayed_with_leeway(kn->kn_thcall,
1655 (void *)(uintptr_t)state, deadline, leeway, timer_flags);
1656 }
1657
1658 /*
1659 * Mark a timer as "already fired" when it is being reprogrammed
1660 *
1661 * If there is a timer in flight, this will do a best effort at canceling it,
1662 * but will not wait. If the thread call was in flight, having set the
1663 * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this
1664 * cancelation.
1665 */
1666 static void
filt_timerfire_immediate(struct knote *kn)
1668 {
1669 uint32_t state;
1670
1671 static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK,
1672 "validate that this atomic or will transition to IMMEDIATE");
1673 state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1674
1675 if ((state & TIMER_STATE_MASK) == TIMER_ARMED) {
1676 thread_call_cancel(kn->kn_thcall);
1677 }
1678 }
1679
1680 /*
1681 * Allocate a thread call for the knote's lifetime, and kick off the timer.
1682 */
1683 static int
filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
1685 {
1686 thread_call_t callout;
1687 struct filt_timer_params params;
1688 int error;
1689
	if ((error = filt_timervalidate(kev, &params)) != 0) {
1691 knote_set_error(kn, error);
1692 return 0;
1693 }
1694
1695 callout = thread_call_allocate_with_options(filt_timerexpire,
1696 (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
1697 THREAD_CALL_OPTIONS_ONCE);
1698
1699 if (NULL == callout) {
1700 knote_set_error(kn, ENOMEM);
1701 return 0;
1702 }
1703
	filt_timer_set_params(kn, &params);
1705 kn->kn_thcall = callout;
1706 kn->kn_flags |= EV_CLEAR;
1707 os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
1708
1709 /* NOTE_ABSOLUTE implies EV_ONESHOT */
1710 if (kn->kn_sfflags & NOTE_ABSOLUTE) {
1711 kn->kn_flags |= EV_ONESHOT;
1712 }
1713
1714 if (filt_timer_is_ready(kn)) {
1715 os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1716 return FILTER_ACTIVE;
1717 } else {
1718 filt_timerarm(kn);
1719 return 0;
1720 }
1721 }
1722
1723 /*
1724 * Shut down the timer if it's running, and free the callout.
1725 */
1726 static void
filt_timerdetach(struct knote *kn)
1728 {
1729 __assert_only boolean_t freed;
1730
1731 /*
1732 * Unconditionally cancel to make sure there can't be any filt_timerexpire()
1733 * running anymore.
1734 */
1735 thread_call_cancel_wait(kn->kn_thcall);
1736 freed = thread_call_free(kn->kn_thcall);
1737 assert(freed);
1738 }
1739
1740 /*
1741 * filt_timertouch - update timer knote with new user input
1742 *
1743 * Cancel and restart the timer based on new user data. When
1744 * the user picks up a knote, clear the count of how many timer
1745 * pops have gone off (in kn_data).
1746 */
1747 static int
1748 filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
1749 {
1750 struct filt_timer_params params;
1751 uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
1752 int error;
1753
1754 if (kev->qos && (knote_get_kq(kn)->kq_state & KQ_WORKLOOP) &&
1755 !_pthread_priority_thread_qos(kev->qos)) {
1756 /* validate usage of FILTER_UPDATE_REQ_QOS */
1757 kev->flags |= EV_ERROR;
1758 kev->data = ERANGE;
1759 return 0;
1760 }
1761
1762 if (changed_flags & NOTE_ABSOLUTE) {
1763 kev->flags |= EV_ERROR;
1764 kev->data = EINVAL;
1765 return 0;
1766 }
1767
1768 if ((error = filt_timervalidate(kev, &params)) != 0) {
1769 kev->flags |= EV_ERROR;
1770 kev->data = error;
1771 return 0;
1772 }
1773
1774 /* capture the new values used to compute deadline */
1775 filt_timer_set_params(kn, &params);
1776 kn->kn_sfflags = kev->fflags;
1777
1778 if (filt_timer_is_ready(kn)) {
1779 filt_timerfire_immediate(kn);
1780 return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
1781 } else {
1782 filt_timerarm(kn);
1783 return FILTER_UPDATE_REQ_QOS;
1784 }
1785 }
1786
1787 /*
1788 * filt_timerprocess - query state of knote and snapshot event data
1789 *
1790 * Determine if the timer has fired in the past, snapshot the state
1791 * of the kevent for returning to user-space, and clear pending event
1792 * counters for the next time.
1793 */
1794 static int
1795 filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
1796 {
1797 uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed);
1798
1799 /*
1800 * filt_timerprocess is serialized with any filter routine except for
1801 * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
1802 * transition, and on success, activates the knote.
1803 *
1804 * Hence, we don't need atomic modifications of the state, only to peek at
1805 * whether we see any of the "FIRED" state, and if we do, it is safe to
1806 * do simple state machine transitions.
1807 */
1808 switch (state & TIMER_STATE_MASK) {
1809 case TIMER_IDLE:
1810 case TIMER_ARMED:
1811 /*
1812 * This can happen if a touch resets a timer that had fired
1813 * without being processed
1814 */
1815 return 0;
1816 }
1817
1818 os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed);
1819
1820 /*
1821 * Copy out the interesting kevent state,
1822 * but don't leak out the raw time calculations.
1823 *
1824 * TODO: potential enhancements - tell the user about:
1825 * - deadline to which this timer thought it was expiring
1826 * - return kn_sfflags in the fflags field so the client can know
1827 * under what flags the timer fired
1828 */
1829 knote_fill_kevent(kn, kev, 1);
1830 kev->ext[0] = 0;
1831 /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
1832
1833 if (kn->kn_sdata != 0) {
1834 /*
1835 * This is a 'repeating' timer, so we have to emit
1836 * how many intervals expired between the arm
1837 * and the process.
1838 *
1839 * A very strange style of interface, because
1840 * this could easily be done in the client...
1841 */
1842
1843 uint64_t now;
1844
1845 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1846 now = mach_continuous_time();
1847 } else {
1848 now = mach_absolute_time();
1849 }
1850
1851 uint64_t first_deadline = kn->kn_ext[0];
1852 uint64_t interval_abs = kn->kn_sdata;
1853 uint64_t orig_arm_time = first_deadline - interval_abs;
1854
1855 assert(now > orig_arm_time);
1856 assert(now > first_deadline);
1857
1858 uint64_t elapsed = now - orig_arm_time;
1859
1860 uint64_t num_fired = elapsed / interval_abs;
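/*
 * Worked example with illustrative numbers (not from the source): a timer
 * armed with a 10-tick interval has first_deadline == 10 and
 * orig_arm_time == 0.  If the knote is processed at now == 35, then
 * elapsed == 35 and num_fired == 3, and the re-arm below would target
 * first_deadline + 3 * interval_abs == 40, the end of the interval that
 * is currently in progress.
 */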
1861
1862 /*
1863 * To reach this code, we must have seen the timer pop
1864 * and be in repeating mode, so therefore it must have been
1865 * more than 'interval' time since the attach or last
1866 * successful touch.
1867 */
1868 assert(num_fired > 0);
1869
1870 /* report how many intervals have elapsed to the user */
1871 kev->data = (int64_t)num_fired;
1872
1873 /* We only need to re-arm the timer if it's not about to be destroyed */
1874 if ((kn->kn_flags & EV_ONESHOT) == 0) {
1875 /* fire at the end of the next interval */
1876 uint64_t new_deadline = first_deadline + num_fired * interval_abs;
1877
1878 assert(new_deadline > now);
1879
1880 kn->kn_ext[0] = new_deadline;
1881
1882 /*
1883 * This can't shortcut setting up the thread call, because
1884 * knote_process deactivates EV_CLEAR knotes unconditionally.
1885 */
1886 filt_timerarm(kn);
1887 }
1888 }
1889
1890 return FILTER_ACTIVE;
1891 }
1892
1893 SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
1894 .f_extended_codes = true,
1895 .f_attach = filt_timerattach,
1896 .f_detach = filt_timerdetach,
1897 .f_event = filt_bad_event,
1898 .f_touch = filt_timertouch,
1899 .f_process = filt_timerprocess,
1900 };
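/*
 * Sketch of how userspace typically drives EVFILT_TIMER (illustrative only,
 * not part of this file): a repeating 500ms timer registered with kevent(2),
 * which keeps firing until it is deleted or armed with EV_ONESHOT/NOTE_ABSOLUTE.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE,
 *	    NOTE_USECONDS, 500000, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * For repeating timers, the returned kev.data reports how many intervals
 * elapsed since the event was last collected (see filt_timerprocess above).
 */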
1901
1902 #pragma mark user_filtops
1903
1904 static int
1905 filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1906 {
1907 if (kn->kn_sfflags & NOTE_TRIGGER) {
1908 kn->kn_hook32 = FILTER_ACTIVE;
1909 } else {
1910 kn->kn_hook32 = 0;
1911 }
1912 return kn->kn_hook32;
1913 }
1914
1915 static int
1916 filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
1917 {
1918 uint32_t ffctrl;
1919 int fflags;
1920
1921 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1922 fflags = kev->fflags & NOTE_FFLAGSMASK;
1923 switch (ffctrl) {
1924 case NOTE_FFNOP:
1925 break;
1926 case NOTE_FFAND:
1927 kn->kn_sfflags &= fflags;
1928 break;
1929 case NOTE_FFOR:
1930 kn->kn_sfflags |= fflags;
1931 break;
1932 case NOTE_FFCOPY:
1933 kn->kn_sfflags = fflags;
1934 break;
1935 }
1936 kn->kn_sdata = kev->data;
1937
1938 if (kev->fflags & NOTE_TRIGGER) {
1939 kn->kn_hook32 = FILTER_ACTIVE;
1940 }
1941 return (int)kn->kn_hook32;
1942 }
1943
1944 static int
1945 filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
1946 {
1947 int result = (int)kn->kn_hook32;
1948
1949 if (result) {
1950 /* EVFILT_USER returns the data that was passed in */
1951 knote_fill_kevent_with_sdata(kn, kev);
1952 kev->fflags = kn->kn_sfflags;
1953 if (kn->kn_flags & EV_CLEAR) {
1954 /* knote_fill_kevent cleared kn_fflags */
1955 kn->kn_hook32 = 0;
1956 }
1957 }
1958
1959 return result;
1960 }
1961
1962 SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
1963 .f_extended_codes = true,
1964 .f_attach = filt_userattach,
1965 .f_detach = filt_no_detach,
1966 .f_event = filt_bad_event,
1967 .f_touch = filt_usertouch,
1968 .f_process = filt_userprocess,
1969 };
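/*
 * Sketch of typical EVFILT_USER usage from userspace (illustrative only, not
 * part of this file): one thread registers the knote, another triggers it.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// register, initially inactive
 *	...
 *	EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// filt_usertouch activates it
 *
 * The NOTE_FFCTRLMASK bits (NOTE_FFAND / NOTE_FFOR / NOTE_FFCOPY) control how
 * the saved fflags are combined with the incoming ones in filt_usertouch.
 */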
1970
1971 #pragma mark workloop_filtops
1972
1973 #define EPREEMPTDISABLED (-1)
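/*
 * EPREEMPTDISABLED is not a real errno: it is an in-band sentinel returned by
 * filt_wlupdate() and filt_wlupdate_sync_ipc() to tell their callers that the
 * operation succeeded but left preemption disabled.  The callers (see
 * filt_wlattach and filt_wltouch below) translate it back into error == 0 and
 * FILTER_THREADREQ_NODEFEER; filt_wlallow_drop asserts it never sees it.
 */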
1974
1975 static inline void
1976 filt_wllock(struct kqworkloop *kqwl)
1977 {
1978 lck_spin_lock(&kqwl->kqwl_statelock);
1979 }
1980
1981 static inline void
1982 filt_wlunlock(struct kqworkloop *kqwl)
1983 {
1984 lck_spin_unlock(&kqwl->kqwl_statelock);
1985 }
1986
1987 /*
1988 * Returns true when the interlock for the turnstile is the workqueue lock
1989 *
1990 * When this is the case, all turnstile operations are delegated
1991 * to the workqueue subsystem.
1992 *
1993 * This is required because kqueue_threadreq_bind_prepost only holds the
1994 * workqueue lock but needs to move the inheritor from the workloop turnstile
1995 * away from the creator thread, so that this now fulfilled request cannot be
1996 * picked anymore by other threads.
1997 */
1998 static inline bool
1999 filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
2000 {
2001 return kqr_thread_requested_pending(&kqwl->kqwl_request);
2002 }
2003
2004 static void
2005 filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
2006 turnstile_update_flags_t flags)
2007 {
2008 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
2009 workq_threadreq_t kqr = &kqwl->kqwl_request;
2010
2011 /*
2012 * binding to the workq should always happen through
2013 * workq_kern_threadreq_update_inheritor()
2014 */
2015 assert(!filt_wlturnstile_interlock_is_workq(kqwl));
2016
2017 if ((inheritor = kqwl->kqwl_owner)) {
2018 flags |= TURNSTILE_INHERITOR_THREAD;
2019 } else if ((inheritor = kqr_thread(kqr))) {
2020 flags |= TURNSTILE_INHERITOR_THREAD;
2021 }
2022
2023 turnstile_update_inheritor(ts, inheritor, flags);
2024 }
2025
2026 #define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
2027 #define FILT_WLATTACH 0
2028 #define FILT_WLTOUCH 1
2029 #define FILT_WLDROP 2
2030
2031 __result_use_check
2032 static int
2033 filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
2034 struct kevent_qos_s *kev, kq_index_t qos_index, int op)
2035 {
2036 user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
2037 workq_threadreq_t kqr = &kqwl->kqwl_request;
2038 thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
2039 kq_index_t cur_override = THREAD_QOS_UNSPECIFIED;
2040 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2041 int action = KQWL_UTQ_NONE, error = 0;
2042 bool wl_inheritor_updated = false, needs_wake = false;
2043 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2044 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2045 uint64_t udata = 0;
2046 struct turnstile *ts = TURNSTILE_NULL;
2047
2048 filt_wllock(kqwl);
2049
2050 again:
2051 new_owner = cur_owner = kqwl->kqwl_owner;
2052
2053 /*
2054 * Phase 1:
2055 *
2056 * If asked, load the uint64 value at the user provided address and compare
2057 * it against the passed in mask and expected value.
2058 *
2059 * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
2060 * a thread reference.
2061 *
2062 * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
2063 * the current thread, then end ownership.
2064 *
2065 * Lastly decide whether we need to perform a QoS update.
2066 */
2067 if (uaddr) {
2068 /*
2069 * Until <rdar://problem/24999882> exists,
2070 * disabling preemption copyin forces any
2071 * vm_fault we encounter to fail.
2072 */
2073 error = copyin_atomic64(uaddr, &udata);
2074
2075 /*
2076 * If we get EFAULT, drop locks, and retry.
2077 * If we still get an error report it,
2078 * else assume the memory has been faulted
2079 * and attempt to copyin under lock again.
2080 */
2081 switch (error) {
2082 case 0:
2083 break;
2084 case EFAULT:
2085 if (efault_retry-- > 0) {
2086 filt_wlunlock(kqwl);
2087 error = copyin_atomic64(uaddr, &udata);
2088 filt_wllock(kqwl);
2089 if (error == 0) {
2090 goto again;
2091 }
2092 }
2093 OS_FALLTHROUGH;
2094 default:
2095 goto out;
2096 }
2097
2098 /* Update state as copied in. */
2099 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2100
2101 if ((udata & mask) != (kdata & mask)) {
2102 error = ESTALE;
2103 } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
2104 /*
2105 * Decipher the owner port name, and translate accordingly.
2106 * The low 2 bits were borrowed for other flags, so mask them off.
2107 *
2108 * Then attempt translation to a thread reference or fail.
2109 */
2110 mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
2111 if (name != MACH_PORT_NULL) {
2112 name = ipc_entry_name_mask(name);
2113 extra_thread_ref = port_name_to_thread(name,
2114 PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2115 if (extra_thread_ref == THREAD_NULL) {
2116 error = EOWNERDEAD;
2117 goto out;
2118 }
2119 new_owner = extra_thread_ref;
2120 }
2121 }
2122 }
2123
2124 if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
2125 new_owner = THREAD_NULL;
2126 }
2127
2128 if (error == 0) {
2129 if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
2130 action = KQWL_UTQ_SET_QOS_INDEX;
2131 } else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
2132 action = KQWL_UTQ_SET_QOS_INDEX;
2133 }
2134
2135 if (op == FILT_WLTOUCH) {
2136 /*
2137 * Save off any additional fflags/data we just accepted,
2138 * but only keep the last round of "update" bits we acted on, which helps
2139 * debugging a lot.
2140 */
2141 kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
2142 kn->kn_sfflags |= kev->fflags;
2143 if (kev->fflags & NOTE_WL_SYNC_WAKE) {
2144 needs_wake = (kn->kn_thread != THREAD_NULL);
2145 }
2146 } else if (op == FILT_WLDROP) {
2147 if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
2148 NOTE_WL_SYNC_WAIT) {
2149 /*
2150 * When deleting a SYNC_WAIT knote that hasn't been woken up
2151 * explicitly, issue a wake up.
2152 */
2153 kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
2154 needs_wake = (kn->kn_thread != THREAD_NULL);
2155 }
2156 }
2157 }
2158
2159 /*
2160 * Phase 2:
2161 *
2162 * Commit ownership and QoS changes if any, possibly wake up waiters
2163 */
2164
2165 if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
2166 goto out;
2167 }
2168
2169 kqlock(kqwl);
2170
2171 /* If already tracked as servicer, don't track as owner */
2172 if (new_owner == kqr_thread(kqr)) {
2173 new_owner = THREAD_NULL;
2174 }
2175
2176 if (cur_owner != new_owner) {
2177 kqwl->kqwl_owner = new_owner;
2178 if (new_owner == extra_thread_ref) {
2179 /* we just transferred this ref to kqwl_owner */
2180 extra_thread_ref = THREAD_NULL;
2181 }
2182 cur_override = kqworkloop_override(kqwl);
2183
2184 if (new_owner) {
2185 /* override it before we drop the old */
2186 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2187 thread_add_kevent_override(new_owner, cur_override);
2188 }
2189 if (kqr_thread_requested_pending(kqr)) {
2190 if (action == KQWL_UTQ_NONE) {
2191 action = KQWL_UTQ_REDRIVE_EVENTS;
2192 }
2193 }
2194 } else if (action == KQWL_UTQ_NONE &&
2195 !kqr_thread_requested(kqr) &&
2196 kqwl->kqwl_wakeup_qos) {
2197 action = KQWL_UTQ_REDRIVE_EVENTS;
2198 }
2199 }
2200
2201 if (action != KQWL_UTQ_NONE) {
2202 kqworkloop_update_threads_qos(kqwl, action, qos_index);
2203 }
2204
2205 ts = kqwl->kqwl_turnstile;
2206 if (cur_owner != new_owner && ts) {
2207 if (action == KQWL_UTQ_REDRIVE_EVENTS) {
2208 /*
2209 * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
2210 * the code went through workq_kern_threadreq_initiate()
2211 * and the workqueue has set the inheritor already
2212 */
2213 assert(filt_wlturnstile_interlock_is_workq(kqwl));
2214 } else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2215 workq_kern_threadreq_lock(kqwl->kqwl_p);
2216 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
2217 ts, TURNSTILE_IMMEDIATE_UPDATE);
2218 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2219 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2220 /*
2221 * If the workq is no longer the interlock, then
2222 * workq_kern_threadreq_update_inheritor() has finished a bind
2223 * and we need to fallback to the regular path.
2224 */
2225 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2226 }
2227 wl_inheritor_updated = true;
2228 } else {
2229 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2230 wl_inheritor_updated = true;
2231 }
2232
2233 /*
2234 * We need a turnstile reference because we are dropping the interlock
2235 * and the caller has not called turnstile_prepare.
2236 */
2237 if (wl_inheritor_updated) {
2238 turnstile_reference(ts);
2239 }
2240 }
2241
2242 if (needs_wake && ts) {
2243 waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
2244 kn->kn_thread, THREAD_AWAKENED);
2245 if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
2246 disable_preemption();
2247 error = EPREEMPTDISABLED;
2248 }
2249 }
2250
2251 kqunlock(kqwl);
2252
2253 out:
2254 /*
2255 * Phase 3:
2256 *
2257 * Unlock and cleanup various lingering references and things.
2258 */
2259 filt_wlunlock(kqwl);
2260
2261 #if CONFIG_WORKLOOP_DEBUG
2262 KQWL_HISTORY_WRITE_ENTRY(kqwl, {
2263 .updater = current_thread(),
2264 .servicer = kqr_thread(kqr), /* Note: racy */
2265 .old_owner = cur_owner,
2266 .new_owner = new_owner,
2267
2268 .kev_ident = kev->ident,
2269 .error = (int16_t)error,
2270 .kev_flags = kev->flags,
2271 .kev_fflags = kev->fflags,
2272
2273 .kev_mask = mask,
2274 .kev_value = kdata,
2275 .in_value = udata,
2276 });
2277 #endif // CONFIG_WORKLOOP_DEBUG
2278
2279 if (wl_inheritor_updated) {
2280 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2281 turnstile_deallocate_safe(ts);
2282 }
2283
2284 if (cur_owner && new_owner != cur_owner) {
2285 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2286 thread_drop_kevent_override(cur_owner);
2287 }
2288 thread_deallocate_safe(cur_owner);
2289 }
2290 if (extra_thread_ref) {
2291 thread_deallocate_safe(extra_thread_ref);
2292 }
2293 return error;
2294 }
2295
2296 /*
2297 * Remembers the last update that came in from userspace for debugging reasons.
2298 * - fflags is mirrored from the userspace kevent
2299 * - ext[i, i != VALUE] is mirrored from the userspace kevent
2300 * - ext[VALUE] is set to what the kernel loaded atomically
2301 * - data is set to the error if any
2302 */
2303 static inline void
2304 filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
2305 int error)
2306 {
2307 kn->kn_fflags = kev->fflags;
2308 kn->kn_sdata = error;
2309 memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
2310 }
2311
2312 static int
2313 filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
2314 struct kevent_qos_s *kev, int op)
2315 {
2316 user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR];
2317 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2318 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2319 uint64_t udata = 0;
2320 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2321 int error = 0;
2322
2323 if (op == FILT_WLATTACH) {
2324 (void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
2325 } else if (uaddr == 0) {
2326 return 0;
2327 }
2328
2329 filt_wllock(kqwl);
2330
2331 again:
2332
2333 /*
2334 * Do the debounce thing, the lock serializing the state is the knote lock.
2335 */
2336 if (uaddr) {
2337 /*
2338 * Until <rdar://problem/24999882> exists,
2339 * disabling preemption copyin forces any
2340 * vm_fault we encounter to fail.
2341 */
2342 error = copyin_atomic64(uaddr, &udata);
2343
2344 /*
2345 * If we get EFAULT, drop locks, and retry.
2346 * If we still get an error report it,
2347 * else assume the memory has been faulted
2348 * and attempt to copyin under lock again.
2349 */
2350 switch (error) {
2351 case 0:
2352 break;
2353 case EFAULT:
2354 if (efault_retry-- > 0) {
2355 filt_wlunlock(kqwl);
2356 error = copyin_atomic64(uaddr, &udata);
2357 filt_wllock(kqwl);
2358 if (error == 0) {
2359 goto again;
2360 }
2361 }
2362 OS_FALLTHROUGH;
2363 default:
2364 goto out;
2365 }
2366
2367 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2368 kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;
2369
2370 if ((udata & mask) != (kdata & mask)) {
2371 error = ESTALE;
2372 goto out;
2373 }
2374 }
2375
2376 if (op == FILT_WLATTACH) {
2377 error = filt_wlattach_sync_ipc(kn);
2378 if (error == 0) {
2379 disable_preemption();
2380 error = EPREEMPTDISABLED;
2381 }
2382 }
2383
2384 out:
2385 filt_wlunlock(kqwl);
2386 return error;
2387 }
2388
2389 static int
2390 filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
2391 {
2392 struct kqueue *kq = knote_get_kq(kn);
2393 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2394 int error = 0, result = 0;
2395 kq_index_t qos_index = 0;
2396
2397 if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
2398 error = ENOTSUP;
2399 goto out;
2400 }
2401
2402 uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
2403 switch (command) {
2404 case NOTE_WL_THREAD_REQUEST:
2405 if (kn->kn_id != kqwl->kqwl_dynamicid) {
2406 error = EINVAL;
2407 goto out;
2408 }
2409 qos_index = _pthread_priority_thread_qos(kn->kn_qos);
2410 if (qos_index == THREAD_QOS_UNSPECIFIED) {
2411 error = ERANGE;
2412 goto out;
2413 }
2414 if (kqwl->kqwl_request.tr_kq_qos_index) {
2415 /*
2416 * There already is a thread request, and well, you're only allowed
2417 * one per workloop, so fail the attach.
2418 */
2419 error = EALREADY;
2420 goto out;
2421 }
2422 break;
2423 case NOTE_WL_SYNC_WAIT:
2424 case NOTE_WL_SYNC_WAKE:
2425 if (kn->kn_id == kqwl->kqwl_dynamicid) {
2426 error = EINVAL;
2427 goto out;
2428 }
2429 if ((kn->kn_flags & EV_DISABLE) == 0) {
2430 error = EINVAL;
2431 goto out;
2432 }
2433 if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
2434 error = EINVAL;
2435 goto out;
2436 }
2437 break;
2438
2439 case NOTE_WL_SYNC_IPC:
2440 if ((kn->kn_flags & EV_DISABLE) == 0) {
2441 error = EINVAL;
2442 goto out;
2443 }
2444 if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
2445 error = EINVAL;
2446 goto out;
2447 }
2448 break;
2449 default:
2450 error = EINVAL;
2451 goto out;
2452 }
2453
2454 if (command == NOTE_WL_SYNC_IPC) {
2455 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
2456 } else {
2457 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
2458 }
2459
2460 if (error == EPREEMPTDISABLED) {
2461 error = 0;
2462 result = FILTER_THREADREQ_NODEFEER;
2463 }
2464 out:
2465 if (error) {
2466 /* If userland wants ESTALE to be hidden, fail the attach anyway */
2467 if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
2468 error = 0;
2469 }
2470 knote_set_error(kn, error);
2471 return result;
2472 }
2473 if (command == NOTE_WL_SYNC_WAIT) {
2474 return kevent_register_wait_prepare(kn, kev, result);
2475 }
2476 /* Just attaching the thread request successfully will fire it */
2477 if (command == NOTE_WL_THREAD_REQUEST) {
2478 /*
2479 * Thread Request knotes need an explicit touch to be active again,
2480 * so delivering an event needs to also consume it.
2481 */
2482 kn->kn_flags |= EV_CLEAR;
2483 return result | FILTER_ACTIVE;
2484 }
2485 return result;
2486 }
2487
2488 static void __dead2
2489 filt_wlwait_continue(void *parameter, wait_result_t wr)
2490 {
2491 struct _kevent_register *cont_args = parameter;
2492 struct kqworkloop *kqwl = cont_args->kqwl;
2493
2494 kqlock(kqwl);
2495 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2496 workq_kern_threadreq_lock(kqwl->kqwl_p);
2497 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2498 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2499 } else {
2500 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2501 }
2502 kqunlock(kqwl);
2503
2504 turnstile_cleanup();
2505
2506 if (wr == THREAD_INTERRUPTED) {
2507 cont_args->kev.flags |= EV_ERROR;
2508 cont_args->kev.data = EINTR;
2509 } else if (wr != THREAD_AWAKENED) {
2510 panic("Unexpected wait result: %d", wr);
2511 }
2512
2513 kevent_register_wait_return(cont_args);
2514 }
2515
2516 /*
2517 * Called with the workloop mutex held; most of the time this never returns,
2518 * as it calls filt_wlwait_continue through a continuation.
2519 */
2520 static void __dead2
2521 filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
2522 struct _kevent_register *cont_args)
2523 {
2524 struct kqworkloop *kqwl = cont_args->kqwl;
2525 workq_threadreq_t kqr = &kqwl->kqwl_request;
2526 struct turnstile *ts;
2527 bool workq_locked = false;
2528
2529 kqlock_held(kqwl);
2530
2531 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2532 workq_kern_threadreq_lock(kqwl->kqwl_p);
2533 workq_locked = true;
2534 }
2535
2536 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
2537 TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
2538
2539 if (workq_locked) {
2540 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
2541 &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
2542 TURNSTILE_DELAYED_UPDATE);
2543 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2544 /*
2545 * if the interlock is no longer the workqueue lock,
2546 * then we don't need to hold it anymore.
2547 */
2548 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2549 workq_locked = false;
2550 }
2551 }
2552 if (!workq_locked) {
2553 /*
2554 * If the interlock is the workloop's, then it's our responsibility to
2555 * call update_inheritor, so just do it.
2556 */
2557 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
2558 }
2559
2560 thread_set_pending_block_hint(get_machthread(uth), kThreadWaitWorkloopSyncWait);
2561 waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
2562 THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
2563
2564 if (workq_locked) {
2565 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2566 }
2567
2568 thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
2569 if (thread) {
2570 thread_reference(thread);
2571 }
2572
2573 kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
2574 }
2575
2576 /* called in stackshot context to report the thread responsible for blocking this thread */
2577 void
2578 kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
2579 event64_t event, thread_waitinfo_t *waitinfo)
2580 {
2581 struct knote *kn = (struct knote *)event;
2582
2583 zone_require(knote_zone, kn);
2584
2585 assert(kn->kn_thread == thread);
2586
2587 struct kqueue *kq = knote_get_kq(kn);
2588
2589 zone_require(kqworkloop_zone, kq);
2590 assert(kq->kq_state & KQ_WORKLOOP);
2591
2592 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2593 workq_threadreq_t kqr = &kqwl->kqwl_request;
2594
2595 thread_t kqwl_owner = kqwl->kqwl_owner;
2596
2597 if (kqwl_owner != THREAD_NULL) {
2598 thread_require(kqwl_owner);
2599 waitinfo->owner = thread_tid(kqwl->kqwl_owner);
2600 } else if ((kqr->tr_state >= WORKQ_TR_STATE_BINDING) && (kqr->tr_thread != NULL)) {
2601 thread_require(kqr->tr_thread);
2602 waitinfo->owner = thread_tid(kqr->tr_thread);
2603 } else if (kqr_thread_requested_pending(kqr)) { /* > idle, < bound */
2604 waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
2605 } else {
2606 waitinfo->owner = 0;
2607 }
2608
2609 waitinfo->context = kqwl->kqwl_dynamicid;
2610 }
2611
2612 static void
2613 filt_wldetach(struct knote *kn)
2614 {
2615 if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
2616 filt_wldetach_sync_ipc(kn);
2617 } else if (kn->kn_thread) {
2618 kevent_register_wait_cleanup(kn);
2619 }
2620 }
2621
2622 static int
2623 filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
2624 thread_qos_t *qos_index)
2625 {
2626 uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
2627 uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
2628
2629 if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
2630 return EINVAL;
2631 }
2632 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2633 if (kev->flags & EV_DELETE) {
2634 return EINVAL;
2635 }
2636 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2637 return EINVAL;
2638 }
2639 if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
2640 return ERANGE;
2641 }
2642 }
2643
2644 switch (new_commands) {
2645 case NOTE_WL_THREAD_REQUEST:
2646 /* thread requests can only update themselves */
2647 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2648 return EINVAL;
2649 }
2650 break;
2651
2652 case NOTE_WL_SYNC_WAIT:
2653 if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
2654 return EINVAL;
2655 }
2656 goto sync_checks;
2657
2658 case NOTE_WL_SYNC_WAKE:
2659 sync_checks:
2660 if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
2661 return EINVAL;
2662 }
2663 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2664 return EINVAL;
2665 }
2666 break;
2667
2668 case NOTE_WL_SYNC_IPC:
2669 if (sav_commands != NOTE_WL_SYNC_IPC) {
2670 return EINVAL;
2671 }
2672 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2673 return EINVAL;
2674 }
2675 break;
2676
2677 default:
2678 return EINVAL;
2679 }
2680 return 0;
2681 }
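/*
 * In short, the checks above only let a kevent keep the command it was
 * attached with: NOTE_WL_THREAD_REQUEST and NOTE_WL_SYNC_IPC knotes may only
 * be updated as themselves, NOTE_WL_SYNC_WAIT/WAKE updates must target a knote
 * attached as SYNC_WAIT or SYNC_WAKE, and re-enabling a sync knote without
 * deleting it is rejected.
 */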
2682
2683 static int
2684 filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
2685 {
2686 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2687 thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
2688 int result = 0;
2689
2690 int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
2691 if (error) {
2692 goto out;
2693 }
2694
2695 uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
2696 if (command == NOTE_WL_SYNC_IPC) {
2697 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
2698 } else {
2699 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
2700 filt_wlremember_last_update(kn, kev, error);
2701 }
2702 if (error == EPREEMPTDISABLED) {
2703 error = 0;
2704 result = FILTER_THREADREQ_NODEFEER;
2705 }
2706
2707 out:
2708 if (error) {
2709 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2710 /* If userland wants ESTALE to be hidden, do not activate */
2711 return result;
2712 }
2713 kev->flags |= EV_ERROR;
2714 kev->data = error;
2715 return result;
2716 }
2717 if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
2718 return kevent_register_wait_prepare(kn, kev, result);
2719 }
2720 /* Just touching the thread request successfully will fire it */
2721 if (command == NOTE_WL_THREAD_REQUEST) {
2722 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2723 result |= FILTER_UPDATE_REQ_QOS;
2724 }
2725 result |= FILTER_ACTIVE;
2726 }
2727 return result;
2728 }
2729
2730 static bool
2731 filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
2732 {
2733 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2734
2735 int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
2736 if (error) {
2737 goto out;
2738 }
2739
2740 uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
2741 if (command == NOTE_WL_SYNC_IPC) {
2742 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
2743 } else {
2744 error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
2745 filt_wlremember_last_update(kn, kev, error);
2746 }
2747 assert(error != EPREEMPTDISABLED);
2748
2749 out:
2750 if (error) {
2751 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2752 return false;
2753 }
2754 kev->flags |= EV_ERROR;
2755 kev->data = error;
2756 return false;
2757 }
2758 return true;
2759 }
2760
2761 static int
2762 filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
2763 {
2764 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2765 int rc = 0;
2766
2767 assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
2768
2769 kqlock(kqwl);
2770
2771 if (kqwl->kqwl_owner) {
2772 /*
2773 * <rdar://problem/33584321> userspace can sometimes cause the thread
2774 * request knote to be processed when events are delivered without
2775 * triggering a drain session.
2776 *
2777 * When that happens, the automatic deactivation due to process
2778 * would swallow the event, so we have to activate the knote again.
2779 */
2780 knote_activate(kqwl, kn, FILTER_ACTIVE);
2781 } else {
2782 #if DEBUG || DEVELOPMENT
2783 if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
2784 /*
2785 * see src/queue_internal.h in libdispatch
2786 */
2787 #define DISPATCH_QUEUE_ENQUEUED 0x1ull
2788 user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
2789 task_t t = current_task();
2790 uint64_t val;
2791 if (addr && task_is_active(t) && !task_is_halting(t) &&
2792 copyin_atomic64(addr, &val) == 0 &&
2793 val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
2794 (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
2795 panic("kevent: workloop %#016llx is not enqueued "
2796 "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
2797 kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
2798 }
2799 }
2800 #endif
2801 knote_fill_kevent(kn, kev, 0);
2802 kev->fflags = kn->kn_sfflags;
2803 rc |= FILTER_ACTIVE;
2804 }
2805
2806 kqunlock(kqwl);
2807
2808 if (rc & FILTER_ACTIVE) {
2809 workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
2810 }
2811 return rc;
2812 }
2813
2814 SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
2815 .f_extended_codes = true,
2816 .f_attach = filt_wlattach,
2817 .f_detach = filt_wldetach,
2818 .f_event = filt_bad_event,
2819 .f_touch = filt_wltouch,
2820 .f_process = filt_wlprocess,
2821 .f_allow_drop = filt_wlallow_drop,
2822 .f_post_register_wait = filt_wlpost_register_wait,
2823 };
2824
2825 #pragma mark - kqueues allocation and deallocation
2826
2827 OS_NOINLINE
2828 static void
2829 kqworkloop_dealloc(struct kqworkloop *, bool hash_remove);
2830
2831 static inline bool
2832 kqworkloop_try_retain(struct kqworkloop *kqwl)
2833 {
2834 return os_ref_retain_try_raw(&kqwl->kqwl_retains, NULL);
2835 }
2836
2837 static inline void
2838 kqworkloop_retain(struct kqworkloop *kqwl)
2839 {
2840 return os_ref_retain_raw(&kqwl->kqwl_retains, NULL);
2841 }
2842
2843 OS_ALWAYS_INLINE
2844 static inline void
2845 kqueue_retain(kqueue_t kqu)
2846 {
2847 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2848 kqworkloop_retain(kqu.kqwl);
2849 }
2850 }
2851
2852 OS_ALWAYS_INLINE
2853 static inline void
2854 kqworkloop_release_live(struct kqworkloop *kqwl)
2855 {
2856 os_ref_release_live_raw(&kqwl->kqwl_retains, NULL);
2857 }
2858
2859 OS_ALWAYS_INLINE
2860 static inline void
2861 kqueue_release_live(kqueue_t kqu)
2862 {
2863 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2864 kqworkloop_release_live(kqu.kqwl);
2865 }
2866 }
2867
2868 OS_ALWAYS_INLINE
2869 static inline void
2870 kqworkloop_release(struct kqworkloop *kqwl)
2871 {
2872 if (os_ref_release_raw(&kqwl->kqwl_retains, NULL) == 0) {
2873 kqworkloop_dealloc(kqwl, true);
2874 }
2875 }
2876
2877 OS_ALWAYS_INLINE
2878 static inline void
2879 kqueue_release(kqueue_t kqu)
2880 {
2881 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2882 kqworkloop_release(kqu.kqwl);
2883 }
2884 }
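/*
 * Only dynamically allocated workloops (KQ_DYNAMIC) are reference counted:
 * kqfiles and the per-process workq kqueue are covered by their file
 * descriptor or process lifetime, so the kqueue_retain()/kqueue_release()
 * wrappers above are no-ops for them.
 */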
2885
2886 /*!
2887 * @function kqueue_destroy
2888 *
2889 * @brief
2890 * Common part to all kqueue dealloc functions.
2891 */
2892 OS_NOINLINE
2893 static void
2894 kqueue_destroy(kqueue_t kqu, zone_t zone)
2895 {
2896 lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp);
2897
2898 zfree(zone, kqu.kq);
2899 }
2900
2901 /*!
2902 * @function kqueue_init
2903 *
2904 * @brief
2905 * Common part to all kqueue alloc functions.
2906 */
2907 static kqueue_t
2908 kqueue_init(kqueue_t kqu)
2909 {
2910 lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL);
2911 return kqu;
2912 }
2913
2914 #pragma mark kqfile allocation and deallocation
2915
2916 /*!
2917 * @function kqueue_dealloc
2918 *
2919 * @brief
2920 * Detach all knotes from a kqfile and free it.
2921 *
2922 * @discussion
2923 * We walk each list looking for knotes referencing
2924 * this kqueue. If we find one, we try to drop it. But
2925 * if we fail to get a drop reference, that will wait
2926 * until it is dropped. So, we can just restart again
2927 * safe in the assumption that the list will eventually
2928 * not contain any more references to this kqueue (either
2929 * we dropped them all, or someone else did).
2930 *
2931 * Assumes no new events are being added to the kqueue.
2932 * Nothing locked on entry or exit.
2933 */
2934 void
2935 kqueue_dealloc(struct kqueue *kq)
2936 {
2937 KNOTE_LOCK_CTX(knlc);
2938 struct proc *p = kq->kq_p;
2939 struct filedesc *fdp = &p->p_fd;
2940 struct knote *kn;
2941
2942 assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
2943
2944 proc_fdlock(p);
2945 for (int i = 0; i < fdp->fd_knlistsize; i++) {
2946 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2947 while (kn != NULL) {
2948 if (kq == knote_get_kq(kn)) {
2949 kqlock(kq);
2950 proc_fdunlock(p);
2951 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2952 knote_drop(kq, kn, &knlc);
2953 }
2954 proc_fdlock(p);
2955 /* start over at beginning of list */
2956 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2957 continue;
2958 }
2959 kn = SLIST_NEXT(kn, kn_link);
2960 }
2961 }
2962
2963 knhash_lock(fdp);
2964 proc_fdunlock(p);
2965
2966 if (fdp->fd_knhashmask != 0) {
2967 for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
2968 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2969 while (kn != NULL) {
2970 if (kq == knote_get_kq(kn)) {
2971 kqlock(kq);
2972 knhash_unlock(fdp);
2973 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2974 knote_drop(kq, kn, &knlc);
2975 }
2976 knhash_lock(fdp);
2977 /* start over at beginning of list */
2978 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2979 continue;
2980 }
2981 kn = SLIST_NEXT(kn, kn_link);
2982 }
2983 }
2984 }
2985 knhash_unlock(fdp);
2986
2987 kqueue_destroy(kq, kqfile_zone);
2988 }
2989
2990 /*!
2991 * @function kqueue_alloc
2992 *
2993 * @brief
2994 * Allocate a kqfile.
2995 */
2996 struct kqueue *
2997 kqueue_alloc(struct proc *p)
2998 {
2999 struct kqfile *kqf;
3000
3001 /*
3002 * kqfiles are created with kqueue() so we need to wait for
3003 * the first kevent syscall to know which bit among
3004 * KQ_KEV_{32,64,QOS} will be set in kqf_state
3005 */
3006 kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO);
3007 kqf->kqf_p = p;
3008 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
3009 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);
3010
3011 return kqueue_init(kqf).kq;
3012 }
3013
3014 /*!
3015 * @function kqueue_internal
3016 *
3017 * @brief
3018 * Core implementation for kqueue and guarded_kqueue_np()
3019 */
3020 int
3021 kqueue_internal(struct proc *p, fp_initfn_t fp_init, void *initarg, int32_t *retval)
3022 {
3023 struct kqueue *kq;
3024 struct fileproc *fp;
3025 int fd, error;
3026
3027 error = falloc_withinit(p, &fp, &fd, vfs_context_current(),
3028 fp_init, initarg);
3029 if (error) {
3030 return error;
3031 }
3032
3033 kq = kqueue_alloc(p);
3034 if (kq == NULL) {
3035 fp_free(p, fd, fp);
3036 return ENOMEM;
3037 }
3038
3039 fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
3040 fp->f_flag = FREAD | FWRITE;
3041 fp->f_ops = &kqueueops;
3042 fp_set_data(fp, kq);
3043 fp->f_lflags |= FG_CONFINED;
3044
3045 proc_fdlock(p);
3046 procfdtbl_releasefd(p, fd, NULL);
3047 fp_drop(p, fd, fp, 1);
3048 proc_fdunlock(p);
3049
3050 *retval = fd;
3051 return error;
3052 }
3053
3054 /*!
3055 * @function kqueue
3056 *
3057 * @brief
3058 * The kqueue syscall.
3059 */
3060 int
3061 kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
3062 {
3063 return kqueue_internal(p, NULL, NULL, retval);
3064 }
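/*
 * From userspace this is simply (illustrative only, not part of this file):
 *
 *	int kq = kqueue();	// lands in kqueue_internal() above
 *	if (kq < 0) err(1, "kqueue");
 *
 * The returned descriptor is created close-on-exec/close-on-fork and confined
 * to the creating process (FP_CLOEXEC | FP_CLOFORK and FG_CONFINED above).
 */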
3065
3066 #pragma mark kqworkq allocation and deallocation
3067
3068 /*!
3069 * @function kqworkq_dealloc
3070 *
3071 * @brief
3072 * Deallocates a workqueue kqueue.
3073 *
3074 * @discussion
3075 * This only happens at process death, or for races with concurrent
3076 * kevent_get_kqwq calls, hence we don't have to care about knotes referencing
3077 * this kqueue: either there are none, or someone else took care of them.
3078 */
3079 void
3080 kqworkq_dealloc(struct kqworkq *kqwq)
3081 {
3082 kqueue_destroy(kqwq, kqworkq_zone);
3083 }
3084
3085 /*!
3086 * @function kqworkq_alloc
3087 *
3088 * @brief
3089 * Allocates a workqueue kqueue.
3090 *
3091 * @discussion
3092 * This is the slow path of kevent_get_kqwq.
3093 * This takes care of making sure procs have a single workq kqueue.
3094 */
3095 OS_NOINLINE
3096 static struct kqworkq *
3097 kqworkq_alloc(struct proc *p, unsigned int flags)
3098 {
3099 struct kqworkq *kqwq, *tmp;
3100
3101 kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO);
3102
3103 assert((flags & KEVENT_FLAG_LEGACY32) == 0);
3104 if (flags & KEVENT_FLAG_LEGACY64) {
3105 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
3106 } else {
3107 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
3108 }
3109 kqwq->kqwq_p = p;
3110
3111 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3112 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
3113 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
3114 }
3115 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3116 /*
3117 * Because of how the bucketized system works, we mix overcommit
3118 * sources with not overcommit: each time we move a knote from
3119 * one bucket to the next due to overrides, we'd have to track
3120 * overcommitness, and it's really not worth it in a world where
3121 * workloops track this faithfully.
3122 *
3123 * Incidentally, this behaves like the original manager-based
3124 * kqwq where event delivery always happened (hence is
3125 * "overcommit")
3126 */
3127 kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
3128 kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
3129 if (i != KQWQ_QOS_MANAGER) {
3130 kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
3131 }
3132 kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i + 1;
3133 }
3134
3135 kqueue_init(kqwq);
3136
3137 if (!os_atomic_cmpxchgv(&p->p_fd.fd_wqkqueue, NULL, kqwq, &tmp, release)) {
3138 kqworkq_dealloc(kqwq);
3139 return tmp;
3140 }
3141
3142 return kqwq;
3143 }
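/*
 * Note on the publication pattern above: the fully initialized kqwq is
 * installed into p_fd.fd_wqkqueue with a release cmpxchg; a racer that loses
 * frees its own allocation and adopts the winner's, which is how each process
 * ends up with exactly one workq kqueue.
 */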
3144
3145 #pragma mark kqworkloop allocation and deallocation
3146
3147 #define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
3148 #define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
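/*
 * KQ_HASH XORs the id with itself shifted right by 8 bits before masking, so
 * workloop ids that differ only in their second byte don't all collide.
 * Worked example (illustrative): with fd_kqhashmask == 0xff, id 0x1234 hashes
 * to (0x1234 ^ 0x12) & 0xff == 0x26.
 */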
3149
3150 OS_ALWAYS_INLINE
3151 static inline void
3152 kqhash_lock(struct filedesc *fdp)
3153 {
3154 lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
3155 }
3156
3157 OS_ALWAYS_INLINE
3158 static inline void
3159 kqhash_unlock(struct filedesc *fdp)
3160 {
3161 lck_mtx_unlock(&fdp->fd_kqhashlock);
3162 }
3163
3164 OS_ALWAYS_INLINE
3165 static inline void
3166 kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
3167 struct kqworkloop *kqwl)
3168 {
3169 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3170 LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
3171 }
3172
3173 OS_ALWAYS_INLINE
3174 static inline struct kqworkloop *
3175 kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
3176 {
3177 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3178 struct kqworkloop *kqwl;
3179
3180 LIST_FOREACH(kqwl, list, kqwl_hashlink) {
3181 if (kqwl->kqwl_dynamicid == id) {
3182 return kqwl;
3183 }
3184 }
3185 return NULL;
3186 }
3187
3188 static struct kqworkloop *
3189 kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
3190 {
3191 struct kqworkloop *kqwl = NULL;
3192
3193 kqhash_lock(fdp);
3194 if (__probable(fdp->fd_kqhash)) {
3195 kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
3196 if (kqwl && !kqworkloop_try_retain(kqwl)) {
3197 kqwl = NULL;
3198 }
3199 }
3200 kqhash_unlock(fdp);
3201 return kqwl;
3202 }
3203
3204 OS_NOINLINE
3205 static void
3206 kqworkloop_hash_init(struct filedesc *fdp)
3207 {
3208 struct kqwllist *alloc_hash;
3209 u_long alloc_mask;
3210
3211 kqhash_unlock(fdp);
3212 alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
3213 kqhash_lock(fdp);
3214
3215 /* See if we won the race */
3216 if (__probable(fdp->fd_kqhashmask == 0)) {
3217 fdp->fd_kqhash = alloc_hash;
3218 fdp->fd_kqhashmask = alloc_mask;
3219 } else {
3220 kqhash_unlock(fdp);
3221 hashdestroy(alloc_hash, M_KQUEUE, alloc_mask);
3222 kqhash_lock(fdp);
3223 }
3224 }
3225
3226 /*
3227 * The kqueue iotier override is only supported for a kqueue that has
3228 * a single port as its only mach port source. Updating the iotier
3229 * override on that mach port source updates the override
3230 * on the kqueue as well. Since a kqueue with an iotier override
3231 * only has one port attached, there is no saturation logic like
3232 * there is for the QoS override; the iotier override of the mach
3233 * port source is reflected directly in the kevent iotier override.
3234 */
3235 void
3236 kqueue_set_iotier_override(kqueue_t kqu, uint8_t iotier_override)
3237 {
3238 if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3239 return;
3240 }
3241
3242 struct kqworkloop *kqwl = kqu.kqwl;
3243 os_atomic_store(&kqwl->kqwl_iotier_override, iotier_override, relaxed);
3244 }
3245
3246 uint8_t
3247 kqueue_get_iotier_override(kqueue_t kqu)
3248 {
3249 if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3250 return THROTTLE_LEVEL_END;
3251 }
3252
3253 struct kqworkloop *kqwl = kqu.kqwl;
3254 return os_atomic_load(&kqwl->kqwl_iotier_override, relaxed);
3255 }
3256
3257 #if CONFIG_PREADOPT_TG
3258 /*
3259 * This function is called with a borrowed reference on the thread group, without
3260 * the kq lock held but with the mqueue lock held. It may or may not have the knote lock
3261 * (called from both fevent as well as fattach/ftouch). Upon success, an
3262 * additional reference on the TG is taken
3263 */
3264 void
3265 kqueue_set_preadopted_thread_group(kqueue_t kqu, struct thread_group *tg, thread_qos_t qos)
3266 {
3267 if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3268 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_PREADOPT_NA),
3269 (uintptr_t)thread_tid(current_thread()), 0, 0, 0);
3270 return;
3271 }
3272
3273 struct kqworkloop *kqwl = kqu.kqwl;
3274
3275 assert(qos < THREAD_QOS_LAST);
3276
3277 thread_group_retain(tg);
3278
3279 thread_group_qos_t old_tg; thread_group_qos_t new_tg;
3280 int ret = os_atomic_rmw_loop(&kqwl->kqwl_preadopt_tg, old_tg, new_tg, relaxed, {
3281 if (!KQWL_CAN_ADOPT_PREADOPT_TG(old_tg)) {
3282 os_atomic_rmw_loop_give_up(break);
3283 }
3284
3285 if (old_tg != KQWL_PREADOPTED_TG_NULL) {
3286 /*
3287 * Note that old_tg could be a NULL TG pointer but with a QoS
3288 * set. See also workq_thread_reset_pri.
3289 *
3290 * Compare the QoS of existing preadopted tg with new one and
3291 * only overwrite the thread group if we have one with a higher
3292 * QoS.
3293 */
3294 thread_qos_t existing_qos = KQWL_GET_PREADOPTED_TG_QOS(old_tg);
3295 if (existing_qos >= qos) {
3296 os_atomic_rmw_loop_give_up(break);
3297 }
3298 }
3299
3300 // Transfer the ref taken earlier in the function to the kqwl
3301 new_tg = KQWL_ENCODE_PREADOPTED_TG_QOS(tg, qos);
3302 });
3303
3304 if (ret) {
3305 KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_INCOMING_IPC, old_tg, tg);
3306
3307 if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
3308 thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(old_tg));
3309 }
3310
3311 os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE, release);
3312 } else {
3313 // We failed to write to the kqwl_preadopt_tg, drop the ref we took
3314 // earlier in the function
3315 thread_group_deallocate_safe(tg);
3316 }
3317 }
3318
3319 /*
3320 * Called from fprocess of EVFILT_MACHPORT without the kqueue lock held.
3321 */
3322 bool
3323 kqueue_process_preadopt_thread_group(thread_t thread, struct kqueue *kq, struct thread_group *tg)
3324 {
3325 bool success = false;
3326 if (kq->kq_state & KQ_WORKLOOP) {
3327 struct kqworkloop *kqwl = (struct kqworkloop *) kq;
3328 thread_group_qos_t old_tg;
3329 success = os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg,
3330 KQWL_PREADOPTED_TG_SENTINEL, KQWL_PREADOPTED_TG_PROCESSED,
3331 &old_tg, relaxed);
3332 if (success) {
3333 thread_set_preadopt_thread_group(thread, tg);
3334 }
3335
3336 __assert_only thread_group_qos_t preadopt_tg;
3337 preadopt_tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed);
3338 assert(preadopt_tg == KQWL_PREADOPTED_TG_PROCESSED ||
3339 preadopt_tg == KQWL_PREADOPTED_TG_NEVER);
3340 }
3341
3342 return success;
3343 }
3344 #endif
3345
3346 /*!
3347 * @function kqworkloop_dealloc
3348 *
3349 * @brief
3350 * Deallocates a workloop kqueue.
3351 *
3352 * @discussion
3353 * Knotes hold references on the workloop, so we can't really reach this
3354 * function unless all of these are already gone.
3355 *
3356 * Nothing locked on entry or exit.
3357 *
3358 * @param hash_remove
3359 * Whether to remove the workloop from its hash table.
3360 */
3361 static void
3362 kqworkloop_dealloc(struct kqworkloop *kqwl, bool hash_remove)
3363 {
3364 thread_t cur_owner;
3365
3366 cur_owner = kqwl->kqwl_owner;
3367 if (cur_owner) {
3368 if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
3369 thread_drop_kevent_override(cur_owner);
3370 }
3371 thread_deallocate(cur_owner);
3372 kqwl->kqwl_owner = THREAD_NULL;
3373 }
3374
3375 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
3376 struct turnstile *ts;
3377 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
3378 &ts, TURNSTILE_WORKLOOPS);
3379 turnstile_cleanup();
3380 turnstile_deallocate(ts);
3381 }
3382
3383 if (hash_remove) {
3384 struct filedesc *fdp = &kqwl->kqwl_p->p_fd;
3385
3386 kqhash_lock(fdp);
3387 LIST_REMOVE(kqwl, kqwl_hashlink);
3388 kqhash_unlock(fdp);
3389 }
3390
3391 #if CONFIG_PREADOPT_TG
3392 thread_group_qos_t tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed);
3393 if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
3394 thread_group_release(KQWL_GET_PREADOPTED_TG(tg));
3395 }
3396 #endif
3397
3398 assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
3399 assert(kqwl->kqwl_owner == THREAD_NULL);
3400 assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);
3401
3402 lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp);
3403 kqueue_destroy(kqwl, kqworkloop_zone);
3404 }
3405
3406 /*!
3407 * @function kqworkloop_init
3408 *
3409 * @brief
3410 * Initializes a caller-allocated workloop kqueue.
3411 */
3412 static void
3413 kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
3414 kqueue_id_t id, workq_threadreq_param_t *trp)
3415 {
3416 kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
3417 os_ref_init_raw(&kqwl->kqwl_retains, NULL);
3418 kqwl->kqwl_dynamicid = id;
3419 kqwl->kqwl_p = p;
3420 if (trp) {
3421 kqwl->kqwl_params = trp->trp_value;
3422 }
3423
3424 workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
3425 if (trp) {
3426 if (trp->trp_flags & TRP_PRIORITY) {
3427 tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
3428 }
3429 if (trp->trp_flags) {
3430 tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
3431 }
3432 }
3433 kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
3434 kqwl->kqwl_request.tr_flags = tr_flags;
3435 os_atomic_store(&kqwl->kqwl_iotier_override, (uint8_t)THROTTLE_LEVEL_END, relaxed);
3436 #if CONFIG_PREADOPT_TG
3437 if (task_is_app(current_task())) {
3438 /* Apps will never adopt a thread group that is not their own. This is a
3439 * gross hack to simulate the post-process that is done in the voucher
3440 * subsystem today for thread groups */
3441 os_atomic_store(&kqwl->kqwl_preadopt_tg, KQWL_PREADOPTED_TG_NEVER, relaxed);
3442 }
3443 #endif
3444
3445 for (int i = 0; i < KQWL_NBUCKETS; i++) {
3446 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
3447 }
3448 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);
3449
3450 lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL);
3451
3452 kqueue_init(kqwl);
3453 }
3454
3455 /*!
3456 * @function kqworkloop_get_or_create
3457 *
3458 * @brief
3459 * Wrapper around kqworkloop_alloc that handles the uniquing of workloops.
3460 *
3461 * @returns
3462 * 0: success
3463 * EINVAL: invalid parameters
3464 * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
3465 * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
3466 * ENOMEM: allocation failed
3467 */
3468 static int
3469 kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
3470 workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
3471 {
3472 struct filedesc *fdp = &p->p_fd;
3473 struct kqworkloop *alloc_kqwl = NULL;
3474 struct kqworkloop *kqwl = NULL;
3475 int error = 0;
3476
3477 assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
3478
3479 if (id == 0 || id == (kqueue_id_t)-1) {
3480 return EINVAL;
3481 }
3482
3483 for (;;) {
3484 kqhash_lock(fdp);
3485 if (__improbable(fdp->fd_kqhash == NULL)) {
3486 kqworkloop_hash_init(fdp);
3487 }
3488
3489 kqwl = kqworkloop_hash_lookup_locked(fdp, id);
3490 if (kqwl) {
3491 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
3492 /*
3493 * If MUST_NOT_EXIST was passed, even if we would have failed
3494 * the try_retain, it could have gone the other way, and
3495 * userspace can't tell. Let'em fix their race.
3496 */
3497 error = EEXIST;
3498 break;
3499 }
3500
3501 if (__probable(kqworkloop_try_retain(kqwl))) {
3502 /*
3503 * This is a valid live workloop !
3504 */
3505 *kqwlp = kqwl;
3506 error = 0;
3507 break;
3508 }
3509 }
3510
3511 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
3512 error = ENOENT;
3513 break;
3514 }
3515
3516 /*
3517 * We didn't find what we were looking for.
3518 *
3519 * If this is the second time we reach this point (alloc_kqwl != NULL),
3520 * then we're done.
3521 *
3522 * If this is the first time we reach this point (alloc_kqwl == NULL),
3523 * then try to allocate one without blocking.
3524 */
3525 if (__probable(alloc_kqwl == NULL)) {
3526 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO);
3527 }
3528 if (__probable(alloc_kqwl)) {
3529 kqworkloop_init(alloc_kqwl, p, id, trp);
3530 kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
3531 kqhash_unlock(fdp);
3532 *kqwlp = alloc_kqwl;
3533 return 0;
3534 }
3535
3536 /*
3537 * We have to block to allocate a workloop, drop the lock,
3538 * allocate one, but then we need to retry lookups as someone
3539 * else could race with us.
3540 */
3541 kqhash_unlock(fdp);
3542
3543 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO);
3544 }
3545
3546 kqhash_unlock(fdp);
3547
3548 if (__improbable(alloc_kqwl)) {
3549 zfree(kqworkloop_zone, alloc_kqwl);
3550 }
3551
3552 return error;
3553 }
3554
3555 #pragma mark - knotes
3556
3557 static int
3558 filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
3559 {
3560 knote_set_error(kn, ENOTSUP);
3561 return 0;
3562 }
3563
3564 static void
3565 filt_no_detach(__unused struct knote *kn)
3566 {
3567 }
3568
3569 static int __dead2
3570 filt_bad_event(struct knote *kn, long hint)
3571 {
3572 panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
3573 }
3574
3575 static int __dead2
3576 filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
3577 {
3578 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3579 }
3580
3581 static int __dead2
3582 filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
3583 {
3584 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3585 }
3586
3587 /*
3588 * knotes_dealloc - detach all knotes for the process and drop them
3589 *
3590 * Process is in such a state that it will not try to allocate
3591 * any more knotes during this process (stopped for exit or exec).
3592 */
3593 void
3594 knotes_dealloc(proc_t p)
3595 {
3596 struct filedesc *fdp = &p->p_fd;
3597 struct kqueue *kq;
3598 struct knote *kn;
3599 struct klist *kn_hash = NULL;
3600 u_long kn_hashmask;
3601 int i;
3602
3603 proc_fdlock(p);
3604
3605 /* Close all the fd-indexed knotes up front */
3606 if (fdp->fd_knlistsize > 0) {
3607 for (i = 0; i < fdp->fd_knlistsize; i++) {
3608 while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
3609 kq = knote_get_kq(kn);
3610 kqlock(kq);
3611 proc_fdunlock(p);
3612 knote_drop(kq, kn, NULL);
3613 proc_fdlock(p);
3614 }
3615 }
3616 /* free the table */
3617 kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist);
3618 }
3619 fdp->fd_knlistsize = 0;
3620
3621 proc_fdunlock(p);
3622
3623 knhash_lock(fdp);
3624
3625 /* Clean out all the hashed knotes as well */
3626 if (fdp->fd_knhashmask != 0) {
3627 for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
3628 while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
3629 kq = knote_get_kq(kn);
3630 kqlock(kq);
3631 knhash_unlock(fdp);
3632 knote_drop(kq, kn, NULL);
3633 knhash_lock(fdp);
3634 }
3635 }
3636 kn_hash = fdp->fd_knhash;
3637 kn_hashmask = fdp->fd_knhashmask;
3638 fdp->fd_knhashmask = 0;
3639 fdp->fd_knhash = NULL;
3640 }
3641
3642 knhash_unlock(fdp);
3643
3644 if (kn_hash) {
3645 hashdestroy(kn_hash, M_KQUEUE, kn_hashmask);
3646 }
3647 }
3648
3649 /*
3650 * kqworkloops_dealloc - rebalance retains on kqworkloops created with
3651 * scheduling parameters
3652 *
3653 * Process is in such a state that it will not try to allocate
3654 * any more knotes during this process (stopped for exit or exec).
3655 */
3656 void
3657 kqworkloops_dealloc(proc_t p)
3658 {
3659 struct filedesc *fdp = &p->p_fd;
3660 struct kqworkloop *kqwl, *kqwln;
3661 struct kqwllist tofree;
3662
3663 if (!fdt_flag_test(fdp, FD_WORKLOOP)) {
3664 return;
3665 }
3666
3667 kqhash_lock(fdp);
3668
3669 if (fdp->fd_kqhashmask == 0) {
3670 kqhash_unlock(fdp);
3671 return;
3672 }
3673
3674 LIST_INIT(&tofree);
3675
3676 for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
3677 LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
3678 /*
3679 * kqworkloops that have scheduling parameters have an
3680 * implicit retain from kqueue_workloop_ctl that needs
3681 * to be balanced on process exit.
3682 */
3683 assert(kqwl->kqwl_params);
3684 LIST_REMOVE(kqwl, kqwl_hashlink);
3685 LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
3686 }
3687 }
3688
3689 kqhash_unlock(fdp);
3690
3691 LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
3692 uint32_t ref = os_ref_get_count_raw(&kqwl->kqwl_retains);
3693 if (ref != 1) {
3694 panic("kq(%p) invalid refcount %d", kqwl, ref);
3695 }
3696 kqworkloop_dealloc(kqwl, false);
3697 }
3698 }
3699
3700 static int
3701 kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
3702 struct kevent_qos_s *kev)
3703 {
3704 /* We don't care about the priority of a disabled or deleted knote */
3705 if (kev->flags & (EV_DISABLE | EV_DELETE)) {
3706 return 0;
3707 }
3708
3709 if (kq->kq_state & KQ_WORKLOOP) {
3710 /*
3711 * Workloops need valid priorities with a QOS (excluding manager) for
3712 * any enabled knote.
3713 *
3714 * When it is pre-existing, just make sure it has a valid QoS as
3715 * kevent_register() will not use the incoming priority (filters who do
3716 * have the responsibility to validate it again, see filt_wltouch).
3717 *
3718 * If the knote is being made, validate the incoming priority.
3719 */
3720 if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
3721 return ERANGE;
3722 }
3723 }
3724
3725 return 0;
3726 }
3727
3728 /*
3729 * Prepare a filter for waiting after register.
3730 *
3731 * The f_post_register_wait hook will be called later by kevent_register()
3732 * and should call kevent_register_wait_block()
3733 */
3734 static int
3735 kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
3736 {
3737 thread_t thread = current_thread();
3738
3739 assert(knote_fops(kn)->f_extended_codes);
3740
3741 if (kn->kn_thread == NULL) {
3742 thread_reference(thread);
3743 kn->kn_thread = thread;
3744 } else if (kn->kn_thread != thread) {
3745 /*
3746  * kn_thread may be set from a previous aborted wait.
3747 * However, it has to be from the same thread.
3748 */
3749 kev->flags |= EV_ERROR;
3750 kev->data = EXDEV;
3751 return 0;
3752 }
3753
3754 return FILTER_REGISTER_WAIT | rc;
3755 }
3756
3757 /*
3758 * Cleanup a kevent_register_wait_prepare() effect for threads that have been
3759 * aborted instead of properly woken up with thread_wakeup_thread().
3760 */
3761 static void
3762 kevent_register_wait_cleanup(struct knote *kn)
3763 {
3764 thread_t thread = kn->kn_thread;
3765 kn->kn_thread = NULL;
3766 thread_deallocate(thread);
3767 }
3768
3769 /*
3770 * Must be called at the end of a f_post_register_wait call from a filter.
3771 */
3772 static void
3773 kevent_register_wait_block(struct turnstile *ts, thread_t thread,
3774 thread_continue_t cont, struct _kevent_register *cont_args)
3775 {
3776 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
3777 kqunlock(cont_args->kqwl);
3778 cont_args->handoff_thread = thread;
3779 thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE);
3780 }
3781
3782 /*
3783 * Called by Filters using a f_post_register_wait to return from their wait.
3784 */
3785 static void
3786 kevent_register_wait_return(struct _kevent_register *cont_args)
3787 {
3788 struct kqworkloop *kqwl = cont_args->kqwl;
3789 struct kevent_qos_s *kev = &cont_args->kev;
3790 int error = 0;
3791
3792 if (cont_args->handoff_thread) {
3793 thread_deallocate(cont_args->handoff_thread);
3794 }
3795
3796 if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
3797 if ((kev->flags & EV_ERROR) == 0) {
3798 kev->flags |= EV_ERROR;
3799 kev->data = 0;
3800 }
3801 error = kevent_modern_copyout(kev, &cont_args->ueventlist);
3802 if (error == 0) {
3803 cont_args->eventout++;
3804 }
3805 }
3806
3807 kqworkloop_release(kqwl);
3808 if (error == 0) {
3809 *(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
3810 }
3811 unix_syscall_return(error);
3812 }
3813
3814 /*
3815 * kevent_register - add a new event to a kqueue
3816 *
3817 * Creates a mapping between the event source and
3818 * the kqueue via a knote data structure.
3819 *
3820  * Because many/most of the event sources are file
3821  * descriptor related, the knote is linked off
3822  * the file descriptor table for quick access.
3823 *
3824 * called with nothing locked
3825 * caller holds a reference on the kqueue
3826 */
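/*
 * For reference, a minimal userspace sketch of the registration that this
 * function services, using the public kevent(2) API rather than the private
 * kevent_qos/kevent_id variants; the "sock_fd" identifier is illustrative:
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, sock_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1) {
 *		// with no receipt space a failed registration comes back via errno;
 *		// with room in the eventlist (or EV_RECEIPT) it is returned as an
 *		// EV_ERROR kevent, matching the "output local errors" path below
 *	}
 */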
3827
3828 int
3829 kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
3830 struct knote **kn_out)
3831 {
3832 struct proc *p = kq->kq_p;
3833 const struct filterops *fops;
3834 struct knote *kn = NULL;
3835 int result = 0, error = 0;
3836 unsigned short kev_flags = kev->flags;
3837 KNOTE_LOCK_CTX(knlc);
3838
3839 if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
3840 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
3841 } else {
3842 error = EINVAL;
3843 goto out;
3844 }
3845
3846 /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
3847 if (__improbable((kev->flags & EV_VANISHED) &&
3848 (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
3849 error = EINVAL;
3850 goto out;
3851 }
3852
3853 /* Simplify the flags - delete and disable overrule */
3854 if (kev->flags & EV_DELETE) {
3855 kev->flags &= ~EV_ADD;
3856 }
3857 if (kev->flags & EV_DISABLE) {
3858 kev->flags &= ~EV_ENABLE;
3859 }
3860
3861 if (kq->kq_state & KQ_WORKLOOP) {
3862 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
3863 ((struct kqworkloop *)kq)->kqwl_dynamicid,
3864 kev->udata, kev->flags, kev->filter);
3865 } else if (kq->kq_state & KQ_WORKQ) {
3866 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
3867 0, kev->udata, kev->flags, kev->filter);
3868 } else {
3869 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
3870 VM_KERNEL_UNSLIDE_OR_PERM(kq),
3871 kev->udata, kev->flags, kev->filter);
3872 }
3873
3874 restart:
3875 /* find the matching knote from the fd tables/hashes */
3876 kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
3877 error = kevent_register_validate_priority(kq, kn, kev);
3878 result = 0;
3879 if (error) {
3880 if (kn) {
3881 kqunlock(kq);
3882 }
3883 goto out;
3884 }
3885
3886 if (kn == NULL && (kev->flags & EV_ADD) == 0) {
3887 /*
3888 * No knote found, EV_ADD wasn't specified
3889 */
3890
3891 if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
3892 (kq->kq_state & KQ_WORKLOOP)) {
3893 /*
3894 * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
3895 * that doesn't care about ENOENT, so just pretend the deletion
3896 * happened.
3897 */
3898 } else {
3899 error = ENOENT;
3900 }
3901 goto out;
3902 } else if (kn == NULL) {
3903 /*
3904 * No knote found, need to attach a new one (attach)
3905 */
3906
3907 struct fileproc *knote_fp = NULL;
3908
3909 /* grab a file reference for the new knote */
3910 if (fops->f_isfd) {
3911 if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) {
3912 goto out;
3913 }
3914 }
3915
3916 kn = knote_alloc();
3917 kn->kn_fp = knote_fp;
3918 kn->kn_is_fd = fops->f_isfd;
3919 kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED);
3920 kn->kn_status = 0;
3921
3922 /* was vanish support requested */
3923 if (kev->flags & EV_VANISHED) {
3924 kev->flags &= ~EV_VANISHED;
3925 kn->kn_status |= KN_REQVANISH;
3926 }
3927
3928 /* snapshot matching/dispatching protocol flags into knote */
3929 if (kev->flags & EV_DISABLE) {
3930 kn->kn_status |= KN_DISABLED;
3931 }
3932
3933 /*
3934 * copy the kevent state into knote
3935 * protocol is that fflags and data
3936 * are saved off, and cleared before
3937 * calling the attach routine.
3938 *
3939 * - kn->kn_sfflags aliases with kev->xflags
3940 * - kn->kn_sdata aliases with kev->data
3941  * - kn->kn_filtid is the top 8 bits of kev->filter
3942 */
3943 kn->kn_kevent = *(struct kevent_internal_s *)kev;
3944 kn->kn_sfflags = kev->fflags;
3945 kn->kn_filtid = (uint8_t)~kev->filter;
3946 kn->kn_fflags = 0;
3947 knote_reset_priority(kq, kn, kev->qos);
3948
3949 /* Add the knote for lookup thru the fd table */
3950 error = kq_add_knote(kq, kn, &knlc, p);
3951 if (error) {
3952 knote_free(kn);
3953 if (knote_fp != NULL) {
3954 fp_drop(p, (int)kev->ident, knote_fp, 0);
3955 }
3956
3957 if (error == ERESTART) {
3958 goto restart;
3959 }
3960 goto out;
3961 }
3962
3963 /* fp reference count now applies to knote */
3964
3965 /*
3966 * we can't use filter_call() because f_attach can change the filter ops
3967 * for a filter that supports f_extended_codes, so we need to reload
3968 * knote_fops() and not use `fops`.
3969 */
3970 result = fops->f_attach(kn, kev);
3971 if (result && !knote_fops(kn)->f_extended_codes) {
3972 result = FILTER_ACTIVE;
3973 }
3974
3975 kqlock(kq);
3976
3977 if (result & FILTER_THREADREQ_NODEFEER) {
3978 enable_preemption();
3979 }
3980
3981 if (kn->kn_flags & EV_ERROR) {
3982 /*
3983 * Failed to attach correctly, so drop.
3984 */
3985 kn->kn_filtid = EVFILTID_DETACHED;
3986 error = (int)kn->kn_sdata;
3987 knote_drop(kq, kn, &knlc);
3988 result = 0;
3989 goto out;
3990 }
3991
3992 /*
3993 * end "attaching" phase - now just attached
3994 *
3995  * Mark the thread request overcommit, if apropos
3996 *
3997 * If the attach routine indicated that an
3998 * event is already fired, activate the knote.
3999 */
4000 if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
4001 (kq->kq_state & KQ_WORKLOOP)) {
4002 kqworkloop_set_overcommit((struct kqworkloop *)kq);
4003 }
4004 } else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
4005 /*
4006 * The knote was dropped while we were waiting for the lock,
4007 * we need to re-evaluate entirely
4008 */
4009
4010 goto restart;
4011 } else if (kev->flags & EV_DELETE) {
4012 /*
4013 * Deletion of a knote (drop)
4014 *
4015 * If the filter wants to filter drop events, let it do so.
4016 *
4017 * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
4018 * we must wait for the knote to be re-enabled (unless it is being
4019 * re-enabled atomically here).
4020 */
4021
4022 if (knote_fops(kn)->f_allow_drop) {
4023 bool drop;
4024
4025 kqunlock(kq);
4026 drop = knote_fops(kn)->f_allow_drop(kn, kev);
4027 kqlock(kq);
4028
4029 if (!drop) {
4030 goto out_unlock;
4031 }
4032 }
4033
4034 if ((kev->flags & EV_ENABLE) == 0 &&
4035 (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4036 (kn->kn_status & KN_DISABLED) != 0) {
4037 kn->kn_status |= KN_DEFERDELETE;
4038 error = EINPROGRESS;
4039 goto out_unlock;
4040 }
4041
4042 knote_drop(kq, kn, &knlc);
4043 goto out;
4044 } else {
4045 /*
4046 * Regular update of a knote (touch)
4047 *
4048 * Call touch routine to notify filter of changes in filter values
4049 * (and to re-determine if any events are fired).
4050 *
4051 * If the knote is in defer-delete, avoid calling the filter touch
4052 * routine (it has delivered its last event already).
4053 *
4054 * If the touch routine had no failure,
4055 * apply the requested side effects to the knote.
4056 */
4057
4058 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4059 if (kev->flags & EV_ENABLE) {
4060 result = FILTER_ACTIVE;
4061 }
4062 } else {
4063 kqunlock(kq);
4064 result = filter_call(knote_fops(kn), f_touch(kn, kev));
4065 kqlock(kq);
4066 if (result & FILTER_THREADREQ_NODEFEER) {
4067 enable_preemption();
4068 }
4069 }
4070
4071 if (kev->flags & EV_ERROR) {
4072 result = 0;
4073 goto out_unlock;
4074 }
4075
4076 if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
4077 kn->kn_udata != kev->udata) {
4078 // this allows klist_copy_udata() not to take locks
4079 os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
4080 }
4081 if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
4082 kn->kn_status |= KN_DISABLED;
4083 knote_dequeue(kq, kn);
4084 }
4085 }
4086
4087 /* accept new kevent state */
4088 knote_apply_touch(kq, kn, kev, result);
4089
4090 out_unlock:
4091 /*
4092 * When the filter asked for a post-register wait,
4093 * we leave the kqueue locked for kevent_register()
4094 * to call the filter's f_post_register_wait hook.
4095 */
4096 if (result & FILTER_REGISTER_WAIT) {
4097 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4098 *kn_out = kn;
4099 } else {
4100 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4101 }
4102
4103 out:
4104 /* output local errors through the kevent */
4105 if (error) {
4106 kev->flags |= EV_ERROR;
4107 kev->data = error;
4108 }
4109 return result;
4110 }
4111
4112 /*
4113 * knote_process - process a triggered event
4114 *
4115 * Validate that it is really still a triggered event
4116 * by calling the filter routines (if necessary). Hold
4117 * a use reference on the knote to avoid it being detached.
4118 *
4119 * If it is still considered triggered, we will have taken
4120 * a copy of the state under the filter lock. We use that
4121 * snapshot to dispatch the knote for future processing (or
4122 * not, if this was a lost event).
4123 *
4124 * Our caller assures us that nobody else can be processing
4125 * events from this knote during the whole operation. But
4126 * others can be touching or posting events to the knote
4127 * interspersed with our processing it.
4128 *
4129 * caller holds a reference on the kqueue.
4130 * kqueue locked on entry and exit - but may be dropped
4131 */
4132 static int
4133 knote_process(struct knote *kn, kevent_ctx_t kectx,
4134 kevent_callback_t callback)
4135 {
4136 struct kevent_qos_s kev;
4137 struct kqueue *kq = knote_get_kq(kn);
4138 KNOTE_LOCK_CTX(knlc);
4139 int result = FILTER_ACTIVE;
4140 int error = 0;
4141 bool drop = false;
4142
4143 /*
4144 * Must be active
4145 * Must be queued and not disabled/suppressed or dropping
4146 */
4147 assert(kn->kn_status & KN_QUEUED);
4148 assert(kn->kn_status & KN_ACTIVE);
4149 assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));
4150
4151 if (kq->kq_state & KQ_WORKLOOP) {
4152 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
4153 ((struct kqworkloop *)kq)->kqwl_dynamicid,
4154 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4155 kn->kn_filtid);
4156 } else if (kq->kq_state & KQ_WORKQ) {
4157 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
4158 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4159 kn->kn_filtid);
4160 } else {
4161 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
4162 VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
4163 kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
4164 }
4165
4166 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
4167 /*
4168 * When the knote is dropping or has dropped,
4169 * then there's nothing we want to process.
4170 */
4171 return EJUSTRETURN;
4172 }
4173
4174 /*
4175  * While waiting for the knote lock, we may have dropped the kq lock,
4176 * and a touch may have disabled and dequeued the knote.
4177 */
4178 if (!(kn->kn_status & KN_QUEUED)) {
4179 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4180 return EJUSTRETURN;
4181 }
4182
4183 /*
4184 * For deferred-drop or vanished events, we just create a fake
4185 * event to acknowledge end-of-life. Otherwise, we call the
4186 * filter's process routine to snapshot the kevent state under
4187 * the filter's locking protocol.
4188 *
4189 * suppress knotes to avoid returning the same event multiple times in
4190 * a single call.
4191 */
4192 knote_suppress(kq, kn);
4193
4194 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4195 uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT;
4196 if (kn->kn_status & KN_DEFERDELETE) {
4197 kev_flags |= EV_DELETE;
4198 } else {
4199 kev_flags |= EV_VANISHED;
4200 }
4201
4202 /* create fake event */
4203 kev = (struct kevent_qos_s){
4204 .filter = kn->kn_filter,
4205 .ident = kn->kn_id,
4206 .flags = kev_flags,
4207 .udata = kn->kn_udata,
4208 };
4209 } else {
4210 kqunlock(kq);
4211 kev = (struct kevent_qos_s) { };
4212 result = filter_call(knote_fops(kn), f_process(kn, &kev));
4213 kqlock(kq);
4214 }
4215
4216 /*
4217 * Determine how to dispatch the knote for future event handling.
4218 * not-fired: just return (do not callout, leave deactivated).
4219  * One-shot: If dispatch2, enter deferred-delete mode (unless this
4220  * is the deferred delete event delivery itself). Otherwise,
4221 * drop it.
4222 * Dispatch: don't clear state, just mark it disabled.
4223 * Cleared: just leave it deactivated.
4224 * Others: re-activate as there may be more events to handle.
4225 * This will not wake up more handlers right now, but
4226 * at the completion of handling events it may trigger
4227 * more handler threads (TODO: optimize based on more than
4228 * just this one event being detected by the filter).
4229 */
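/*
 * A userspace-side illustration of those dispositions, using the public
 * flags (the "fd" identifier is hypothetical):
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
 *		// delivered once, then the knote is dropped below
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
 *		// delivered, then disabled until re-enabled with EV_ENABLE
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *		// edge-triggered: left deactivated until the filter fires again
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		// level-triggered: re-activated below while events remain
 */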
4230 if ((result & FILTER_ACTIVE) == 0) {
4231 if ((kn->kn_status & KN_ACTIVE) == 0) {
4232 /*
4233 * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
4234 * within f_process() but that doesn't necessarily make them
4235 * ready to process, so we should leave them be.
4236 *
4237 * For other knotes, since we will not return an event,
4238 * there's no point keeping the knote suppressed.
4239 */
4240 knote_unsuppress(kq, kn);
4241 }
4242 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4243 return EJUSTRETURN;
4244 }
4245
4246 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
4247 knote_adjust_qos(kq, kn, result);
4248 }
4249
4250 if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) {
4251 kqueue_update_iotier_override(kq);
4252 }
4253
4254 kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
4255
4256 if (kev.flags & EV_ONESHOT) {
4257 if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4258 (kn->kn_status & KN_DEFERDELETE) == 0) {
4259 /* defer dropping non-delete oneshot dispatch2 events */
4260 kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
4261 } else {
4262 drop = true;
4263 }
4264 } else if (kn->kn_flags & EV_DISPATCH) {
4265 /* disable all dispatch knotes */
4266 kn->kn_status |= KN_DISABLED;
4267 } else if ((kn->kn_flags & EV_CLEAR) == 0) {
4268 /* re-activate in case there are more events */
4269 knote_activate(kq, kn, FILTER_ACTIVE);
4270 }
4271
4272 /*
4273 * callback to handle each event as we find it.
4274 * If we have to detach and drop the knote, do
4275 * it while we have the kq unlocked.
4276 */
4277 if (drop) {
4278 knote_drop(kq, kn, &knlc);
4279 } else {
4280 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4281 }
4282
4283 if (kev.flags & EV_VANISHED) {
4284 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
4285 kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4286 kn->kn_filtid);
4287 }
4288
4289 error = (callback)(&kev, kectx);
4290 kqlock(kq);
4291 return error;
4292 }
4293
4294 /*
4295 * Returns -1 if the kqueue was unbound and processing should not happen
4296 */
4297 #define KQWQAE_BEGIN_PROCESSING 1
4298 #define KQWQAE_END_PROCESSING 2
4299 #define KQWQAE_UNBIND 3
4300 static int
4301 kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
4302 int kevent_flags, int kqwqae_op)
4303 {
4304 struct knote *kn;
4305 int rc = 0;
4306 bool unbind;
4307 struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index - 1];
4308 struct kqtailq *queue = &kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1];
4309
4310 kqlock_held(&kqwq->kqwq_kqueue);
4311
4312 /*
4313 * Return suppressed knotes to their original state.
4314 * For workq kqueues, suppressed ones that are still
4315 * truly active (not just forced into the queue) will
4316 * set flags we check below to see if anything got
4317 * woken up.
4318 */
4319 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4320 knote_unsuppress(kqwq, kn);
4321 }
4322
4323 if (kqwqae_op == KQWQAE_UNBIND) {
4324 unbind = true;
4325 } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
4326 unbind = false;
4327 } else {
4328 unbind = TAILQ_EMPTY(queue);
4329 }
4330 if (unbind) {
4331 thread_t thread = kqr_thread_fast(kqr);
4332 thread_qos_t old_override;
4333
4334 #if DEBUG || DEVELOPMENT
4335 thread_t self = current_thread();
4336 struct uthread *ut = get_bsdthread_info(self);
4337
4338 assert(thread == self);
4339 assert(ut->uu_kqr_bound == kqr);
4340 #endif // DEBUG || DEVELOPMENT
4341
4342 old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
4343 if (!TAILQ_EMPTY(queue)) {
4344 /*
4345 * Request a new thread if we didn't process the whole
4346 * queue.
4347 */
4348 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
4349 kqr->tr_kq_qos_index, 0);
4350 }
4351 if (old_override) {
4352 thread_drop_kevent_override(thread);
4353 }
4354 rc = -1;
4355 }
4356
4357 return rc;
4358 }
4359
4360 /*
4361 * Return 0 to indicate that processing should proceed,
4362 * -1 if there is nothing to process.
4363 *
4364 * Called with kqueue locked and returns the same way,
4365 * but may drop lock temporarily.
4366 */
4367 static int
4368 kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4369 int kevent_flags)
4370 {
4371 int rc = 0;
4372
4373 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
4374 0, kqr->tr_kq_qos_index);
4375
4376 rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4377 KQWQAE_BEGIN_PROCESSING);
4378
4379 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
4380 thread_tid(kqr_thread(kqr)),
4381 !TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
4382
4383 return rc;
4384 }
4385
4386 static thread_qos_t
4387 kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
4388 {
4389 kq_index_t qos = THREAD_QOS_UNSPECIFIED;
4390 struct knote *kn, *tmp;
4391
4392 kqlock_held(kqwl);
4393
4394 TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
4395 /*
4396 * If a knote that can adjust QoS is disabled because of the automatic
4397 * behavior of EV_DISPATCH, the knotes should stay suppressed so that
4398 * further overrides keep pushing.
4399 */
4400 if (knote_fops(kn)->f_adjusts_qos &&
4401 (kn->kn_status & KN_DISABLED) != 0 &&
4402 (kn->kn_status & KN_DROPPING) == 0 &&
4403 (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
4404 qos = MAX(qos, kn->kn_qos_override);
4405 continue;
4406 }
4407 knote_unsuppress(kqwl, kn);
4408 }
4409
4410 return qos;
4411 }
4412
4413 static int
4414 kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
4415 {
4416 workq_threadreq_t kqr = &kqwl->kqwl_request;
4417 struct kqueue *kq = &kqwl->kqwl_kqueue;
4418 int rc = 0, op = KQWL_UTQ_NONE;
4419
4420 kqlock_held(kq);
4421
4422 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
4423 kqwl->kqwl_dynamicid, 0, 0);
4424
4425 /* nobody else should still be processing */
4426 assert((kq->kq_state & KQ_PROCESSING) == 0);
4427
4428 kq->kq_state |= KQ_PROCESSING;
4429
4430 if (kevent_flags & KEVENT_FLAG_PARKING) {
4431 /*
4432 * When "parking" we want to process events and if no events are found
4433 * unbind.
4434 *
4435  * However, non-overcommit threads sometimes park even when they have
4436 * more work so that the pool can narrow. For these, we need to unbind
4437 * early, so that calling kqworkloop_update_threads_qos() can ask the
4438 * workqueue subsystem whether the thread should park despite having
4439 * pending events.
4440 */
4441 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
4442 op = KQWL_UTQ_PARKING;
4443 } else {
4444 op = KQWL_UTQ_UNBINDING;
4445 }
4446 } else if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
4447 op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
4448 }
4449
4450 if (op != KQWL_UTQ_NONE) {
4451 thread_qos_t qos_override;
4452 thread_t thread = kqr_thread_fast(kqr);
4453
4454 qos_override = kqworkloop_acknowledge_events(kqwl);
4455
4456 if (op == KQWL_UTQ_UNBINDING) {
4457 kqworkloop_unbind_locked(kqwl, thread,
4458 KQWL_OVERRIDE_DROP_IMMEDIATELY);
4459 kqworkloop_release_live(kqwl);
4460 }
4461 kqworkloop_update_threads_qos(kqwl, op, qos_override);
4462 if (op == KQWL_UTQ_PARKING &&
4463 (!kqwl->kqwl_count || kqwl->kqwl_owner)) {
4464 kqworkloop_unbind_locked(kqwl, thread,
4465 KQWL_OVERRIDE_DROP_DELAYED);
4466 kqworkloop_release_live(kqwl);
4467 rc = -1;
4468 } else if (op == KQWL_UTQ_UNBINDING &&
4469 kqr_thread(kqr) != thread) {
4470 rc = -1;
4471 }
4472
4473 if (rc == -1) {
4474 kq->kq_state &= ~KQ_PROCESSING;
4475 kqworkloop_unbind_delayed_override_drop(thread);
4476 }
4477 }
4478
4479 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
4480 kqwl->kqwl_dynamicid, 0, 0);
4481
4482 return rc;
4483 }
4484
4485 /*
4486 * Return 0 to indicate that processing should proceed,
4487 * -1 if there is nothing to process.
4488 * EBADF if the kqueue is draining
4489 *
4490 * Called with kqueue locked and returns the same way,
4491 * but may drop lock temporarily.
4492 * May block.
4493 */
4494 static int
4495 kqfile_begin_processing(struct kqfile *kq)
4496 {
4497 kqlock_held(kq);
4498
4499 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4500 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
4501 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4502
4503 /* wait to become the exclusive processing thread */
4504 while ((kq->kqf_state & (KQ_PROCESSING | KQ_DRAIN)) == KQ_PROCESSING) {
4505 kq->kqf_state |= KQ_PROCWAIT;
4506 lck_spin_sleep(&kq->kqf_lock, LCK_SLEEP_DEFAULT,
4507 &kq->kqf_suppressed, THREAD_UNINT | THREAD_WAIT_NOREPORT);
4508 }
4509
4510 if (kq->kqf_state & KQ_DRAIN) {
4511 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4512 VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
4513 return EBADF;
4514 }
4515
4516 /* Nobody else processing */
4517
4518 /* anything left to process? */
4519 if (kq->kqf_count == 0) {
4520 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4521 VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
4522 return -1;
4523 }
4524
4525 /* convert to processing mode */
4526 kq->kqf_state |= KQ_PROCESSING;
4527
4528 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4529 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4530 return 0;
4531 }
4532
4533 /*
4534 * Try to end the processing, only called when a workq thread is attempting to
4535 * park (KEVENT_FLAG_PARKING is set).
4536 *
4537 * When returning -1, the kqworkq is setup again so that it is ready to be
4538 * processed.
4539 */
4540 static int
4541 kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4542 int kevent_flags)
4543 {
4544 if (kevent_flags & KEVENT_FLAG_PARKING) {
4545 /*
4546 * if acknowledge events "succeeds" it means there are events,
4547 * which is a failure condition for end_processing.
4548 */
4549 int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4550 KQWQAE_END_PROCESSING);
4551 if (rc == 0) {
4552 return -1;
4553 }
4554 }
4555
4556 return 0;
4557 }
4558
4559 /*
4560 * Try to end the processing, only called when a workq thread is attempting to
4561 * park (KEVENT_FLAG_PARKING is set).
4562 *
4563 * When returning -1, the kqworkq is setup again so that it is ready to be
4564 * processed (as if kqworkloop_begin_processing had just been called).
4565 *
4566 * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
4567 * the kqworkloop is unbound from its servicer as a side effect.
4568 */
4569 static int
4570 kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
4571 {
4572 struct kqueue *kq = &kqwl->kqwl_kqueue;
4573 workq_threadreq_t kqr = &kqwl->kqwl_request;
4574 int rc = 0;
4575
4576 kqlock_held(kq);
4577
4578 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
4579 kqwl->kqwl_dynamicid, 0, 0);
4580
4581 if (kevent_flags & KEVENT_FLAG_PARKING) {
4582 thread_t thread = kqr_thread_fast(kqr);
4583 thread_qos_t qos_override;
4584
4585 /*
4586 * When KEVENT_FLAG_PARKING is set, we need to attempt
4587 * an unbind while still under the lock.
4588 *
4589 * So we do everything kqworkloop_unbind() would do, but because
4590 * we're inside kqueue_process(), if the workloop actually
4591 * received events while our locks were dropped, we have
4592 * the opportunity to fail the end processing and loop again.
4593 *
4594 * This avoids going through the process-wide workqueue lock
4595 * hence scales better.
4596 */
4597 assert(flags & KQ_PROCESSING);
4598 qos_override = kqworkloop_acknowledge_events(kqwl);
4599 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
4600
4601 if (kqwl->kqwl_wakeup_qos && !kqwl->kqwl_owner) {
4602 rc = -1;
4603 } else {
4604 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4605 kqworkloop_release_live(kqwl);
4606 kq->kq_state &= ~flags;
4607 kqworkloop_unbind_delayed_override_drop(thread);
4608 }
4609 } else {
4610 kq->kq_state &= ~flags;
4611 kq->kq_state |= KQ_R2K_ARMED;
4612 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
4613 }
4614
4615 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
4616 kqwl->kqwl_dynamicid, 0, 0);
4617
4618 return rc;
4619 }
4620
4621 /*
4622 * Called with kqueue lock held.
4623 *
4624 * 0: no more events
4625 * -1: has more events
4626 * EBADF: kqueue is in draining mode
4627 */
4628 static int
4629 kqfile_end_processing(struct kqfile *kq)
4630 {
4631 struct knote *kn;
4632 int procwait;
4633
4634 kqlock_held(kq);
4635
4636 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4637
4638 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
4639 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4640
4641 /*
4642 * Return suppressed knotes to their original state.
4643 */
4644 while ((kn = TAILQ_FIRST(&kq->kqf_suppressed)) != NULL) {
4645 knote_unsuppress(kq, kn);
4646 }
4647
4648 procwait = (kq->kqf_state & KQ_PROCWAIT);
4649 kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
4650
4651 if (procwait) {
4652 /* first wake up any thread already waiting to process */
4653 thread_wakeup(&kq->kqf_suppressed);
4654 }
4655
4656 if (kq->kqf_state & KQ_DRAIN) {
4657 return EBADF;
4658 }
4659 return kq->kqf_count != 0 ? -1 : 0;
4660 }
4661
4662 static int
4663 kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
4664 struct kqueue_workloop_params *params, int *retval)
4665 {
4666 int error = 0;
4667 struct kqworkloop *kqwl;
4668 struct filedesc *fdp = &p->p_fd;
4669 workq_threadreq_param_t trp = { };
4670
4671 switch (cmd) {
4672 case KQ_WORKLOOP_CREATE:
4673 if (!params->kqwlp_flags) {
4674 error = EINVAL;
4675 break;
4676 }
4677
4678 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
4679 (params->kqwlp_sched_pri < 1 ||
4680 params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
4681 error = EINVAL;
4682 break;
4683 }
4684
4685 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
4686 invalid_policy(params->kqwlp_sched_pol)) {
4687 error = EINVAL;
4688 break;
4689 }
4690
4691 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
4692 (params->kqwlp_cpu_percent <= 0 ||
4693 params->kqwlp_cpu_percent > 100 ||
4694 params->kqwlp_cpu_refillms <= 0 ||
4695 params->kqwlp_cpu_refillms > 0x00ffffff)) {
4696 error = EINVAL;
4697 break;
4698 }
4699
4700 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
4701 trp.trp_flags |= TRP_PRIORITY;
4702 trp.trp_pri = (uint8_t)params->kqwlp_sched_pri;
4703 }
4704 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
4705 trp.trp_flags |= TRP_POLICY;
4706 trp.trp_pol = (uint8_t)params->kqwlp_sched_pol;
4707 }
4708 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
4709 trp.trp_flags |= TRP_CPUPERCENT;
4710 trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
4711 trp.trp_refillms = params->kqwlp_cpu_refillms;
4712 }
4713
4714 error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
4715 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4716 KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
4717 if (error) {
4718 break;
4719 }
4720
4721 if (!fdt_flag_test(fdp, FD_WORKLOOP)) {
4722 /* FD_WORKLOOP indicates we've ever created a workloop
4723  * via this syscall but it's only ever added to a process, never
4724 * removed.
4725 */
4726 proc_fdlock(p);
4727 fdt_flag_set(fdp, FD_WORKLOOP);
4728 proc_fdunlock(p);
4729 }
4730 break;
4731 case KQ_WORKLOOP_DESTROY:
4732 error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
4733 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4734 KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
4735 if (error) {
4736 break;
4737 }
4738 kqlock(kqwl);
4739 trp.trp_value = kqwl->kqwl_params;
4740 if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
4741 trp.trp_flags |= TRP_RELEASED;
4742 kqwl->kqwl_params = trp.trp_value;
4743 kqworkloop_release_live(kqwl);
4744 } else {
4745 error = EINVAL;
4746 }
4747 kqunlock(kqwl);
4748 kqworkloop_release(kqwl);
4749 break;
4750 }
4751 *retval = 0;
4752 return error;
4753 }
4754
4755 int
4756 kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
4757 {
4758 struct kqueue_workloop_params params = {
4759 .kqwlp_id = 0,
4760 };
4761 if (uap->sz < sizeof(params.kqwlp_version)) {
4762 return EINVAL;
4763 }
4764
4765 size_t copyin_sz = MIN(sizeof(params), uap->sz);
4766 int rv = copyin(uap->addr, &params, copyin_sz);
4767 if (rv) {
4768 return rv;
4769 }
4770
4771 if (params.kqwlp_version != (int)uap->sz) {
4772 return EINVAL;
4773 }
4774
4775 return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
4776 retval);
4777 }
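/*
 * A sketch of how the parameter block validated above might be filled in.
 * kqueue_workloop_ctl() is a private interface normally driven by libdispatch,
 * so this is illustrative only; the field and flag names are taken from the
 * checks in kqueue_workloop_ctl_internal(), and "workloop_id" is hypothetical:
 *
 *	struct kqueue_workloop_params params = {
 *		.kqwlp_version   = sizeof(params),  // must match the sz argument
 *		.kqwlp_id        = workloop_id,     // caller-chosen dynamic kqueue id
 *		.kqwlp_flags     = KQ_WORKLOOP_CREATE_SCHED_PRI,
 *		.kqwlp_sched_pri = 47,              // must be within 1..MAXPRI_USER
 *	};
 */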
4778
4779 static int
4780 kqueue_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx)
4781 {
4782 struct kqfile *kq = (struct kqfile *)fp_get_data(fp);
4783 int retnum = 0;
4784
4785 assert((kq->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4786
4787 if (which == FREAD) {
4788 kqlock(kq);
4789 if (kqfile_begin_processing(kq) == 0) {
4790 retnum = kq->kqf_count;
4791 kqfile_end_processing(kq);
4792 } else if ((kq->kqf_state & KQ_DRAIN) == 0) {
4793 selrecord(kq->kqf_p, &kq->kqf_sel, wql);
4794 }
4795 kqunlock(kq);
4796 }
4797 return retnum;
4798 }
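/*
 * Because a kqueue is itself selectable for read, its fd can be passed to
 * select(2); a minimal sketch ("kq" is assumed to be a kqueue() descriptor):
 *
 *	fd_set rfds;
 *	FD_ZERO(&rfds);
 *	FD_SET(kq, &rfds);
 *	if (select(kq + 1, &rfds, NULL, NULL, NULL) > 0 && FD_ISSET(kq, &rfds)) {
 *		// at least one kevent is pending; see kqueue_select() above
 *	}
 */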
4799
4800 /*
4801 * kqueue_close -
4802 */
4803 static int
4804 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
4805 {
4806 struct kqfile *kqf = fg_get_data(fg);
4807
4808 assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4809 kqlock(kqf);
4810 selthreadclear(&kqf->kqf_sel);
4811 kqunlock(kqf);
4812 kqueue_dealloc(&kqf->kqf_kqueue);
4813 fg_set_data(fg, NULL);
4814 return 0;
4815 }
4816
4817 /*
4818 * Max depth of the nested kq path that can be created.
4819 * Note that this has to be less than the size of kq_level
4820 * to avoid wrapping around and mislabeling the level. We also
4821 * want to be aggressive about this so that we don't overflow the
4822 * kernel stack while posting kevents
4823 */
4824 #define MAX_NESTED_KQ 10
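/*
 * Nesting means attaching one kqueue's fd to another with EVFILT_READ, which
 * is what kqueue_kqfilter() below implements; a minimal userspace sketch:
 *
 *	int inner = kqueue();
 *	int outer = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &kev, 1, NULL, 0, NULL);
 *		// "outer" now reports readability while "inner" has pending events;
 *		// chains deeper than MAX_NESTED_KQ are rejected with EINVAL
 */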
4825
4826 /*
4827  * The caller has taken a use-count reference on this kqueue and will donate it
4828 * to the kqueue we are being added to. This keeps the kqueue from closing until
4829 * that relationship is torn down.
4830 */
4831 static int
4832 kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
4833 __unused struct kevent_qos_s *kev)
4834 {
4835 struct kqfile *kqf = (struct kqfile *)fp_get_data(fp);
4836 struct kqueue *kq = &kqf->kqf_kqueue;
4837 struct kqueue *parentkq = knote_get_kq(kn);
4838
4839 assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4840
4841 if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
4842 knote_set_error(kn, EINVAL);
4843 return 0;
4844 }
4845
4846 /*
4847 * We have to avoid creating a cycle when nesting kqueues
4848 * inside another. Rather than trying to walk the whole
4849 * potential DAG of nested kqueues, we just use a simple
4850 * ceiling protocol. When a kqueue is inserted into another,
4851 * we check that the (future) parent is not already nested
4852  * into another kqueue at a lower level than the potential
4853 * child (because it could indicate a cycle). If that test
4854 * passes, we just mark the nesting levels accordingly.
4855 *
4856 * Only up to MAX_NESTED_KQ can be nested.
4857 *
4858 * Note: kqworkq and kqworkloop cannot be nested and have reused their
4859 * kq_level field, so ignore these as parent.
4860 */
4861
4862 kqlock(parentkq);
4863
4864 if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
4865 if (parentkq->kq_level > 0 &&
4866 parentkq->kq_level < kq->kq_level) {
4867 kqunlock(parentkq);
4868 knote_set_error(kn, EINVAL);
4869 return 0;
4870 }
4871
4872 /* set parent level appropriately */
4873 uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
4874 if (plevel < kq->kq_level + 1) {
4875 if (kq->kq_level + 1 > MAX_NESTED_KQ) {
4876 kqunlock(parentkq);
4877 knote_set_error(kn, EINVAL);
4878 return 0;
4879 }
4880 plevel = kq->kq_level + 1;
4881 }
4882
4883 parentkq->kq_level = plevel;
4884 }
4885
4886 kqunlock(parentkq);
4887
4888 kn->kn_filtid = EVFILTID_KQREAD;
4889 kqlock(kq);
4890 KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
4891 /* indicate nesting in child, if needed */
4892 if (kq->kq_level == 0) {
4893 kq->kq_level = 1;
4894 }
4895
4896 int count = kq->kq_count;
4897 kqunlock(kq);
4898 return count > 0;
4899 }
4900
4901 __attribute__((noinline))
4902 static void
4903 kqfile_wakeup(struct kqfile *kqf, long hint, wait_result_t wr)
4904 {
4905 /* wakeup a thread waiting on this queue */
4906 selwakeup(&kqf->kqf_sel);
4907
4908 /* wake up threads in kqueue_scan() */
4909 if (kqf->kqf_state & KQ_SLEEP) {
4910 kqf->kqf_state &= ~KQ_SLEEP;
4911 thread_wakeup_with_result(&kqf->kqf_count, wr);
4912 }
4913
4914 if (hint == NOTE_REVOKE) {
4915 /* wakeup threads waiting their turn to process */
4916 if (kqf->kqf_state & KQ_PROCWAIT) {
4917 assert(kqf->kqf_state & KQ_PROCESSING);
4918 kqf->kqf_state &= ~KQ_PROCWAIT;
4919 thread_wakeup(&kqf->kqf_suppressed);
4920 }
4921
4922 /* no need to KNOTE: knote_fdclose() takes care of it */
4923 } else {
4924 /* wakeup other kqueues/select sets we're inside */
4925 KNOTE(&kqf->kqf_sel.si_note, hint);
4926 }
4927 }
4928
4929 /*
4930 * kqueue_drain - called when kq is closed
4931 */
4932 static int
4933 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
4934 {
4935 struct kqfile *kqf = (struct kqfile *)fp_get_data(fp);
4936
4937 assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4938
4939 kqlock(kqf);
4940 kqf->kqf_state |= KQ_DRAIN;
4941 kqfile_wakeup(kqf, NOTE_REVOKE, THREAD_RESTART);
4942 kqunlock(kqf);
4943 return 0;
4944 }
4945
4946 int
4947 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
4948 {
4949 assert((kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4950
4951 kqlock(kq);
4952 if (isstat64 != 0) {
4953 struct stat64 *sb64 = (struct stat64 *)ub;
4954
4955 bzero((void *)sb64, sizeof(*sb64));
4956 sb64->st_size = kq->kq_count;
4957 if (kq->kq_state & KQ_KEV_QOS) {
4958 sb64->st_blksize = sizeof(struct kevent_qos_s);
4959 } else if (kq->kq_state & KQ_KEV64) {
4960 sb64->st_blksize = sizeof(struct kevent64_s);
4961 } else if (IS_64BIT_PROCESS(p)) {
4962 sb64->st_blksize = sizeof(struct user64_kevent);
4963 } else {
4964 sb64->st_blksize = sizeof(struct user32_kevent);
4965 }
4966 sb64->st_mode = S_IFIFO;
4967 } else {
4968 struct stat *sb = (struct stat *)ub;
4969
4970 bzero((void *)sb, sizeof(*sb));
4971 sb->st_size = kq->kq_count;
4972 if (kq->kq_state & KQ_KEV_QOS) {
4973 sb->st_blksize = sizeof(struct kevent_qos_s);
4974 } else if (kq->kq_state & KQ_KEV64) {
4975 sb->st_blksize = sizeof(struct kevent64_s);
4976 } else if (IS_64BIT_PROCESS(p)) {
4977 sb->st_blksize = sizeof(struct user64_kevent);
4978 } else {
4979 sb->st_blksize = sizeof(struct user32_kevent);
4980 }
4981 sb->st_mode = S_IFIFO;
4982 }
4983 kqunlock(kq);
4984 return 0;
4985 }
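/*
 * Userspace typically reaches this through fstat(2) on a kqueue descriptor;
 * a minimal sketch ("kq" is assumed to be a kqueue() descriptor):
 *
 *	struct stat sb;
 *	if (fstat(kq, &sb) == 0) {
 *		// S_ISFIFO(sb.st_mode) is true for a kqueue,
 *		// sb.st_size is the number of pending events, and
 *		// sb.st_blksize reflects the kevent structure size in use
 *	}
 */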
4986
4987 static inline bool
4988 kqueue_threadreq_can_use_ast(struct kqueue *kq)
4989 {
4990 if (current_proc() == kq->kq_p) {
4991 /*
4992 * Setting an AST from a non BSD syscall is unsafe: mach_msg_trap() can
4993  * do combined send/receive and in the case of self-IPC, the AST may be
4994 * set on a thread that will not return to userspace and needs the
4995 * thread the AST would create to unblock itself.
4996 *
4997 * At this time, we really want to target:
4998 *
4999 * - kevent variants that can cause thread creations, and dispatch
5000 * really only uses kevent_qos and kevent_id,
5001 *
5002 * - workq_kernreturn (directly about thread creations)
5003 *
5004 * - bsdthread_ctl which is used for qos changes and has direct impact
5005 * on the creator thread scheduling decisions.
5006 */
5007 switch (current_uthread()->syscall_code) {
5008 case SYS_kevent_qos:
5009 case SYS_kevent_id:
5010 case SYS_workq_kernreturn:
5011 case SYS_bsdthread_ctl:
5012 return true;
5013 }
5014 }
5015 return false;
5016 }
5017
5018 /*
5019 * Interact with the pthread kext to request a servicing there at a specific QoS
5020 * level.
5021 *
5022 * - Caller holds the kqlock
5023 *
5024 * - May be called with the kqueue's wait queue set locked,
5025 * so cannot do anything that could recurse on that.
5026 */
5027 static void
5028 kqueue_threadreq_initiate(kqueue_t kqu, workq_threadreq_t kqr,
5029 kq_index_t qos, int flags)
5030 {
5031 assert(kqr_thread(kqr) == THREAD_NULL);
5032 assert(!kqr_thread_requested(kqr));
5033 struct turnstile *ts = TURNSTILE_NULL;
5034
5035 if (workq_is_exiting(kqu.kq->kq_p)) {
5036 return;
5037 }
5038
5039 kqlock_held(kqu);
5040
5041 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5042 struct kqworkloop *kqwl = kqu.kqwl;
5043
5044 assert(kqwl->kqwl_owner == THREAD_NULL);
5045 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
5046 kqwl->kqwl_dynamicid, 0, qos, kqwl->kqwl_wakeup_qos);
5047 ts = kqwl->kqwl_turnstile;
5048 /* Add a thread request reference on the kqueue. */
5049 kqworkloop_retain(kqwl);
5050
5051 #if CONFIG_PREADOPT_TG
5052 /* This thread is the one which is ack-ing the thread group on the kqwl
5053 * under the kqlock and will take action accordingly, pairs with the
5054 * release barrier in kqueue_set_preadopted_thread_group */
5055 uint16_t tg_acknowledged;
5056 if (os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg_needs_redrive,
5057 KQWL_PREADOPT_TG_NEEDS_REDRIVE, KQWL_PREADOPT_TG_CLEAR_REDRIVE,
5058 &tg_acknowledged, acquire)) {
5059 flags |= WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG;
5060 }
5061 #endif
5062 } else {
5063 assert(kqu.kq->kq_state & KQ_WORKQ);
5064 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST), -1, 0, qos,
5065 !TAILQ_EMPTY(&kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
5066 }
5067
5068 /*
5069 * New-style thread request supported.
5070 * Provide the pthread kext a pointer to a workq_threadreq_s structure for
5071 * its use until a corresponding kqueue_threadreq_bind callback.
5072 */
5073 if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5074 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5075 }
5076 if (qos == KQWQ_QOS_MANAGER) {
5077 qos = WORKQ_THREAD_QOS_MANAGER;
5078 }
5079
5080 if (!workq_kern_threadreq_initiate(kqu.kq->kq_p, kqr, ts, qos, flags)) {
5081 /*
5082 * Process is shutting down or exec'ing.
5083 * All the kqueues are going to be cleaned up
5084 * soon. Forget we even asked for a thread -
5085 * and make sure we don't ask for more.
5086 */
5087 kqu.kq->kq_state &= ~KQ_R2K_ARMED;
5088 kqueue_release_live(kqu);
5089 }
5090 }
5091
5092 /*
5093 * kqueue_threadreq_bind_prepost - prepost the bind to kevent
5094 *
5095 * This is used when kqueue_threadreq_bind may cause a lock inversion.
5096 */
5097 __attribute__((always_inline))
5098 void
5099 kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
5100 struct uthread *ut)
5101 {
5102 ut->uu_kqr_bound = kqr;
5103 kqr->tr_thread = get_machthread(ut);
5104 kqr->tr_state = WORKQ_TR_STATE_BINDING;
5105 }
5106
5107 /*
5108 * kqueue_threadreq_bind_commit - commit a bind prepost
5109 *
5110 * The workq code has to commit any binding prepost before the thread has
5111 * a chance to come back to userspace (and do kevent syscalls) or be aborted.
5112 */
5113 void
5114 kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
5115 {
5116 struct uthread *ut = get_bsdthread_info(thread);
5117 workq_threadreq_t kqr = ut->uu_kqr_bound;
5118 kqueue_t kqu = kqr_kqueue(p, kqr);
5119
5120 kqlock(kqu);
5121 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5122 kqueue_threadreq_bind(p, kqr, thread, 0);
5123 }
5124 kqunlock(kqu);
5125 }
5126
5127 static void
5128 kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
5129 workq_kern_threadreq_flags_t flags)
5130 {
5131 assert(kqr_thread_requested_pending(kqr));
5132
5133 kqlock_held(kqu);
5134
5135 if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5136 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5137 }
5138
5139 #if CONFIG_PREADOPT_TG
5140 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5141 uint16_t tg_ack_status;
5142 struct kqworkloop *kqwl = kqu.kqwl;
5143
5144 /* This thread is the one which is ack-ing the thread group on the kqwl
5145 * under the kqlock and will take action accordingly, needs acquire
5146 * barrier */
5147 if (os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE,
5148 KQWL_PREADOPT_TG_CLEAR_REDRIVE, &tg_ack_status, acquire)) {
5149 flags |= WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG;
5150 }
5151 }
5152 #endif
5153
5154 workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
5155 }
5156
5157 /*
5158 * kqueue_threadreq_bind - bind thread to processing kqrequest
5159 *
5160 * The provided thread will be responsible for delivering events
5161 * associated with the given kqrequest. Bind it and get ready for
5162 * the thread to eventually arrive.
5163 */
5164 void
5165 kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
5166 unsigned int flags)
5167 {
5168 kqueue_t kqu = kqr_kqueue(p, kqr);
5169 struct uthread *ut = get_bsdthread_info(thread);
5170
5171 kqlock_held(kqu);
5172
5173 assert(ut->uu_kqueue_override == 0);
5174
5175 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5176 assert(ut->uu_kqr_bound == kqr);
5177 assert(kqr->tr_thread == thread);
5178 } else {
5179 assert(kqr_thread_requested_pending(kqr));
5180 assert(kqr->tr_thread == THREAD_NULL);
5181 assert(ut->uu_kqr_bound == NULL);
5182 ut->uu_kqr_bound = kqr;
5183 kqr->tr_thread = thread;
5184 }
5185
5186 kqr->tr_state = WORKQ_TR_STATE_BOUND;
5187
5188 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5189 struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
5190
5191 if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
5192 /*
5193 * <rdar://problem/38626999> shows that asserting here is not ok.
5194 *
5195 * This is not supposed to happen for correct use of the interface,
5196 * but it is sadly possible for userspace (with the help of memory
5197 * corruption, such as over-release of a dispatch queue) to make
5198 * the creator thread the "owner" of a workloop.
5199 *
5200 * Once that happens, and that creator thread picks up the same
5201 * workloop as a servicer, we trip this codepath. We need to fixup
5202 * the state to forget about this thread being the owner, as the
5203 * entire workloop state machine expects servicers to never be
5204 * owners and everything would basically go downhill from here.
5205 */
5206 kqu.kqwl->kqwl_owner = THREAD_NULL;
5207 if (kqworkloop_override(kqu.kqwl)) {
5208 thread_drop_kevent_override(thread);
5209 }
5210 }
5211
5212 if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
5213 /*
5214 * Past this point, the interlock is the kq req lock again,
5215 * so we can fix the inheritor for good.
5216 */
5217 filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5218 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
5219 }
5220
5221 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
5222 thread_tid(thread), kqr->tr_kq_qos_index,
5223 (kqr->tr_kq_override_index << 16) | kqu.kqwl->kqwl_wakeup_qos);
5224
5225 ut->uu_kqueue_override = kqr->tr_kq_override_index;
5226 if (kqr->tr_kq_override_index) {
5227 thread_add_servicer_override(thread, kqr->tr_kq_override_index);
5228 }
5229
5230 #if CONFIG_PREADOPT_TG
5231 /* Remove reference from kqwl and mark it as bound with the SENTINEL */
5232 thread_group_qos_t old_tg;
5233 thread_group_qos_t new_tg;
5234 int ret = os_atomic_rmw_loop(kqr_preadopt_thread_group_addr(kqr), old_tg, new_tg, relaxed, {
5235 if (old_tg == KQWL_PREADOPTED_TG_NEVER) {
5236 os_atomic_rmw_loop_give_up(break); // It's an app, nothing to do
5237 }
5238 assert(old_tg != KQWL_PREADOPTED_TG_PROCESSED);
5239 new_tg = KQWL_PREADOPTED_TG_SENTINEL;
5240 });
5241
5242 if (ret) {
5243 KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqu.kqwl, KQWL_PREADOPT_OP_SERVICER_BIND, old_tg, new_tg);
5244
5245 if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
5246 struct thread_group *tg = KQWL_GET_PREADOPTED_TG(old_tg);
5247 assert(tg != NULL);
5248
5249 thread_set_preadopt_thread_group(thread, tg);
5250 thread_group_release_live(tg); // The thread has a reference
5251 } else {
5252 /*
5253 * The thread may already have a preadopt thread group on it -
5254 * we need to make sure to clear that.
5255 */
5256 thread_set_preadopt_thread_group(thread, NULL);
5257 }
5258
5259 /* We have taken action on the preadopted thread group set on the
5260  * kqwl, clear any redrive requests */
5261 os_atomic_store(&kqu.kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_CLEAR_REDRIVE, relaxed);
5262 }
5263 #endif
5264 kqueue_update_iotier_override(kqu);
5265 } else {
5266 assert(kqr->tr_kq_override_index == 0);
5267
5268 #if CONFIG_PREADOPT_TG
5269 /*
5270 * The thread may have a preadopt thread group on it already because it
5271 * got tagged with it as a creator thread. So we need to make sure to
5272 * clear that since we don't have preadopt thread groups for non-kqwl
5273 * cases
5274 */
5275 thread_set_preadopt_thread_group(thread, NULL);
5276 #endif
5277 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
5278 thread_tid(thread), kqr->tr_kq_qos_index,
5279 (kqr->tr_kq_override_index << 16) |
5280 !TAILQ_EMPTY(&kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
5281 }
5282 }
5283
5284 /*
5285 * kqueue_threadreq_cancel - abort a pending thread request
5286 *
5287 * Called when exiting/exec'ing. Forget our pending request.
5288 */
5289 void
5290 kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
5291 {
5292 kqueue_release(kqr_kqueue(p, kqr));
5293 }
5294
5295 workq_threadreq_param_t
5296 kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
5297 {
5298 struct kqworkloop *kqwl;
5299 workq_threadreq_param_t trp;
5300
5301 assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
5302 kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
5303 trp.trp_value = kqwl->kqwl_params;
5304 return trp;
5305 }
5306
5307 /*
5308 * kqueue_threadreq_unbind - unbind thread from processing kqueue
5309 *
5310 * End processing the per-QoS bucket of events and allow other threads
5311 * to be requested for future servicing.
5312 *
5313 * caller holds a reference on the kqueue.
5314 */
5315 void
5316 kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
5317 {
5318 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
5319 kqworkloop_unbind(kqr_kqworkloop(kqr));
5320 } else {
5321 kqworkq_unbind(p, kqr);
5322 }
5323 }
5324
5325 /*
5326 * If we aren't already busy processing events [for this QoS],
5327 * request workq thread support as appropriate.
5328 *
5329 * TBD - for now, we don't segregate out processing by QoS.
5330 *
5331 * - May be called with the kqueue's wait queue set locked,
5332 * so cannot do anything that could recurse on that.
5333 */
5334 static void
5335 kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
5336 {
5337 workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);
5338
5339 /* convert to thread qos value */
5340 assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
5341
5342 if (!kqr_thread_requested(kqr)) {
5343 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
5344 }
5345 }
5346
5347 /*
5348  * This represents the asynchronous QoS a given workloop contributes,
5349 * hence is the max of the current active knotes (override index)
5350 * and the workloop max qos (userspace async qos).
5351 */
5352 static kq_index_t
5353 kqworkloop_override(struct kqworkloop *kqwl)
5354 {
5355 workq_threadreq_t kqr = &kqwl->kqwl_request;
5356 return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
5357 }
5358
5359 static inline void
5360 kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
5361 {
5362 workq_threadreq_t kqr = &kqwl->kqwl_request;
5363
5364 kqlock_held(kqwl);
5365
5366 if (kqwl->kqwl_state & KQ_R2K_ARMED) {
5367 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5368 act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
5369 }
5370 }
5371
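/*
 * kqworkloop_update_threads_qos - recompute and apply a workloop's QoS state
 *
 * Updates the workloop wakeup QoS and/or override according to the requested
 * operation, then propagates the resulting override to the workloop owner,
 * to the bound servicer thread, or to the pending thread request. When there
 * is asynchronous work, no owner and no thread request, a new thread request
 * is initiated.
 *
 * Called with the kqueue lock held.
 */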
5372 static void
5373 kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
5374 {
5375 workq_threadreq_t kqr = &kqwl->kqwl_request;
5376 struct kqueue *kq = &kqwl->kqwl_kqueue;
5377 kq_index_t old_override = kqworkloop_override(kqwl);
5378
5379 kqlock_held(kqwl);
5380
5381 switch (op) {
5382 case KQWL_UTQ_UPDATE_WAKEUP_QOS:
5383 kqwl->kqwl_wakeup_qos = qos;
5384 kqworkloop_request_fire_r2k_notification(kqwl);
5385 goto recompute;
5386
5387 case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
5388 kqr->tr_kq_override_index = qos;
5389 goto recompute;
5390
5391 case KQWL_UTQ_PARKING:
5392 case KQWL_UTQ_UNBINDING:
5393 kqr->tr_kq_override_index = qos;
5394 OS_FALLTHROUGH;
5395
5396 case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
5397 if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
5398 assert(qos == THREAD_QOS_UNSPECIFIED);
5399 }
5400 if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5401 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5402 }
5403 kqwl->kqwl_wakeup_qos = 0;
5404 for (kq_index_t i = KQWL_NBUCKETS; i > 0; i--) {
5405 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i - 1])) {
5406 kqwl->kqwl_wakeup_qos = i;
5407 kqworkloop_request_fire_r2k_notification(kqwl);
5408 break;
5409 }
5410 }
5411 OS_FALLTHROUGH;
5412
5413 case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
5414 recompute:
5415 /*
5416 * When modifying the wakeup QoS or the override QoS, we always need to
5417 * maintain our invariant that kqr_override_index is at least as large
5418 * as the highest QoS for which an event is fired.
5419 *
5420 * However this override index can be larger when there is an overridden
5421 * suppressed knote pushing on the kqueue.
5422 */
5423 if (qos < kqwl->kqwl_wakeup_qos) {
5424 qos = kqwl->kqwl_wakeup_qos;
5425 }
5426 if (kqr->tr_kq_override_index < qos) {
5427 kqr->tr_kq_override_index = qos;
5428 }
5429 break;
5430
5431 case KQWL_UTQ_REDRIVE_EVENTS:
5432 break;
5433
5434 case KQWL_UTQ_SET_QOS_INDEX:
5435 kqr->tr_kq_qos_index = qos;
5436 break;
5437
5438 default:
5439 panic("unknown kqwl thread qos update operation: %d", op);
5440 }
5441
5442 thread_t kqwl_owner = kqwl->kqwl_owner;
5443 thread_t servicer = kqr_thread(kqr);
5444 boolean_t qos_changed = FALSE;
5445 kq_index_t new_override = kqworkloop_override(kqwl);
5446
5447 /*
5448 * Apply the diffs to the owner if applicable
5449 */
5450 if (kqwl_owner) {
5451 #if 0
5452 /* JMM - need new trace hooks for owner overrides */
5453 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
5454 kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
5455 (kqr->tr_kq_override_index << 16) | kqwl->kqwl_wakeup_qos);
5456 #endif
5457 if (new_override == old_override) {
5458 // nothing to do
5459 } else if (old_override == THREAD_QOS_UNSPECIFIED) {
5460 thread_add_kevent_override(kqwl_owner, new_override);
5461 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5462 thread_drop_kevent_override(kqwl_owner);
5463 } else { /* old_override != new_override */
5464 thread_update_kevent_override(kqwl_owner, new_override);
5465 }
5466 }
5467
5468 /*
5469 * apply the diffs to the servicer
5470 */
5471
5472 if (!kqr_thread_requested(kqr)) {
5473 /*
5474 * No servicer, nor thread-request
5475 *
5476 * Make a new thread request, unless there is an owner (or the workloop
5477 * is suspended in userland) or if there is no asynchronous work in the
5478 * first place.
5479 */
5480
5481 if (kqwl_owner == NULL && kqwl->kqwl_wakeup_qos) {
5482 int initiate_flags = 0;
5483 if (op == KQWL_UTQ_UNBINDING) {
5484 initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
5485 }
5486
5487 /* kqueue_threadreq_initiate handles the acknowledgement of the TG
5488 * if needed */
5489 kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
5490 }
5491 } else if (servicer) {
5492 /*
5493 * Servicer in flight
5494 *
5495 * Just apply the diff to the servicer
5496 */
5497
5498 #if CONFIG_PREADOPT_TG
5499 /* When there's already a servicer for the kqwl, the servicer will
5500 * adopt the thread group in the kqr; we don't need to poke the
5501 * workqueue subsystem to make different decisions due to the thread
5502 * group. Consider the current request acked.
5503 */
5504 os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_CLEAR_REDRIVE, relaxed);
5505 #endif
5506
5507 struct uthread *ut = get_bsdthread_info(servicer);
5508 if (ut->uu_kqueue_override != new_override) {
5509 if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
5510 thread_add_servicer_override(servicer, new_override);
5511 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5512 thread_drop_servicer_override(servicer);
5513 } else { /* ut->uu_kqueue_override != new_override */
5514 thread_update_servicer_override(servicer, new_override);
5515 }
5516 ut->uu_kqueue_override = new_override;
5517 qos_changed = TRUE;
5518 }
5519 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5520 /*
5521 * No events to deliver anymore.
5522 *
5523 * However canceling with turnstiles is challenging, so the fact that
5524 * the request isn't useful will be discovered by the servicer itself
5525 * later on.
5526 */
5527 } else if (old_override != new_override) {
5528 /*
5529 * Request is in flight
5530 *
5531 * Apply the diff to the thread request.
5532 */
5533 kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
5534 qos_changed = TRUE;
5535 }
5536
5537 if (qos_changed) {
5538 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
5539 thread_tid(servicer), kqr->tr_kq_qos_index,
5540 (kqr->tr_kq_override_index << 16) | kqwl->kqwl_wakeup_qos);
5541 }
5542 }
5543
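/*
 * kqworkloop_update_iotier_override - apply the workloop's I/O tier override
 *
 * Reads the current I/O tier override of the workloop and, if a servicer
 * thread is bound, applies it to that thread.
 *
 * Called with the kqueue lock held.
 */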
5544 static void
5545 kqworkloop_update_iotier_override(struct kqworkloop *kqwl)
5546 {
5547 workq_threadreq_t kqr = &kqwl->kqwl_request;
5548 thread_t servicer = kqr_thread(kqr);
5549 uint8_t iotier = os_atomic_load(&kqwl->kqwl_iotier_override, relaxed);
5550
5551 kqlock_held(kqwl);
5552
5553 if (servicer) {
5554 thread_update_servicer_iotier_override(servicer, iotier);
5555 }
5556 }
5557
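/*
 * kqworkloop_wakeup - note a wakeup at a given QoS on a workloop kqueue
 *
 * Shortcuts wakeups that wouldn't raise the current wakeup QoS, and defers
 * the QoS recomputation to kqworkloop_end_processing() when called from the
 * servicer thread while it is still in its processing phase.
 */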
5558 static void
5559 kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
5560 {
5561 if (qos <= kqwl->kqwl_wakeup_qos) {
5562 /*
5563 * Shortcut wakeups that really do nothing useful
5564 */
5565 return;
5566 }
5567
5568 if ((kqwl->kqwl_state & KQ_PROCESSING) &&
5569 kqr_thread(&kqwl->kqwl_request) == current_thread()) {
5570 /*
5571 * kqworkloop_end_processing() will perform the required QoS
5572 * computations when it unsets the processing mode.
5573 */
5574 return;
5575 }
5576
5577 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
5578 }
5579
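/*
 * kqueue_get_suppressed_queue - return the suppression queue for a knote
 *
 * Workloops have a single suppression queue, workqs have one queue per QoS
 * bucket, and kqfiles have their own dedicated queue.
 */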
5580 static struct kqtailq *
5581 kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
5582 {
5583 if (kq.kq->kq_state & KQ_WORKLOOP) {
5584 return &kq.kqwl->kqwl_suppressed;
5585 } else if (kq.kq->kq_state & KQ_WORKQ) {
5586 return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index - 1];
5587 } else {
5588 return &kq.kqf->kqf_suppressed;
5589 }
5590 }
5591
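/*
 * kqueue_alloc_turnstile - lazily allocate a workloop kqueue's turnstile
 *
 * Returns the already published turnstile when KQ_HAS_TURNSTILE is set
 * (the load pairs with the release barrier used at publication), allocates
 * and publishes one for workloops otherwise, and returns TURNSTILE_NULL for
 * non-workloop kqueues.
 */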
5592 struct turnstile *
5593 kqueue_alloc_turnstile(kqueue_t kqu)
5594 {
5595 struct kqworkloop *kqwl = kqu.kqwl;
5596 kq_state_t kq_state;
5597
5598 kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
5599 if (kq_state & KQ_HAS_TURNSTILE) {
5600 /* force a dependency to pair with the atomic or-with-release below */
5601 return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
5602 (uintptr_t)kq_state);
5603 }
5604
5605 if (!(kq_state & KQ_WORKLOOP)) {
5606 return TURNSTILE_NULL;
5607 }
5608
5609 struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
5610 bool workq_locked = false;
5611
5612 kqlock(kqu);
5613
5614 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5615 workq_locked = true;
5616 workq_kern_threadreq_lock(kqwl->kqwl_p);
5617 }
5618
5619 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
5620 free_ts = ts;
5621 ts = kqwl->kqwl_turnstile;
5622 } else {
5623 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
5624 ts, TURNSTILE_WORKLOOPS);
5625
5626 /* release-barrier to pair with the unlocked load of kqwl_turnstile above */
5627 os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);
5628
5629 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5630 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
5631 &kqwl->kqwl_request, kqwl->kqwl_owner,
5632 ts, TURNSTILE_IMMEDIATE_UPDATE);
5633 /*
5634 * The workq may no longer be the interlock after this.
5635 * In which case the inheritor wasn't updated.
5636 */
5637 }
5638 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
5639 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5640 }
5641 }
5642
5643 if (workq_locked) {
5644 workq_kern_threadreq_unlock(kqwl->kqwl_p);
5645 }
5646
5647 kqunlock(kqu);
5648
5649 if (free_ts) {
5650 turnstile_deallocate(free_ts);
5651 } else {
5652 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
5653 }
5654 return ts;
5655 }
5656
5657 __attribute__((always_inline))
5658 struct turnstile *
5659 kqueue_turnstile(kqueue_t kqu)
5660 {
5661 kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
5662 if (kq_state & KQ_WORKLOOP) {
5663 return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
5664 }
5665 return TURNSTILE_NULL;
5666 }
5667
5668 __attribute__((always_inline))
5669 struct turnstile *
5670 kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
5671 {
5672 struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
5673 if (kqwl) {
5674 return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
5675 }
5676 return TURNSTILE_NULL;
5677 }
5678
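/*
 * kqworkloop_set_overcommit - make a workloop's thread request overcommit
 *
 * Sets WORKQ_TR_FLAG_OVERCOMMIT on the workloop's thread request, modifying
 * the request with the workqueue subsystem if it is already pending.
 */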
5679 static void
5680 kqworkloop_set_overcommit(struct kqworkloop *kqwl)
5681 {
5682 workq_threadreq_t kqr = &kqwl->kqwl_request;
5683
5684 /*
5685 * This test is racy, but since we never remove this bit,
5686 * it allows us to avoid taking a lock.
5687 */
5688 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
5689 return;
5690 }
5691
5692 kqlock_held(kqwl);
5693
5694 if (kqr_thread_requested_pending(kqr)) {
5695 kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
5696 WORKQ_THREADREQ_MAKE_OVERCOMMIT);
5697 } else {
5698 kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
5699 }
5700 }
5701
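/*
 * kqworkq_update_override - push a QoS override onto a workq QoS bucket
 *
 * If the requested override is higher than both the bucket's base QoS and
 * its current override, record it on the thread request and apply it to the
 * servicer thread if one is bound.
 *
 * Called with the kqueue lock held.
 */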
5702 static void
5703 kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
5704 kq_index_t override_index)
5705 {
5706 workq_threadreq_t kqr;
5707 kq_index_t old_override_index;
5708 kq_index_t queue_index = kn->kn_qos_index;
5709
5710 if (override_index <= queue_index) {
5711 return;
5712 }
5713
5714 kqr = kqworkq_get_request(kqwq, queue_index);
5715
5716 kqlock_held(kqwq);
5717
5718 old_override_index = kqr->tr_kq_override_index;
5719 if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
5720 thread_t servicer = kqr_thread(kqr);
5721 kqr->tr_kq_override_index = override_index;
5722
5723 /* apply the override to [incoming?] servicing thread */
5724 if (servicer) {
5725 if (old_override_index) {
5726 thread_update_kevent_override(servicer, override_index);
5727 } else {
5728 thread_add_kevent_override(servicer, override_index);
5729 }
5730 }
5731 }
5732 }
5733
5734 static void
5735 kqueue_update_iotier_override(kqueue_t kqu)
5736 {
5737 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5738 kqworkloop_update_iotier_override(kqu.kqwl);
5739 }
5740 }
5741
5742 static void
5743 kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
5744 {
5745 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5746 kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
5747 qos);
5748 } else {
5749 kqworkq_update_override(kqu.kqwq, kn, qos);
5750 }
5751 }
5752
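/*
 * kqworkloop_unbind_locked - detach the servicer thread from a workloop
 *
 * Clears the uthread binding, optionally drops the servicer QoS override
 * immediately, resets the turnstile inheritor when there is no owner, drops
 * any preadopted thread group (where configured) and the I/O tier override,
 * and marks the thread request idle.
 *
 * Called with the kqueue lock held.
 */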
5753 static void
5754 kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
5755 enum kqwl_unbind_locked_mode how)
5756 {
5757 struct uthread *ut = get_bsdthread_info(thread);
5758 workq_threadreq_t kqr = &kqwl->kqwl_request;
5759
5760 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
5761 thread_tid(thread), 0, 0);
5762
5763 kqlock_held(kqwl);
5764
5765 assert(ut->uu_kqr_bound == kqr);
5766 ut->uu_kqr_bound = NULL;
5767 if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
5768 ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5769 thread_drop_servicer_override(thread);
5770 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5771 }
5772
5773 if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
5774 turnstile_update_inheritor(kqwl->kqwl_turnstile,
5775 TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
5776 turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
5777 TURNSTILE_INTERLOCK_HELD);
5778 }
5779
5780 #if CONFIG_PREADOPT_TG
5781 /* The kqueue is able to adopt a thread group again */
5782
5783 thread_group_qos_t old_tg, new_tg = NULL;
5784 int ret = os_atomic_rmw_loop(kqr_preadopt_thread_group_addr(kqr), old_tg, new_tg, relaxed, {
5785 new_tg = old_tg;
5786 if (old_tg == KQWL_PREADOPTED_TG_SENTINEL || old_tg == KQWL_PREADOPTED_TG_PROCESSED) {
5787 new_tg = KQWL_PREADOPTED_TG_NULL;
5788 }
5789 });
5790 KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_SERVICER_UNBIND, old_tg, KQWL_PREADOPTED_TG_NULL);
5791
5792 if (ret) {
5793 // Servicer can drop any preadopt thread group it has since it has
5794 // unbound.
5795 thread_set_preadopt_thread_group(thread, NULL);
5796 }
5797 #endif
5798 thread_update_servicer_iotier_override(thread, THROTTLE_LEVEL_END);
5799
5800 kqr->tr_thread = THREAD_NULL;
5801 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5802 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5803 }
5804
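/*
 * kqworkloop_unbind_delayed_override_drop - drop a deferred servicer override
 *
 * Used after an unbind performed with KQWL_OVERRIDE_DROP_DELAYED to shed the
 * servicer QoS override once the unbound thread no longer needs it.
 */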
5805 static void
5806 kqworkloop_unbind_delayed_override_drop(thread_t thread)
5807 {
5808 struct uthread *ut = get_bsdthread_info(thread);
5809 assert(ut->uu_kqr_bound == NULL);
5810 if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5811 thread_drop_servicer_override(thread);
5812 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5813 }
5814 }
5815
5816 /*
5817 * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
5818 *
5819 * It will acknowledge events, and possibly request a new thread if:
5820 * - there were active events left
5821 * - we pended waitq hook callouts during processing
5822 * - we pended wakeups while processing (or unsuppressing)
5823 *
5824 * Called with kqueue lock held.
5825 */
5826 static void
5827 kqworkloop_unbind(struct kqworkloop *kqwl)
5828 {
5829 struct kqueue *kq = &kqwl->kqwl_kqueue;
5830 workq_threadreq_t kqr = &kqwl->kqwl_request;
5831 thread_t thread = kqr_thread_fast(kqr);
5832 int op = KQWL_UTQ_PARKING;
5833 kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;
5834
5835 assert(thread == current_thread());
5836
5837 kqlock(kqwl);
5838
5839 /*
5840 * Forcing the KQ_PROCESSING flag ensures that QoS updates caused by
5841 * unsuppressing knotes are not applied until the eventual call to
5842 * kqworkloop_update_threads_qos() below.
5843 */
5844 assert((kq->kq_state & KQ_PROCESSING) == 0);
5845 if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5846 kq->kq_state |= KQ_PROCESSING;
5847 qos_override = kqworkloop_acknowledge_events(kqwl);
5848 kq->kq_state &= ~KQ_PROCESSING;
5849 }
5850
5851 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
5852 kqworkloop_update_threads_qos(kqwl, op, qos_override);
5853
5854 kqunlock(kqwl);
5855
5856 /*
5857 * Drop the override on the current thread last, after the call to
5858 * kqworkloop_update_threads_qos above.
5859 */
5860 kqworkloop_unbind_delayed_override_drop(thread);
5861
5862 /* If last reference, dealloc the workloop kq */
5863 kqworkloop_release(kqwl);
5864 }
5865
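/*
 * kqworkq_unbind_locked - detach a bound thread from a workq QoS bucket
 *
 * Clears the uthread binding and resets the thread request to idle, returning
 * the override that was in effect so the caller can drop it.
 *
 * Called with the kqueue lock held.
 */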
5866 static thread_qos_t
5867 kqworkq_unbind_locked(struct kqworkq *kqwq,
5868 workq_threadreq_t kqr, thread_t thread)
5869 {
5870 struct uthread *ut = get_bsdthread_info(thread);
5871 kq_index_t old_override = kqr->tr_kq_override_index;
5872
5873 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
5874 thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);
5875
5876 kqlock_held(kqwq);
5877
5878 assert(ut->uu_kqr_bound == kqr);
5879 ut->uu_kqr_bound = NULL;
5880 kqr->tr_thread = THREAD_NULL;
5881 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5882 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5883 kqwq->kqwq_state &= ~KQ_R2K_ARMED;
5884
5885 return old_override;
5886 }
5887
5888 /*
5889 * kqworkq_unbind - unbind of a workq kqueue from a thread
5890 *
5891 * We may have to request new threads.
5892 * This can happen when there are no waiting processing threads and:
5893 * - there were active events we never got to (count > 0)
5894 * - we pended waitq hook callouts during processing
5895 * - we pended wakeups while processing (or unsuppressing)
5896 */
5897 static void
5898 kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
5899 {
5900 struct kqworkq *kqwq = (struct kqworkq *)p->p_fd.fd_wqkqueue;
5901 __assert_only int rc;
5902
5903 kqlock(kqwq);
5904 rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
5905 assert(rc == -1);
5906 kqunlock(kqwq);
5907 }
5908
5909 workq_threadreq_t
5910 kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
5911 {
5912 assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
5913 return &kqwq->kqwq_request[qos_index - 1];
5914 }
5915
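/*
 * knote_reset_priority - reset a knote's QoS from a pthread priority
 *
 * Normalizes the requested priority for the kind of kqueue the knote belongs
 * to (workloop, workq or kqfile), stores it in kn_qos, and resynchronizes
 * kn_qos_override and kn_qos_index, pushing an override instead when the
 * knote is currently suppressed.
 */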
5916 static void
5917 knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
5918 {
5919 kq_index_t qos = _pthread_priority_thread_qos(pp);
5920
5921 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5922 assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
5923 pp = _pthread_priority_normalize(pp);
5924 } else if (kqu.kq->kq_state & KQ_WORKQ) {
5925 if (qos == THREAD_QOS_UNSPECIFIED) {
5926 /* On workqueues, outside of QoS means MANAGER */
5927 qos = KQWQ_QOS_MANAGER;
5928 pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
5929 } else {
5930 pp = _pthread_priority_normalize(pp);
5931 }
5932 } else {
5933 pp = _pthread_unspecified_priority();
5934 qos = THREAD_QOS_UNSPECIFIED;
5935 }
5936
5937 kn->kn_qos = (int32_t)pp;
5938
5939 if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
5940 /* Never lower QoS when in "Merge" mode */
5941 kn->kn_qos_override = qos;
5942 }
5943
5944 /* only adjust in-use qos index when not suppressed */
5945 if (kn->kn_status & KN_SUPPRESSED) {
5946 kqueue_update_override(kqu, kn, qos);
5947 } else if (kn->kn_qos_index != qos) {
5948 knote_dequeue(kqu, kn);
5949 kn->kn_qos_index = qos;
5950 }
5951 }
5952
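/*
 * knote_adjust_qos - apply a filter-requested QoS change to a knote
 *
 * Computes the effective QoS from the filter result and the registration QoS
 * (used as a floor, or as a fallback with _PTHREAD_PRIORITY_FALLBACK_FLAG),
 * saturates it while in "Merge" mode, then either updates kn_qos_index or
 * pushes a manual override for suppressed knotes.
 */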
5953 static void
5954 knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
5955 {
5956 thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
5957
5958 kqlock_held(kq);
5959
5960 assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
5961 assert(qos_index < THREAD_QOS_LAST);
5962
5963 /*
5964 * Early exit for knotes that should not change QoS
5965 */
5966 if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
5967 panic("filter %d cannot change QoS", kn->kn_filtid);
5968 } else if (__improbable(!knote_has_qos(kn))) {
5969 return;
5970 }
5971
5972 /*
5973 * knotes with the FALLBACK flag will only use their registration QoS if the
5974 * incoming event has no QoS, else, the registration QoS acts as a floor.
5975 */
5976 thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
5977 if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
5978 if (qos_index == THREAD_QOS_UNSPECIFIED) {
5979 qos_index = req_qos;
5980 }
5981 } else {
5982 if (qos_index < req_qos) {
5983 qos_index = req_qos;
5984 }
5985 }
5986 if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
5987 /* Never lower QoS when in "Merge" mode */
5988 return;
5989 }
5990
5991 if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
5992 /*
5993 * When we're trying to update the QoS override while both an
5994 * f_event() and other f_* calls are running concurrently, any of these
5995 * in-flight calls may want to perform overrides that aren't properly
5996 * serialized with each other.
5997 *
5998 * The first update that observes this racy situation enters a "Merge"
5999 * mode which causes subsequent override requests to saturate the
6000 * override instead of replacing its value.
6001 *
6002 * This mode is left when knote_unlock() or knote_post()
6003 * observe that no other f_* routine is in flight.
6004 */
6005 kn->kn_status |= KN_MERGE_QOS;
6006 }
6007
6008 /*
6009 * Now apply the override if it changed.
6010 */
6011
6012 if (kn->kn_qos_override == qos_index) {
6013 return;
6014 }
6015
6016 kn->kn_qos_override = qos_index;
6017
6018 if (kn->kn_status & KN_SUPPRESSED) {
6019 /*
6020 * For suppressed events, the kn_qos_index field cannot be touched as it
6021 * allows us to know on which suppress queue the knote is for a kqworkq.
6022 *
6023 * Also, there's no natural push applied on the kqueues when this field
6024 * changes anyway. We hence need to apply manual overrides in this case,
6025 * which will be cleared when the events are later acknowledged.
6026 */
6027 kqueue_update_override(kq, kn, qos_index);
6028 } else if (kn->kn_qos_index != qos_index) {
6029 knote_dequeue(kq, kn);
6030 kn->kn_qos_index = qos_index;
6031 }
6032 }
6033
6034 void
6035 klist_init(struct klist *list)
6036 {
6037 SLIST_INIT(list);
6038 }
6039
6040
6041 /*
6042 * Query/Post each knote in the object's list
6043 *
6044 * The object lock protects the list. It is assumed that the filter/event
6045 * routine for the object can determine that the object is already locked (via
6046 * the hint) and not deadlock itself.
6047 *
6048 * Autodetach is a specific contract which will detach all knotes from the
6049 * object prior to posting the final event for that knote. This is done while
6050 * under the object lock. A breadcrumb is left in the knote's next pointer to
6051 * indicate to future calls to f_detach routines that they need not reattempt
6052 * to knote_detach from the object's klist again. This is currently used by
6053 * EVFILTID_SPEC, EVFILTID_TTY, EVFILTID_PTMX
6054 *
6055 */
6056 void
6057 knote(struct klist *list, long hint, bool autodetach)
6058 {
6059 struct knote *kn;
6060 struct knote *tmp_kn;
6061 SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmp_kn) {
6062 /*
6063 * We can modify the knote's next pointer since we are holding the
6064 * object lock and the list can't be concurrently modified. Anyone
6065 * determining auto-detached-ness of a knote should take the primitive lock
6066 * to synchronize.
6067 *
6068 * Note that we do this here instead of the filter's f_event since we may
6069 * not even post the event if the knote is being dropped.
6070 */
6071 if (autodetach) {
6072 kn->kn_selnext.sle_next = KNOTE_AUTODETACHED;
6073 }
6074 knote_post(kn, hint);
6075 }
6076
6077 /* Blast away the entire klist */
6078 if (autodetach) {
6079 klist_init(list);
6080 }
6081 }
6082
6083 /*
6084 * attach a knote to the specified list. Return true if this is the first entry.
6085 * The list is protected by whatever lock the object it is associated with uses.
6086 */
6087 int
6088 knote_attach(struct klist *list, struct knote *kn)
6089 {
6090 int ret = SLIST_EMPTY(list);
6091 SLIST_INSERT_HEAD(list, kn, kn_selnext);
6092 return ret;
6093 }
6094
6095 /*
6096 * detach a knote from the specified list. Return true if that was the last
6097 * entry. The list is protected by whatever lock the object it is associated
6098 * with uses.
6099 */
6100 int
6101 knote_detach(struct klist *list, struct knote *kn)
6102 {
6103 assert(!KNOTE_IS_AUTODETACHED(kn));
6104
6105 SLIST_REMOVE(list, kn, knote, kn_selnext);
6106 return SLIST_EMPTY(list);
6107 }
6108
6109 /*
6110 * knote_vanish - Indicate that the source has vanished
6111 *
6112 * Used only for vanishing ports - vanishing fds go
6113 * through knote_fdclose()
6114 *
6115 * If the knote has requested EV_VANISHED delivery,
6116 * arrange for that. Otherwise, deliver a NOTE_REVOKE
6117 * event for backward compatibility.
6118 *
6119 * The knote is marked as having vanished. The source's
6120 * reference to the knote is dropped by caller, but the knote's
6121 * source reference is only cleaned up later when the knote is dropped.
6122 *
6123 * Our caller already has the object lock held. Calling
6124 * the detach routine would try to take that lock
6125 * recursively - which likely is not supported.
6126 */
6127 void
6128 knote_vanish(struct klist *list, bool make_active)
6129 {
6130 struct knote *kn;
6131 struct knote *kn_next;
6132
6133 SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
6134 struct kqueue *kq = knote_get_kq(kn);
6135
6136 kqlock(kq);
6137 if (__probable(kn->kn_status & KN_REQVANISH)) {
6138 /*
6139 * If EV_VANISH supported - prepare to deliver one
6140 */
6141 kn->kn_status |= KN_VANISHED;
6142 } else {
6143 /*
6144 * Handle the legacy way to indicate that the port/portset was
6145 * deallocated or left the current Mach portspace (modern technique
6146 * is with an EV_VANISHED protocol).
6147 *
6148 * Deliver an EV_EOF event for these changes (hopefully it will get
6149 * delivered before the port name recycles to the same generation
6150 * count and someone tries to re-register a kevent for it or the
6151 * events are udata-specific - avoiding a conflict).
6152 */
6153 kn->kn_flags |= EV_EOF | EV_ONESHOT;
6154 }
6155 if (make_active) {
6156 knote_activate(kq, kn, FILTER_ACTIVE);
6157 }
6158 kqunlock(kq);
6159 }
6160 }
6161
6162 /*
6163 * remove all knotes referencing a specified fd
6164 *
6165 * Entered with the proc_fd lock already held.
6166 * It returns the same way, but may drop it temporarily.
6167 */
6168 void
6169 knote_fdclose(struct proc *p, int fd)
6170 {
6171 struct filedesc *fdt = &p->p_fd;
6172 struct klist *list;
6173 struct knote *kn;
6174 KNOTE_LOCK_CTX(knlc);
6175
6176 restart:
6177 list = &fdt->fd_knlist[fd];
6178 SLIST_FOREACH(kn, list, kn_link) {
6179 struct kqueue *kq = knote_get_kq(kn);
6180
6181 kqlock(kq);
6182
6183 if (kq->kq_p != p) {
6184 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
6185 __func__, kq->kq_p, p);
6186 }
6187
6188 /*
6189 * If the knote supports EV_VANISHED delivery,
6190 * transition it to vanished mode (or skip over
6191 * it if already vanished).
6192 */
6193 if (kn->kn_status & KN_VANISHED) {
6194 kqunlock(kq);
6195 continue;
6196 }
6197
6198 proc_fdunlock(p);
6199 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
6200 /* the knote was dropped by someone, nothing to do */
6201 } else if (kn->kn_status & KN_REQVANISH) {
6202 /*
6203 * Since we have REQVANISH for this knote, we need to notify clients about
6204 * the EV_VANISHED.
6205 *
6206 * But unlike mach ports, we want to do the detach here as well and not
6207 * defer it so that we can release the iocount that is on the knote and
6208 * close the fp.
6209 */
6210 kn->kn_status |= KN_VANISHED;
6211
6212 /*
6213 * There may be a concurrent post happening, make sure to wait for it
6214 * before we detach. knote_wait_for_post() unlocks the kq on exit.
6215 */
6216 knote_wait_for_post(kq, kn);
6217
6218 knote_fops(kn)->f_detach(kn);
6219 if (kn->kn_is_fd) {
6220 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6221 }
6222 kn->kn_filtid = EVFILTID_DETACHED;
6223 kqlock(kq);
6224
6225 knote_activate(kq, kn, FILTER_ACTIVE);
6226 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
6227 } else {
6228 knote_drop(kq, kn, &knlc);
6229 }
6230
6231 proc_fdlock(p);
6232 goto restart;
6233 }
6234 }
6235
6236 /*
6237 * knote_fdfind - lookup a knote in the fd table for process
6238 *
6239 * If the filter is file-based, lookup based on fd index.
6240 * Otherwise use a hash based on the ident.
6241 *
6242 * Matching is based on kq, filter, and ident. Optionally,
6243 * it may also be based on the udata field in the kevent -
6244 * allowing multiple event registration for the file object
6245 * per kqueue.
6246 *
6247 * fd_knhashlock or fdlock held on entry (and exit)
6248 */
6249 static struct knote *
6250 knote_fdfind(struct kqueue *kq,
6251 const struct kevent_internal_s *kev,
6252 bool is_fd,
6253 struct proc *p)
6254 {
6255 struct filedesc *fdp = &p->p_fd;
6256 struct klist *list = NULL;
6257 struct knote *kn = NULL;
6258
6259 /*
6260 * determine where to look for the knote
6261 */
6262 if (is_fd) {
6263 /* fd-based knotes are linked off the fd table */
6264 if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
6265 list = &fdp->fd_knlist[kev->kei_ident];
6266 }
6267 } else if (fdp->fd_knhashmask != 0) {
6268 /* hash non-fd knotes here too */
6269 list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
6270 }
6271
6272 /*
6273 * scan the selected list looking for a match
6274 */
6275 if (list != NULL) {
6276 SLIST_FOREACH(kn, list, kn_link) {
6277 if (kq == knote_get_kq(kn) &&
6278 kev->kei_ident == kn->kn_id &&
6279 kev->kei_filter == kn->kn_filter) {
6280 if (kev->kei_flags & EV_UDATA_SPECIFIC) {
6281 if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
6282 kev->kei_udata == kn->kn_udata) {
6283 break; /* matching udata-specific knote */
6284 }
6285 } else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
6286 break; /* matching non-udata-specific knote */
6287 }
6288 }
6289 }
6290 }
6291 return kn;
6292 }
6293
6294 /*
6295 * kq_add_knote- Add knote to the fd table for process
6296 * while checking for duplicates.
6297 *
6298 * All file-based filters associate a list of knotes by file
6299 * descriptor index. All other filters hash the knote by ident.
6300 *
6301 * May have to grow the table of knote lists to cover the
6302 * file descriptor index presented.
6303 *
6304 * fd_knhashlock and fdlock unheld on entry (and exit).
6305 *
6306 * Takes a rwlock boost if inserting the knote is successful.
6307 */
6308 static int
6309 kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
6310 struct proc *p)
6311 {
6312 struct filedesc *fdp = &p->p_fd;
6313 struct klist *list = NULL;
6314 int ret = 0;
6315 bool is_fd = kn->kn_is_fd;
6316
6317 if (is_fd) {
6318 proc_fdlock(p);
6319 } else {
6320 knhash_lock(fdp);
6321 }
6322
6323 if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
6324 /* found an existing knote: we can't add this one */
6325 ret = ERESTART;
6326 goto out_locked;
6327 }
6328
6329 /* knote was not found: add it now */
6330 if (!is_fd) {
6331 if (fdp->fd_knhashmask == 0) {
6332 u_long size = 0;
6333
6334 list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
6335 if (list == NULL) {
6336 ret = ENOMEM;
6337 goto out_locked;
6338 }
6339
6340 fdp->fd_knhash = list;
6341 fdp->fd_knhashmask = size;
6342 }
6343
6344 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6345 SLIST_INSERT_HEAD(list, kn, kn_link);
6346 ret = 0;
6347 goto out_locked;
6348 } else {
6349 /* knote is fd based */
6350
6351 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
6352 u_int size = 0;
6353
6354 /* Make sure that fd stays below current process's soft limit AND system allowed per-process limits */
6355 if (kn->kn_id >= (uint64_t)proc_limitgetcur_nofile(p)) {
6356 ret = EINVAL;
6357 goto out_locked;
6358 }
6359 /* have to grow the fd_knlist */
6360 size = fdp->fd_knlistsize;
6361 while (size <= kn->kn_id) {
6362 size += KQEXTENT;
6363 }
6364
6365 if (size >= (UINT_MAX / sizeof(struct klist))) {
6366 ret = EINVAL;
6367 goto out_locked;
6368 }
6369
6370 list = kalloc_type(struct klist, size, Z_WAITOK | Z_ZERO);
6371 if (list == NULL) {
6372 ret = ENOMEM;
6373 goto out_locked;
6374 }
6375
6376 bcopy(fdp->fd_knlist, list,
6377 fdp->fd_knlistsize * sizeof(struct klist));
6378 kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist);
6379 fdp->fd_knlist = list;
6380 fdp->fd_knlistsize = size;
6381 }
6382
6383 list = &fdp->fd_knlist[kn->kn_id];
6384 SLIST_INSERT_HEAD(list, kn, kn_link);
6385 ret = 0;
6386 goto out_locked;
6387 }
6388
6389 out_locked:
6390 if (ret == 0) {
6391 kqlock(kq);
6392 assert((kn->kn_status & KN_LOCKED) == 0);
6393 (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
6394 kqueue_retain(kq); /* retain a kq ref */
6395 }
6396 if (is_fd) {
6397 proc_fdunlock(p);
6398 } else {
6399 knhash_unlock(fdp);
6400 }
6401
6402 return ret;
6403 }
6404
6405 /*
6406 * kq_remove_knote - remove a knote from the fd table for process
6407 *
6408 * If the filter is file-based, remove based on fd index.
6409 * Otherwise remove from the hash based on the ident.
6410 *
6411 * fd_knhashlock and fdlock unheld on entry (and exit).
6412 */
6413 static void
6414 kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
6415 struct knote_lock_ctx *knlc)
6416 {
6417 struct filedesc *fdp = &p->p_fd;
6418 struct klist *list = NULL;
6419 uint16_t kq_state;
6420 bool is_fd = kn->kn_is_fd;
6421
6422 if (is_fd) {
6423 proc_fdlock(p);
6424 } else {
6425 knhash_lock(fdp);
6426 }
6427
6428 if (is_fd) {
6429 assert((u_int)fdp->fd_knlistsize > kn->kn_id);
6430 list = &fdp->fd_knlist[kn->kn_id];
6431 } else {
6432 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6433 }
6434 SLIST_REMOVE(list, kn, knote, kn_link);
6435
6436 kqlock(kq);
6437
6438 /* Update the servicer iotier override */
6439 kqueue_update_iotier_override(kq);
6440
6441 kq_state = kq->kq_state;
6442 if (knlc) {
6443 knote_unlock_cancel(kq, kn, knlc);
6444 } else {
6445 kqunlock(kq);
6446 }
6447 if (is_fd) {
6448 proc_fdunlock(p);
6449 } else {
6450 knhash_unlock(fdp);
6451 }
6452
6453 if (kq_state & KQ_DYNAMIC) {
6454 kqworkloop_release((struct kqworkloop *)kq);
6455 }
6456 }
6457
6458 /*
6459 * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
6460 * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
6461 *
6462 * fd_knhashlock or fdlock unheld on entry (and exit)
6463 */
6464
6465 static struct knote *
6466 kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
6467 bool is_fd, struct proc *p)
6468 {
6469 struct filedesc *fdp = &p->p_fd;
6470 struct knote *kn;
6471
6472 if (is_fd) {
6473 proc_fdlock(p);
6474 } else {
6475 knhash_lock(fdp);
6476 }
6477
6478 /*
6479 * Temporary horrible hack:
6480 * this cast is gross and will go away in a future change.
6481 * It is OK to do because we don't look at xflags/s_fflags,
6482 * and that when we cast down the kev this way,
6483 * the truncated filter field works.
6484 */
6485 kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);
6486
6487 if (kn) {
6488 kqlock(kq);
6489 assert(knote_get_kq(kn) == kq);
6490 }
6491
6492 if (is_fd) {
6493 proc_fdunlock(p);
6494 } else {
6495 knhash_unlock(fdp);
6496 }
6497
6498 return kn;
6499 }
6500
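/*
 * knote_get_tailq - return the queue a knote should be enqueued on
 *
 * Workloops and workqs use per-QoS buckets indexed by kn_qos_index, while
 * kqfiles have a single queue.
 */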
6501 static struct kqtailq *
6502 knote_get_tailq(kqueue_t kqu, struct knote *kn)
6503 {
6504 kq_index_t qos_index = kn->kn_qos_index;
6505
6506 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6507 assert(qos_index > 0 && qos_index <= KQWL_NBUCKETS);
6508 return &kqu.kqwl->kqwl_queue[qos_index - 1];
6509 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6510 assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
6511 return &kqu.kqwq->kqwq_queue[qos_index - 1];
6512 } else {
6513 assert(qos_index == QOS_INDEX_KQFILE);
6514 return &kqu.kqf->kqf_queue;
6515 }
6516 }
6517
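/*
 * knote_enqueue - queue an active knote for delivery
 *
 * Does nothing unless the knote is active and neither disabled, suppressed,
 * dropping nor already queued. Wakes the kqueue up when the target queue
 * transitions from empty to non-empty.
 *
 * Called with the kqueue lock held.
 */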
6518 static void
6519 knote_enqueue(kqueue_t kqu, struct knote *kn)
6520 {
6521 kqlock_held(kqu);
6522
6523 if ((kn->kn_status & KN_ACTIVE) == 0) {
6524 return;
6525 }
6526
6527 if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING | KN_QUEUED)) {
6528 return;
6529 }
6530
6531 struct kqtailq *queue = knote_get_tailq(kqu, kn);
6532 bool wakeup = TAILQ_EMPTY(queue);
6533
6534 TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
6535 kn->kn_status |= KN_QUEUED;
6536 kqu.kq->kq_count++;
6537
6538 if (wakeup) {
6539 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6540 kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
6541 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6542 kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
6543 } else {
6544 kqfile_wakeup(kqu.kqf, 0, THREAD_AWAKENED);
6545 }
6546 }
6547 }
6548
6549 __attribute__((always_inline))
6550 static inline void
6551 knote_dequeue(kqueue_t kqu, struct knote *kn)
6552 {
6553 if (kn->kn_status & KN_QUEUED) {
6554 struct kqtailq *queue = knote_get_tailq(kqu, kn);
6555
6556 // attaching the knote calls knote_reset_priority() without
6557 // the kqlock which is fine, so we can't call kqlock_held()
6558 // if we're not queued.
6559 kqlock_held(kqu);
6560
6561 TAILQ_REMOVE(queue, kn, kn_tqe);
6562 kn->kn_status &= ~KN_QUEUED;
6563 kqu.kq->kq_count--;
6564 if ((kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
6565 assert((kqu.kq->kq_count == 0) ==
6566 (bool)TAILQ_EMPTY(queue));
6567 }
6568 }
6569 }
6570
6571 /* called with kqueue lock held */
6572 static void
6573 knote_suppress(kqueue_t kqu, struct knote *kn)
6574 {
6575 struct kqtailq *suppressq;
6576
6577 kqlock_held(kqu);
6578
6579 assert((kn->kn_status & KN_SUPPRESSED) == 0);
6580 assert(kn->kn_status & KN_QUEUED);
6581
6582 knote_dequeue(kqu, kn);
6583 /* deactivate - so new activations indicate a wakeup */
6584 kn->kn_status &= ~KN_ACTIVE;
6585 kn->kn_status |= KN_SUPPRESSED;
6586 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6587 TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
6588 }
6589
6590 __attribute__((always_inline))
6591 static inline void
6592 knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
6593 {
6594 struct kqtailq *suppressq;
6595
6596 kqlock_held(kqu);
6597
6598 assert(kn->kn_status & KN_SUPPRESSED);
6599
6600 kn->kn_status &= ~KN_SUPPRESSED;
6601 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6602 TAILQ_REMOVE(suppressq, kn, kn_tqe);
6603
6604 /*
6605 * If the knote is no longer active, reset its push,
6606 * and resynchronize kn_qos_index with kn_qos_override
6607 * for knotes with a real qos.
6608 */
6609 if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
6610 kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
6611 }
6612 kn->kn_qos_index = kn->kn_qos_override;
6613 }
6614
6615 /* called with kqueue lock held */
6616 static void
6617 knote_unsuppress(kqueue_t kqu, struct knote *kn)
6618 {
6619 knote_unsuppress_noqueue(kqu, kn);
6620 knote_enqueue(kqu, kn);
6621 }
6622
6623 __attribute__((always_inline))
6624 static inline void
6625 knote_mark_active(struct knote *kn)
6626 {
6627 if ((kn->kn_status & KN_ACTIVE) == 0) {
6628 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
6629 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
6630 kn->kn_filtid);
6631 }
6632
6633 kn->kn_status |= KN_ACTIVE;
6634 }
6635
6636 /* called with kqueue lock held */
6637 static void
6638 knote_activate(kqueue_t kqu, struct knote *kn, int result)
6639 {
6640 assert(result & FILTER_ACTIVE);
6641 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
6642 // may dequeue the knote
6643 knote_adjust_qos(kqu.kq, kn, result);
6644 }
6645 knote_mark_active(kn);
6646 knote_enqueue(kqu, kn);
6647 }
6648
6649 /*
6650 * This function applies changes requested by f_attach or f_touch for
6651 * a given filter. It proceeds in a carefully chosen order to help
6652 * every single transition do the minimal amount of work possible.
6653 */
6654 static void
6655 knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
6656 int result)
6657 {
6658 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
6659 kn->kn_status &= ~KN_DISABLED;
6660
6661 /*
6662 * it is possible for userland to have knotes registered for a given
6663 * workloop `wl_orig` but really handled on another workloop `wl_new`.
6664 *
6665 * In that case, rearming will happen from the servicer thread of
6666 * `wl_new` which if `wl_orig` is no longer being serviced, would cause
6667 * this knote to stay suppressed forever if we only relied on
6668 * kqworkloop_acknowledge_events to be called by `wl_orig`.
6669 *
6670 * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
6671 * unsuppress because that would mess with the processing phase of
6672 * `wl_orig`, however it also means kqworkloop_acknowledge_events()
6673 * will be called.
6674 */
6675 if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
6676 if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
6677 knote_unsuppress_noqueue(kqu, kn);
6678 }
6679 }
6680 }
6681
6682 if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) {
6683 kqueue_update_iotier_override(kqu);
6684 }
6685
6686 if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
6687 // may dequeue the knote
6688 knote_reset_priority(kqu, kn, kev->qos);
6689 }
6690
6691 /*
6692 * When we unsuppress above, or because of knote_reset_priority(),
6693 * the knote may have been dequeued, we need to restore the invariant
6694 * that if the knote is active it needs to be queued now that
6695 * we're done applying changes.
6696 */
6697 if (result & FILTER_ACTIVE) {
6698 knote_activate(kqu, kn, result);
6699 } else {
6700 knote_enqueue(kqu, kn);
6701 }
6702
6703 if ((result & FILTER_THREADREQ_NODEFEER) &&
6704 act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
6705 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
6706 }
6707 }
6708
6709 /*
6710 * knote_drop - disconnect and drop the knote
6711 *
6712 * Called with the kqueue locked, returns with the kqueue unlocked.
6713 *
6714 * If a knote locking context is passed, it is canceled.
6715 *
6716 * The knote may have already been detached from
6717 * (or not yet attached to) its source object.
6718 */
6719 static void
6720 knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
6721 {
6722 struct proc *p = kq->kq_p;
6723
6724 kqlock_held(kq);
6725
6726 assert((kn->kn_status & KN_DROPPING) == 0);
6727 if (knlc == NULL) {
6728 assert((kn->kn_status & KN_LOCKED) == 0);
6729 }
6730 kn->kn_status |= KN_DROPPING;
6731
6732 if (kn->kn_status & KN_SUPPRESSED) {
6733 knote_unsuppress_noqueue(kq, kn);
6734 } else {
6735 knote_dequeue(kq, kn);
6736 }
6737 knote_wait_for_post(kq, kn);
6738
6739 /* Even if we are autodetached, the filter may need to clean up any
6740 * state stashed on the knote, so always make the call and let each filter
6741 * handle the possibility of autodetached-ness. */
6742 knote_fops(kn)->f_detach(kn);
6743
6744 /* kq may be freed when kq_remove_knote() returns */
6745 kq_remove_knote(kq, kn, p, knlc);
6746 if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
6747 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6748 }
6749
6750 knote_free(kn);
6751 }
6752
6753 void
6754 knote_init(void)
6755 {
6756 #if CONFIG_MEMORYSTATUS
6757 /* Initialize the memorystatus list lock */
6758 memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL);
6759 #endif
6760 }
6761 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
6762
6763 const struct filterops *
6764 knote_fops(struct knote *kn)
6765 {
6766 return sysfilt_ops[kn->kn_filtid];
6767 }
6768
6769 static struct knote *
6770 knote_alloc(void)
6771 {
6772 return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
6773 }
6774
6775 static void
6776 knote_free(struct knote *kn)
6777 {
6778 assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
6779 zfree(knote_zone, kn);
6780 }
6781
6782 #pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
6783
6784 kevent_ctx_t
6785 kevent_get_context(thread_t thread)
6786 {
6787 uthread_t ut = get_bsdthread_info(thread);
6788 return &ut->uu_save.uus_kevent;
6789 }
6790
6791 static inline bool
6792 kevent_args_requesting_events(unsigned int flags, int nevents)
6793 {
6794 return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
6795 }
6796
6797 static inline int
6798 kevent_adjust_flags_for_proc(proc_t p, int flags)
6799 {
6800 __builtin_assume(p);
6801 return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
6802 }
6803
6804 /*!
6805 * @function kevent_get_kqfile
6806 *
6807 * @brief
6808 * Lookup a kqfile by fd.
6809 *
6810 * @discussion
6811 * Callers: kevent, kevent64, kevent_qos
6812 *
6813 * This is not assumed to be a fastpath (kqfile interfaces are legacy)
6814 */
6815 OS_NOINLINE
6816 static int
6817 kevent_get_kqfile(struct proc *p, int fd, int flags,
6818 struct fileproc **fpp, struct kqueue **kqp)
6819 {
6820 int error = 0;
6821 struct kqueue *kq;
6822
6823 error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp);
6824 if (__improbable(error)) {
6825 return error;
6826 }
6827 kq = (struct kqueue *)fp_get_data((*fpp));
6828
6829 uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
6830 if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
6831 kqlock(kq);
6832 kq_state = kq->kq_state;
6833 if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
6834 if (flags & KEVENT_FLAG_LEGACY32) {
6835 kq_state |= KQ_KEV32;
6836 } else if (flags & KEVENT_FLAG_LEGACY64) {
6837 kq_state |= KQ_KEV64;
6838 } else {
6839 kq_state |= KQ_KEV_QOS;
6840 }
6841 kq->kq_state = kq_state;
6842 }
6843 kqunlock(kq);
6844 }
6845
6846 /*
6847 * kqfiles can't be used through the legacy kevent()
6848 * and other interfaces at the same time.
6849 */
6850 if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
6851 (bool)(kq_state & KQ_KEV32))) {
6852 fp_drop(p, fd, *fpp, 0);
6853 return EINVAL;
6854 }
6855
6856 *kqp = kq;
6857 return 0;
6858 }
6859
6860 /*!
6861 * @function kevent_get_kqwq
6862 *
6863 * @brief
6864 * Lookup or create the process kqwq (fastpath).
6865 *
6866 * @discussion
6867 * Callers: kevent64, kevent_qos
6868 */
6869 OS_ALWAYS_INLINE
6870 static int
6871 kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
6872 {
6873 struct kqworkq *kqwq = p->p_fd.fd_wqkqueue;
6874
6875 if (__improbable(kevent_args_requesting_events(flags, nevents))) {
6876 return EINVAL;
6877 }
6878 if (__improbable(kqwq == NULL)) {
6879 kqwq = kqworkq_alloc(p, flags);
6880 if (__improbable(kqwq == NULL)) {
6881 return ENOMEM;
6882 }
6883 }
6884
6885 *kqp = &kqwq->kqwq_kqueue;
6886 return 0;
6887 }
6888
6889 #pragma mark kevent copyio
6890
6891 /*!
6892 * @function kevent_get_data_size
6893 *
6894 * @brief
6895 * Copies in the extra data size from user-space.
6896 */
6897 static int
6898 kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
6899 kevent_ctx_t kectx)
6900 {
6901 if (!data_avail || !data_out) {
6902 kectx->kec_data_size = 0;
6903 kectx->kec_data_resid = 0;
6904 } else if (flags & KEVENT_FLAG_PROC64) {
6905 user64_size_t usize = 0;
6906 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6907 if (__improbable(error)) {
6908 return error;
6909 }
6910 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6911 } else {
6912 user32_size_t usize = 0;
6913 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6914 if (__improbable(error)) {
6915 return error;
6916 }
6917 kectx->kec_data_avail = data_avail;
6918 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6919 }
6920 kectx->kec_data_out = data_out;
6921 kectx->kec_data_avail = data_avail;
6922 return 0;
6923 }
6924
6925 /*!
6926 * @function kevent_put_data_size
6927 *
6928 * @brief
6929 * Copies out the residual data size to user-space if any has been used.
6930 */
6931 static int
6932 kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
6933 {
6934 if (kectx->kec_data_resid == kectx->kec_data_size) {
6935 return 0;
6936 }
6937 if (flags & KEVENT_FLAG_KERNEL) {
6938 *(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
6939 return 0;
6940 }
6941 if (flags & KEVENT_FLAG_PROC64) {
6942 user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
6943 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6944 } else {
6945 user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
6946 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6947 }
6948 }
6949
6950 /*!
6951 * @function kevent_legacy_copyin
6952 *
6953 * @brief
6954 * Handles the copyin of a kevent/kevent64 event.
6955 */
6956 static int
6957 kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
6958 {
6959 int error;
6960
6961 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
6962
6963 if (flags & KEVENT_FLAG_LEGACY64) {
6964 struct kevent64_s kev64;
6965
6966 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6967 if (__improbable(error)) {
6968 return error;
6969 }
6970 *addrp += sizeof(kev64);
6971 *kevp = (struct kevent_qos_s){
6972 .ident = kev64.ident,
6973 .filter = kev64.filter,
6974 /* Make sure user doesn't pass in any system flags */
6975 .flags = kev64.flags & ~EV_SYSFLAGS,
6976 .udata = kev64.udata,
6977 .fflags = kev64.fflags,
6978 .data = kev64.data,
6979 .ext[0] = kev64.ext[0],
6980 .ext[1] = kev64.ext[1],
6981 };
6982 } else if (flags & KEVENT_FLAG_PROC64) {
6983 struct user64_kevent kev64;
6984
6985 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6986 if (__improbable(error)) {
6987 return error;
6988 }
6989 *addrp += sizeof(kev64);
6990 *kevp = (struct kevent_qos_s){
6991 .ident = kev64.ident,
6992 .filter = kev64.filter,
6993 /* Make sure user doesn't pass in any system flags */
6994 .flags = kev64.flags & ~EV_SYSFLAGS,
6995 .udata = kev64.udata,
6996 .fflags = kev64.fflags,
6997 .data = kev64.data,
6998 };
6999 } else {
7000 struct user32_kevent kev32;
7001
7002 error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
7003 if (__improbable(error)) {
7004 return error;
7005 }
7006 *addrp += sizeof(kev32);
7007 *kevp = (struct kevent_qos_s){
7008 .ident = (uintptr_t)kev32.ident,
7009 .filter = kev32.filter,
7010 /* Make sure user doesn't pass in any system flags */
7011 .flags = kev32.flags & ~EV_SYSFLAGS,
7012 .udata = CAST_USER_ADDR_T(kev32.udata),
7013 .fflags = kev32.fflags,
7014 .data = (intptr_t)kev32.data,
7015 };
7016 }
7017
7018 return 0;
7019 }
7020
7021 /*!
7022 * @function kevent_modern_copyin
7023 *
7024 * @brief
7025 * Handles the copyin of a kevent_qos/kevent_id event.
7026 */
7027 static int
7028 kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
7029 {
7030 int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
7031 if (__probable(!error)) {
7032 /* Make sure user doesn't pass in any system flags */
7033 *addrp += sizeof(struct kevent_qos_s);
7034 kevp->flags &= ~EV_SYSFLAGS;
7035 }
7036 return error;
7037 }
7038
7039 /*!
7040 * @function kevent_legacy_copyout
7041 *
7042 * @brief
7043 * Handles the copyout of a kevent/kevent64 event.
7044 */
7045 static int
7046 kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
7047 {
7048 int advance;
7049 int error;
7050
7051 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
7052
7053 /*
7054 * fully initialize the different output event structure
7055 * types from the internal kevent (and some universal
7056 * defaults for fields not represented in the internal
7057 * form).
7058 *
7059 * Note: these structures have no padding hence the C99
7060 * initializers below do not leak kernel info.
7061 */
7062 if (flags & KEVENT_FLAG_LEGACY64) {
7063 struct kevent64_s kev64 = {
7064 .ident = kevp->ident,
7065 .filter = kevp->filter,
7066 .flags = kevp->flags,
7067 .fflags = kevp->fflags,
7068 .data = (int64_t)kevp->data,
7069 .udata = kevp->udata,
7070 .ext[0] = kevp->ext[0],
7071 .ext[1] = kevp->ext[1],
7072 };
7073 advance = sizeof(struct kevent64_s);
7074 error = copyout((caddr_t)&kev64, *addrp, advance);
7075 } else if (flags & KEVENT_FLAG_PROC64) {
7076 /*
7077 * deal with the special case of a user-supplied
7078 * value of (uintptr_t)-1.
7079 */
7080 uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
7081 (uint64_t)-1LL : (uint64_t)kevp->ident;
7082 struct user64_kevent kev64 = {
7083 .ident = ident,
7084 .filter = kevp->filter,
7085 .flags = kevp->flags,
7086 .fflags = kevp->fflags,
7087 .data = (int64_t) kevp->data,
7088 .udata = (user_addr_t) kevp->udata,
7089 };
7090 advance = sizeof(kev64);
7091 error = copyout((caddr_t)&kev64, *addrp, advance);
7092 } else {
7093 struct user32_kevent kev32 = {
7094 .ident = (uint32_t)kevp->ident,
7095 .filter = kevp->filter,
7096 .flags = kevp->flags,
7097 .fflags = kevp->fflags,
7098 .data = (int32_t)kevp->data,
7099 .udata = (uint32_t)kevp->udata,
7100 };
7101 advance = sizeof(kev32);
7102 error = copyout((caddr_t)&kev32, *addrp, advance);
7103 }
7104 if (__probable(!error)) {
7105 *addrp += advance;
7106 }
7107 return error;
7108 }
7109
7110 /*!
7111 * @function kevent_modern_copyout
7112 *
7113 * @brief
7114 * Handles the copyout of a kevent_qos/kevent_id event.
7115 */
7116 OS_ALWAYS_INLINE
7117 static inline int
7118 kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
7119 {
7120 int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
7121 if (__probable(!error)) {
7122 *addrp += sizeof(struct kevent_qos_s);
7123 }
7124 return error;
7125 }
7126
7127 #pragma mark kevent core implementation
7128
7129 /*!
7130 * @function kevent_callback_inline
7131 *
7132 * @brief
7133 * Callback for each individual event
7134 *
7135 * @discussion
7136 * This is meant to be inlined in kevent_modern_callback and
7137 * kevent_legacy_callback.
7138 */
7139 OS_ALWAYS_INLINE
7140 static inline int
7141 kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
7142 {
7143 int error;
7144
7145 assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);
7146
7147 /*
7148 * Copy out the appropriate amount of event data for this user.
7149 */
7150 if (legacy) {
7151 error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
7152 kectx->kec_process_flags);
7153 } else {
7154 error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
7155 }
7156
7157 /*
7158 * If there isn't space for additional events, return
7159 * a harmless error to stop the processing here
7160 */
7161 if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
7162 error = EWOULDBLOCK;
7163 }
7164 return error;
7165 }
7166
7167 /*!
7168 * @function kevent_modern_callback
7169 *
7170 * @brief
7171 * Callback for each individual modern event.
7172 *
7173 * @discussion
7174 * This callback handles kevent_qos/kevent_id events.
7175 */
7176 static int
7177 kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7178 {
7179 return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
7180 }
7181
7182 /*!
7183 * @function kevent_legacy_callback
7184 *
7185 * @brief
7186 * Callback for each individual legacy event.
7187 *
7188 * @discussion
7189 * This callback handles kevent/kevent64 events.
7190 */
7191 static int
7192 kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7193 {
7194 return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
7195 }
7196
7197 /*!
7198 * @function kevent_cleanup
7199 *
7200 * @brief
7201 * Handles the cleanup returning from a kevent call.
7202 *
7203 * @discussion
7204 * kevent entry points will take a reference on workloops,
7205 * and a usecount on the fileglob of kqfiles.
7206 *
7207 * This function undoes this on the exit paths of kevents.
7208 *
7209 * @returns
7210 * The error to return to userspace.
7211 */
7212 static int
7213 kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
7214 {
7215 // poll should not call any codepath leading to this
7216 assert((flags & KEVENT_FLAG_POLL) == 0);
7217
7218 if (flags & KEVENT_FLAG_WORKLOOP) {
7219 kqworkloop_release(kqu.kqwl);
7220 } else if (flags & KEVENT_FLAG_WORKQ) {
7221 /* nothing held */
7222 } else {
7223 fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
7224 }
7225
7226 /* don't restart after signals... */
7227 if (error == ERESTART) {
7228 error = EINTR;
7229 } else if (error == 0) {
7230 /* don't abandon other output just because of residual copyout failures */
7231 (void)kevent_put_data_size(flags, kectx);
7232 }
7233
7234 if (flags & KEVENT_FLAG_PARKING) {
7235 thread_t th = current_thread();
7236 struct uthread *uth = get_bsdthread_info(th);
7237 if (uth->uu_kqr_bound) {
7238 thread_unfreeze_base_pri(th);
7239 }
7240 }
7241 return error;
7242 }
7243
7244 /*!
7245 * @function kqueue_process
7246 *
7247 * @brief
7248 * Process the triggered events in a kqueue.
7249 *
7250 * @discussion
7251 * Walk the queued knotes and validate that they are really still triggered
7252 * events by calling the filter routines (if necessary).
7253 *
7254 * For each event that is still considered triggered, invoke the callback
7255 * routine provided.
7256 *
7257 * caller holds a reference on the kqueue.
7258 * kqueue locked on entry and exit - but may be dropped
7259 * kqueue list locked (held for duration of call)
7260 *
7261 * This is only called by kqueue_scan() so that the compiler can inline it.
7262 *
7263 * @returns
7264  * - 0: no event was returned, no other error occurred
7265 * - EBADF: the kqueue is being destroyed (KQ_DRAIN is set)
7266 * - EWOULDBLOCK: (not an error) events have been found and we should return
7267 * - EFAULT: copyout failed
7268 * - filter specific errors
7269 */
7270 static int
7271 kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7272 kevent_callback_t callback)
7273 {
7274 workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
7275 struct knote *kn;
7276 int error = 0, rc = 0;
7277 struct kqtailq *base_queue, *queue;
7278 uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
7279
7280 if (kq_type & KQ_WORKQ) {
7281 rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
7282 } else if (kq_type & KQ_WORKLOOP) {
7283 rc = kqworkloop_begin_processing(kqu.kqwl, flags);
7284 } else {
7285 kqfile_retry:
7286 rc = kqfile_begin_processing(kqu.kqf);
7287 if (rc == EBADF) {
7288 return EBADF;
7289 }
7290 }
7291
7292 if (rc == -1) {
7293 /* Nothing to process */
7294 return 0;
7295 }
7296
7297 /*
7298 * loop through the enqueued knotes associated with this request,
7299 * processing each one. Each request may have several queues
7300 * of knotes to process (depending on the type of kqueue) so we
7301 * have to loop through all the queues as long as we have additional
7302 * space.
7303 */
7304
7305 process_again:
7306 if (kq_type & KQ_WORKQ) {
7307 base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1];
7308 } else if (kq_type & KQ_WORKLOOP) {
7309 base_queue = &kqu.kqwl->kqwl_queue[0];
7310 queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
7311 } else {
7312 base_queue = queue = &kqu.kqf->kqf_queue;
7313 }
7314
7315 do {
7316 while ((kn = TAILQ_FIRST(queue)) != NULL) {
7317 error = knote_process(kn, kectx, callback);
7318 if (error == EJUSTRETURN) {
7319 error = 0;
7320 } else if (__improbable(error)) {
7321 /* error is EWOULDBLOCK when the out event array is full */
7322 goto stop_processing;
7323 }
7324 }
7325 } while (queue-- > base_queue);
7326
7327 if (kectx->kec_process_noutputs) {
7328 /* callers will transform this into no error */
7329 error = EWOULDBLOCK;
7330 }
7331
7332 stop_processing:
7333 /*
7334 * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
7335 * we want to unbind the kqrequest from the thread.
7336 *
7337  * However, because the kq locks are dropped several times during processing,
7338  * new knotes may have fired again, in which case we want to fail the end
7339  * processing and process again, until it converges.
7340 *
7341 * If we have an error or returned events, end processing never fails.
7342 */
7343 if (error) {
7344 flags &= ~KEVENT_FLAG_PARKING;
7345 }
7346 if (kq_type & KQ_WORKQ) {
7347 rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
7348 } else if (kq_type & KQ_WORKLOOP) {
7349 rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
7350 } else {
7351 rc = kqfile_end_processing(kqu.kqf);
7352 }
7353
7354 if (__probable(error)) {
7355 return error;
7356 }
7357
7358 if (__probable(rc >= 0)) {
7359 assert(rc == 0 || rc == EBADF);
7360 return rc;
7361 }
7362
7363 if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
7364 assert(flags & KEVENT_FLAG_PARKING);
7365 goto process_again;
7366 } else {
7367 goto kqfile_retry;
7368 }
7369 }
7370
7371 /*!
7372 * @function kqueue_scan_continue
7373 *
7374 * @brief
7375 * The continuation used by kqueue_scan for kevent entry points.
7376 *
7377 * @discussion
7378 * Assumes we inherit a use/ref count on the kq or its fileglob.
7379 *
7380 * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
7381 * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
7382 */
7383 OS_NORETURN OS_NOINLINE
7384 static void
7385 kqueue_scan_continue(void *data, wait_result_t wait_result)
7386 {
7387 uthread_t ut = current_uthread();
7388 kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
7389 int error = 0, flags = kectx->kec_process_flags;
7390 struct kqueue *kq = data;
7391
7392 /*
7393 * only kevent variants call in here, so we know the callback is
7394 * kevent_legacy_callback or kevent_modern_callback.
7395 */
7396 assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);
7397
7398 switch (wait_result) {
7399 case THREAD_AWAKENED:
7400 if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
7401 error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
7402 } else {
7403 error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
7404 }
7405 break;
7406 case THREAD_TIMED_OUT:
7407 error = 0;
7408 break;
7409 case THREAD_INTERRUPTED:
7410 error = EINTR;
7411 break;
7412 case THREAD_RESTART:
7413 error = EBADF;
7414 break;
7415 default:
7416 panic("%s: - invalid wait_result (%d)", __func__, wait_result);
7417 }
7418
7419
7420 error = kevent_cleanup(kq, flags, error, kectx);
7421 *(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
7422 unix_syscall_return(error);
7423 }
7424
7425 /*!
7426 * @function kqueue_scan
7427 *
7428 * @brief
7429 * Scan and wait for events in a kqueue (used by poll & kevent).
7430 *
7431 * @discussion
7432 * Process the triggered events in a kqueue.
7433 *
7434 * If there are no events triggered arrange to wait for them:
7435 * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
7436 * - possibly until kectx->kec_deadline expires
7437 *
7438  * When it waits, and neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
7439  * is set, it will wait in the kqueue_scan_continue continuation.
7440 *
7441 * poll() will block in place, and KEVENT_FLAG_KERNEL calls
7442 * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
7443 *
7444 * @param kqu
7445 * The kqueue being scanned.
7446 *
7447 * @param flags
7448 * The KEVENT_FLAG_* flags for this call.
7449 *
7450 * @param kectx
7451 * The context used for this scan.
7452 * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
7453 *
7454 * @param callback
7455  * The callback to be called on events successfully processed.
7456 * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
7457 */
7458 int
7459 kqueue_scan(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7460 kevent_callback_t callback)
7461 {
7462 int error;
7463
7464 for (;;) {
7465 kqlock(kqu);
7466 error = kqueue_process(kqu, flags, kectx, callback);
7467
7468 /*
7469 * If we got an error, events returned (EWOULDBLOCK)
7470 * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
7471 * just return.
7472 */
7473 if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
7474 kqunlock(kqu);
7475 return error == EWOULDBLOCK ? 0 : error;
7476 }
7477
7478 assert((kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
7479
7480 kqu.kqf->kqf_state |= KQ_SLEEP;
7481 assert_wait_deadline(&kqu.kqf->kqf_count, THREAD_ABORTSAFE,
7482 kectx->kec_deadline);
7483 kqunlock(kqu);
7484
7485 if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
7486 thread_block_parameter(kqueue_scan_continue, kqu.kqf);
7487 __builtin_unreachable();
7488 }
7489
7490 wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
7491 switch (wr) {
7492 case THREAD_AWAKENED:
7493 break;
7494 case THREAD_TIMED_OUT:
7495 return 0;
7496 case THREAD_INTERRUPTED:
7497 return EINTR;
7498 case THREAD_RESTART:
7499 return EBADF;
7500 default:
7501 panic("%s: - bad wait_result (%d)", __func__, wr);
7502 }
7503 }
7504 }
7505
7506 /*!
7507 * @function kevent_internal
7508 *
7509 * @brief
7510 * Common kevent code.
7511 *
7512 * @discussion
7513 * Needs to be inlined to specialize for legacy or modern and
7514 * eliminate dead code.
7515 *
7516 * This is the core logic of kevent entry points, that will:
7517 * - register kevents
7518 * - optionally scan the kqueue for events
7519 *
7520 * The caller is giving kevent_internal a reference on the kqueue
7521 * or its fileproc that needs to be cleaned up by kevent_cleanup().
7522 */
7523 OS_ALWAYS_INLINE
7524 static inline int
7525 kevent_internal(kqueue_t kqu,
7526 user_addr_t changelist, int nchanges,
7527 user_addr_t ueventlist, int nevents,
7528 int flags, kevent_ctx_t kectx, int32_t *retval,
7529 bool legacy)
7530 {
7531 int error = 0, noutputs = 0, register_rc;
7532
7533 /* only bound threads can receive events on workloops */
7534 if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
7535 #if CONFIG_WORKLOOP_DEBUG
7536 UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
7537 .uu_kqid = kqu.kqwl->kqwl_dynamicid,
7538 .uu_kq = error ? NULL : kqu.kq,
7539 .uu_error = error,
7540 .uu_nchanges = nchanges,
7541 .uu_nevents = nevents,
7542 .uu_flags = flags,
7543 });
7544 #endif // CONFIG_WORKLOOP_DEBUG
7545
7546 if (flags & KEVENT_FLAG_KERNEL) {
7547 /* see kevent_workq_internal */
7548 error = copyout(&kqu.kqwl->kqwl_dynamicid,
7549 ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
7550 kectx->kec_data_resid -= sizeof(kqueue_id_t);
7551 if (__improbable(error)) {
7552 goto out;
7553 }
7554 }
7555
7556 if (kevent_args_requesting_events(flags, nevents)) {
7557 /*
7558  * Disable the R2K notification while doing a register: if the
7559  * caller wants events too, we don't want the AST to be set since we
7560  * will process these events soon.
7561 */
7562 kqlock(kqu);
7563 kqu.kq->kq_state &= ~KQ_R2K_ARMED;
7564 kqunlock(kqu);
7565 flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
7566 }
7567 }
7568
7569 /* register all the change requests the user provided... */
7570 while (nchanges > 0 && error == 0) {
7571 struct kevent_qos_s kev;
7572 struct knote *kn = NULL;
7573
7574 if (legacy) {
7575 error = kevent_legacy_copyin(&changelist, &kev, flags);
7576 } else {
7577 error = kevent_modern_copyin(&changelist, &kev);
7578 }
7579 if (error) {
7580 break;
7581 }
7582
7583 register_rc = kevent_register(kqu.kq, &kev, &kn);
7584 if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
7585 thread_t thread = current_thread();
7586
7587 kqlock_held(kqu);
7588
7589 if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
7590 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
7591 }
7592
7593 // f_post_register_wait is meant to call a continuation and not to
7594 // return, which is why we don't support FILTER_REGISTER_WAIT if
7595 // KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
7596 // waits isn't the last.
7597 //
7598 // It is implementable, but not used by any userspace code at the
7599 // moment, so for now return ENOTSUP if someone tries to do it.
7600 if (nchanges == 1 && noutputs < nevents &&
7601 (flags & KEVENT_FLAG_KERNEL) == 0 &&
7602 (flags & KEVENT_FLAG_PARKING) == 0 &&
7603 (flags & KEVENT_FLAG_ERROR_EVENTS) &&
7604 (flags & KEVENT_FLAG_WORKLOOP)) {
7605 uthread_t ut = get_bsdthread_info(thread);
7606
7607 /*
7608 * store the continuation/completion data in the uthread
7609 *
7610 * Note: the kectx aliases with this,
7611 * and is destroyed in the process.
7612 */
7613 ut->uu_save.uus_kevent_register = (struct _kevent_register){
7614 .kev = kev,
7615 .kqwl = kqu.kqwl,
7616 .eventout = noutputs,
7617 .ueventlist = ueventlist,
7618 };
7619 knote_fops(kn)->f_post_register_wait(ut, kn,
7620 &ut->uu_save.uus_kevent_register);
7621 __builtin_unreachable();
7622 }
7623 kqunlock(kqu);
7624
7625 kev.flags |= EV_ERROR;
7626 kev.data = ENOTSUP;
7627 } else {
7628 assert((register_rc & FILTER_REGISTER_WAIT) == 0);
7629 }
7630
7631 // keep in sync with kevent_register_wait_return()
7632 if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
7633 if ((kev.flags & EV_ERROR) == 0) {
7634 kev.flags |= EV_ERROR;
7635 kev.data = 0;
7636 }
7637 if (legacy) {
7638 error = kevent_legacy_copyout(&kev, &ueventlist, flags);
7639 } else {
7640 error = kevent_modern_copyout(&kev, &ueventlist);
7641 }
7642 if (error == 0) {
7643 noutputs++;
7644 }
7645 } else if (kev.flags & EV_ERROR) {
7646 error = (int)kev.data;
7647 }
7648 nchanges--;
7649 }
7650
7651 if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
7652 nevents > 0 && noutputs == 0 && error == 0) {
7653 kectx->kec_process_flags = flags;
7654 kectx->kec_process_nevents = nevents;
7655 kectx->kec_process_noutputs = 0;
7656 kectx->kec_process_eventlist = ueventlist;
7657
7658 if (legacy) {
7659 error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
7660 } else {
7661 error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
7662 }
7663
7664 noutputs = kectx->kec_process_noutputs;
7665 } else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
7666 /*
7667  * If we didn't go through kqworkloop_end_processing(),
7668 * we need to do it here.
7669 *
7670 * kqueue_scan will call kqworkloop_end_processing(),
7671 * so we only need to do it if we didn't scan.
7672 */
7673 kqlock(kqu);
7674 kqworkloop_end_processing(kqu.kqwl, 0, 0);
7675 kqunlock(kqu);
7676 }
7677
7678 *retval = noutputs;
7679 out:
7680 return kevent_cleanup(kqu.kq, flags, error, kectx);
7681 }
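
/*
 * Illustration (a hedged userspace sketch, not part of the kernel build):
 * the EV_ERROR/EV_RECEIPT handling above is what a caller relies on when it
 * asks for per-change receipts.  The status of each change comes back
 * in-place in the eventlist with EV_ERROR set and the errno value in data,
 * exactly as copied out by the loop above.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <stdio.h>
 *
 *	static int
 *	register_with_receipt(int kq, int fd)
 *	{
 *		struct kevent change, result;
 *
 *		EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *		// With EV_RECEIPT, kevent() reports one EV_ERROR event per
 *		// change instead of draining pending events.
 *		if (kevent(kq, &change, 1, &result, 1, NULL) == -1) {
 *			return -1;
 *		}
 *		if ((result.flags & EV_ERROR) && result.data != 0) {
 *			fprintf(stderr, "register failed: %lld\n",
 *			    (long long)result.data);
 *			return -1;
 *		}
 *		return 0;
 *	}
 */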
7682
7683 #pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
7684
7685 /*!
7686 * @function kevent_modern_internal
7687 *
7688 * @brief
7689 * The backend of the kevent_id and kevent_workq_internal entry points.
7690 *
7691 * @discussion
7692 * Needs to be inline due to the number of arguments.
7693 */
7694 OS_NOINLINE
7695 static int
7696 kevent_modern_internal(kqueue_t kqu,
7697 user_addr_t changelist, int nchanges,
7698 user_addr_t ueventlist, int nevents,
7699 int flags, kevent_ctx_t kectx, int32_t *retval)
7700 {
7701 return kevent_internal(kqu.kq, changelist, nchanges,
7702 ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
7703 }
7704
7705 /*!
7706 * @function kevent_id
7707 *
7708 * @brief
7709 * The kevent_id() syscall.
7710 */
7711 int
7712 kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
7713 {
7714 int error, flags = uap->flags & KEVENT_FLAG_USER;
7715 uthread_t uth = current_uthread();
7716 workq_threadreq_t kqr = uth->uu_kqr_bound;
7717 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7718 kqueue_t kqu;
7719
7720 flags = kevent_adjust_flags_for_proc(p, flags);
7721 flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;
7722
7723 if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
7724 KEVENT_FLAG_WORKLOOP)) {
7725 return EINVAL;
7726 }
7727
7728 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7729 if (__improbable(error)) {
7730 return error;
7731 }
7732
7733 kectx->kec_deadline = 0;
7734 kectx->kec_fp = NULL;
7735 kectx->kec_fd = -1;
7736 /* the kec_process_* fields are only filled if kqueue_scan is called */
7737
7738 /*
7739 * Get the kq we are going to be working on
7740 * As a fastpath, look at the currently bound workloop.
7741 */
7742 kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
7743 if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
7744 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
7745 return EEXIST;
7746 }
7747 kqworkloop_retain(kqu.kqwl);
7748 } else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
7749 return EXDEV;
7750 } else {
7751 error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
7752 if (__improbable(error)) {
7753 return error;
7754 }
7755 }
7756
7757 return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
7758 uap->eventlist, uap->nevents, flags, kectx, retval);
7759 }
7760
7761 /*!
7762 * @function kevent_workq_internal
7763 *
7764 * @discussion
7765 * This function is exported for the sake of the workqueue subsystem.
7766 *
7767 * It is called in two ways:
7768  * - when a thread is about to go to userspace to ask for pending events
7769 * - when a thread is returning from userspace with events back
7770 *
7771 * the workqueue subsystem will only use the following flags:
7772 * - KEVENT_FLAG_STACK_DATA (always)
7773 * - KEVENT_FLAG_IMMEDIATE (always)
7774 * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
7775 * userspace).
7776 *
7777 * It implicitly acts on the bound kqueue, and for the case of workloops
7778 * will copyout the kqueue ID before anything else.
7779 *
7780 *
7781  * Pthread will have set up the various arguments to fit this stack layout:
7782 *
7783 * +-------....----+--------------+-----------+--------------------+
7784 * | user stack | data avail | nevents | pthread_self() |
7785 * +-------....----+--------------+-----------+--------------------+
7786 * ^ ^
7787 * data_out eventlist
7788 *
7789 * When a workloop is used, the workloop ID is copied out right before
7790 * the eventlist and is taken from the data buffer.
7791 *
7792 * @warning
7793  * This function is carefully tailored to not make any call except the final tail
7794 * call into kevent_modern_internal. (LTO inlines current_uthread()).
7795 *
7796 * This function is performance sensitive due to the workq subsystem.
7797 */
7798 int
7799 kevent_workq_internal(struct proc *p,
7800 user_addr_t changelist, int nchanges,
7801 user_addr_t eventlist, int nevents,
7802 user_addr_t data_out, user_size_t *data_available,
7803 unsigned int flags, int32_t *retval)
7804 {
7805 uthread_t uth = current_uthread();
7806 workq_threadreq_t kqr = uth->uu_kqr_bound;
7807 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7808 kqueue_t kqu;
7809
7810 assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
7811 flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));
7812
7813 kectx->kec_data_out = data_out;
7814 kectx->kec_data_avail = (uint64_t)data_available;
7815 kectx->kec_data_size = *data_available;
7816 kectx->kec_data_resid = *data_available;
7817 kectx->kec_deadline = 0;
7818 kectx->kec_fp = NULL;
7819 kectx->kec_fd = -1;
7820 /* the kec_process_* fields are only filled if kqueue_scan is called */
7821
7822 flags = kevent_adjust_flags_for_proc(p, flags);
7823
7824 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
7825 kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
7826 kqworkloop_retain(kqu.kqwl);
7827
7828 flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
7829 KEVENT_FLAG_KERNEL;
7830 } else {
7831 kqu.kqwq = p->p_fd.fd_wqkqueue;
7832
7833 flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
7834 }
7835
7836 return kevent_modern_internal(kqu, changelist, nchanges,
7837 eventlist, nevents, flags, kectx, retval);
7838 }
7839
7840 /*!
7841 * @function kevent_qos
7842 *
7843 * @brief
7844 * The kevent_qos() syscall.
7845 */
7846 int
7847 kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
7848 {
7849 uthread_t uth = current_uthread();
7850 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7851 int error, flags = uap->flags & KEVENT_FLAG_USER;
7852 struct kqueue *kq;
7853
7854 if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
7855 return EINVAL;
7856 }
7857
7858 flags = kevent_adjust_flags_for_proc(p, flags);
7859
7860 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7861 if (__improbable(error)) {
7862 return error;
7863 }
7864
7865 kectx->kec_deadline = 0;
7866 kectx->kec_fp = NULL;
7867 kectx->kec_fd = uap->fd;
7868 /* the kec_process_* fields are only filled if kqueue_scan is called */
7869
7870 /* get the kq we are going to be working on */
7871 if (__probable(flags & KEVENT_FLAG_WORKQ)) {
7872 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7873 } else {
7874 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7875 }
7876 if (__improbable(error)) {
7877 return error;
7878 }
7879
7880 return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
7881 uap->eventlist, uap->nevents, flags, kectx, retval);
7882 }
7883
7884 #pragma mark legacy syscalls: kevent, kevent64
7885
7886 /*!
7887 * @function kevent_legacy_get_deadline
7888 *
7889 * @brief
7890 * Compute the deadline for the legacy kevent syscalls.
7891 *
7892 * @discussion
7893 * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
7894 * as this takes precedence over the deadline.
7895 *
7896 * This function will fail if utimeout is USER_ADDR_NULL
7897 * (the caller should check).
7898 */
7899 static int
7900 kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
7901 {
7902 struct timespec ts;
7903
7904 if (flags & KEVENT_FLAG_PROC64) {
7905 struct user64_timespec ts64;
7906 int error = copyin(utimeout, &ts64, sizeof(ts64));
7907 if (__improbable(error)) {
7908 return error;
7909 }
7910 ts.tv_sec = (unsigned long)ts64.tv_sec;
7911 ts.tv_nsec = (long)ts64.tv_nsec;
7912 } else {
7913 struct user32_timespec ts32;
7914 int error = copyin(utimeout, &ts32, sizeof(ts32));
7915 if (__improbable(error)) {
7916 return error;
7917 }
7918 ts.tv_sec = ts32.tv_sec;
7919 ts.tv_nsec = ts32.tv_nsec;
7920 }
7921 if (!timespec_is_valid(&ts)) {
7922 return EINVAL;
7923 }
7924
7925 clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
7926 return 0;
7927 }
7928
7929 /*!
7930 * @function kevent_legacy_internal
7931 *
7932 * @brief
7933 * The core implementation for kevent and kevent64
7934 */
7935 OS_NOINLINE
7936 static int
7937 kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
7938 int32_t *retval, int flags)
7939 {
7940 uthread_t uth = current_uthread();
7941 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7942 struct kqueue *kq;
7943 int error;
7944
7945 if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
7946 return EINVAL;
7947 }
7948
7949 flags = kevent_adjust_flags_for_proc(p, flags);
7950
7951 kectx->kec_data_out = 0;
7952 kectx->kec_data_avail = 0;
7953 kectx->kec_data_size = 0;
7954 kectx->kec_data_resid = 0;
7955 kectx->kec_deadline = 0;
7956 kectx->kec_fp = NULL;
7957 kectx->kec_fd = uap->fd;
7958 /* the kec_process_* fields are only filled if kqueue_scan is called */
7959
7960 /* convert timeout to absolute - if we have one (and not immediate) */
7961 if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
7962 error = kevent_legacy_get_deadline(flags, uap->timeout,
7963 &kectx->kec_deadline);
7964 if (__improbable(error)) {
7965 return error;
7966 }
7967 }
7968
7969 /* get the kq we are going to be working on */
7970 if (flags & KEVENT_FLAG_WORKQ) {
7971 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7972 } else {
7973 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7974 }
7975 if (__improbable(error)) {
7976 return error;
7977 }
7978
7979 return kevent_internal(kq, uap->changelist, uap->nchanges,
7980 uap->eventlist, uap->nevents, flags, kectx, retval,
7981 /*legacy*/ true);
7982 }
7983
7984 /*!
7985 * @function kevent
7986 *
7987 * @brief
7988 * The legacy kevent() syscall.
7989 */
7990 int
7991 kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
7992 {
7993 struct kevent64_args args = {
7994 .fd = uap->fd,
7995 .changelist = uap->changelist,
7996 .nchanges = uap->nchanges,
7997 .eventlist = uap->eventlist,
7998 .nevents = uap->nevents,
7999 .timeout = uap->timeout,
8000 };
8001
8002 return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
8003 }
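
/*
 * Illustration (a hedged userspace sketch, not compiled as part of this
 * file): the legacy kevent() path above, seen from a caller.  The relative
 * timeout is converted by kevent_legacy_get_deadline() and events are
 * copied out through kevent_legacy_copyout().
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	static int
 *	wait_for_read(int fd)
 *	{
 *		struct kevent change, event;
 *		struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
 *		int kq = kqueue();
 *
 *		if (kq == -1) {
 *			return -1;
 *		}
 *		EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *		// A single call registers the change and then waits (up to 1s),
 *		// mirroring kevent_internal()'s register-then-scan structure.
 *		int n = kevent(kq, &change, 1, &event, 1, &timeout);
 *		close(kq);
 *		return n;  // 0 on timeout, 1 if fd became readable, -1 on error
 *	}
 */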
8004
8005 /*!
8006 * @function kevent64
8007 *
8008 * @brief
8009 * The legacy kevent64() syscall.
8010 */
8011 int
8012 kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
8013 {
8014 int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
8015 return kevent_legacy_internal(p, uap, retval, flags);
8016 }
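
/*
 * Illustration (a hedged userspace sketch): the only caller-visible
 * difference from kevent() is the wider struct kevent64_s (64-bit
 * ident/udata plus the ext[0]/ext[1] words copied out by
 * kevent_legacy_copyout()) and an explicit flags argument.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *
 *	static int
 *	arm_timer64(int kq, uint64_t ident, int64_t milliseconds)
 *	{
 *		struct kevent64_s kev;
 *
 *		EV_SET64(&kev, ident, EVFILT_TIMER, EV_ADD, 0, milliseconds,
 *		    0, 0, 0);
 *		// nevents == 0: register the timer only, do not wait for events.
 *		return kevent64(kq, &kev, 1, NULL, 0, 0, NULL);
 *	}
 */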
8017
8018 #pragma mark - socket interface
8019
8020 #if SOCKETS
8021 #include <sys/param.h>
8022 #include <sys/socket.h>
8023 #include <sys/protosw.h>
8024 #include <sys/domain.h>
8025 #include <sys/mbuf.h>
8026 #include <sys/kern_event.h>
8027 #include <sys/malloc.h>
8028 #include <sys/sys_domain.h>
8029 #include <sys/syslog.h>
8030
8031 #ifndef ROUNDUP64
8032 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
8033 #endif
8034
8035 #ifndef ADVANCE64
8036 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
8037 #endif
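
/*
 * The kevt_pcblist sysctl handler below emits one packed record per PCB,
 * with each field rounded to an 8-byte boundary by ROUNDUP64(), in the
 * order: xkevtpcb, xsocket_n, receive xsockbuf_n, send xsockbuf_n,
 * xsockstat_n.  A hedged consumer-side sketch of walking one such record
 * with the same macros (assuming `buf` holds exactly one record):
 *
 *	struct xkevtpcb *xk = (struct xkevtpcb *)buf;
 *	struct xsocket_n *xso = (struct xsocket_n *)ADVANCE64(xk, sizeof(*xk));
 *	struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)ADVANCE64(xso, sizeof(*xso));
 *	struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)ADVANCE64(xsbrcv, sizeof(*xsbrcv));
 *	struct xsockstat_n *xst = (struct xsockstat_n *)ADVANCE64(xsbsnd, sizeof(*xsbsnd));
 */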
8038
8039 static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol");
8040 static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp);
8041
8042 static int kev_attach(struct socket *so, int proto, struct proc *p);
8043 static int kev_detach(struct socket *so);
8044 static int kev_control(struct socket *so, u_long cmd, caddr_t data,
8045 struct ifnet *ifp, struct proc *p);
8046 static lck_mtx_t * event_getlock(struct socket *, int);
8047 static int event_lock(struct socket *, int, void *);
8048 static int event_unlock(struct socket *, int, void *);
8049
8050 static int event_sofreelastref(struct socket *);
8051 static void kev_delete(struct kern_event_pcb *);
8052
8053 static struct pr_usrreqs event_usrreqs = {
8054 .pru_attach = kev_attach,
8055 .pru_control = kev_control,
8056 .pru_detach = kev_detach,
8057 .pru_soreceive = soreceive,
8058 };
8059
8060 static struct protosw eventsw[] = {
8061 {
8062 .pr_type = SOCK_RAW,
8063 .pr_protocol = SYSPROTO_EVENT,
8064 .pr_flags = PR_ATOMIC,
8065 .pr_usrreqs = &event_usrreqs,
8066 .pr_lock = event_lock,
8067 .pr_unlock = event_unlock,
8068 .pr_getlock = event_getlock,
8069 }
8070 };
8071
8072 __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
8073 __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
8074
8075 SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
8076 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");
8077
8078 struct kevtstat kevtstat;
8079 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
8080 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8081 kevt_getstat, "S,kevtstat", "");
8082
8083 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
8084 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8085 kevt_pcblist, "S,xkevtpcb", "");
8086
8087 static lck_mtx_t *
8088 event_getlock(struct socket *so, int flags)
8089 {
8090 #pragma unused(flags)
8091 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8092
8093 if (so->so_pcb != NULL) {
8094 if (so->so_usecount < 0) {
8095 panic("%s: so=%p usecount=%d lrh= %s", __func__,
8096 so, so->so_usecount, solockhistory_nr(so));
8097 }
8098 /* NOTREACHED */
8099 } else {
8100 panic("%s: so=%p NULL NO so_pcb %s", __func__,
8101 so, solockhistory_nr(so));
8102 /* NOTREACHED */
8103 }
8104 return &ev_pcb->evp_mtx;
8105 }
8106
8107 static int
8108 event_lock(struct socket *so, int refcount, void *lr)
8109 {
8110 void *lr_saved;
8111
8112 if (lr == NULL) {
8113 lr_saved = __builtin_return_address(0);
8114 } else {
8115 lr_saved = lr;
8116 }
8117
8118 if (so->so_pcb != NULL) {
8119 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8120 } else {
8121 panic("%s: so=%p NO PCB! lr=%p lrh= %s", __func__,
8122 so, lr_saved, solockhistory_nr(so));
8123 /* NOTREACHED */
8124 }
8125
8126 if (so->so_usecount < 0) {
8127 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s", __func__,
8128 so, so->so_pcb, lr_saved, so->so_usecount,
8129 solockhistory_nr(so));
8130 /* NOTREACHED */
8131 }
8132
8133 if (refcount) {
8134 so->so_usecount++;
8135 }
8136
8137 so->lock_lr[so->next_lock_lr] = lr_saved;
8138 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
8139 return 0;
8140 }
8141
8142 static int
8143 event_unlock(struct socket *so, int refcount, void *lr)
8144 {
8145 void *lr_saved;
8146 lck_mtx_t *mutex_held;
8147
8148 if (lr == NULL) {
8149 lr_saved = __builtin_return_address(0);
8150 } else {
8151 lr_saved = lr;
8152 }
8153
8154 if (refcount) {
8155 so->so_usecount--;
8156 }
8157 if (so->so_usecount < 0) {
8158 panic("%s: so=%p usecount=%d lrh= %s", __func__,
8159 so, so->so_usecount, solockhistory_nr(so));
8160 /* NOTREACHED */
8161 }
8162 if (so->so_pcb == NULL) {
8163 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s", __func__,
8164 so, so->so_usecount, (void *)lr_saved,
8165 solockhistory_nr(so));
8166 /* NOTREACHED */
8167 }
8168 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8169
8170 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
8171 so->unlock_lr[so->next_unlock_lr] = lr_saved;
8172 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
8173
8174 if (so->so_usecount == 0) {
8175 VERIFY(so->so_flags & SOF_PCBCLEARING);
8176 event_sofreelastref(so);
8177 } else {
8178 lck_mtx_unlock(mutex_held);
8179 }
8180
8181 return 0;
8182 }
8183
8184 static int
8185 event_sofreelastref(struct socket *so)
8186 {
8187 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8188
8189 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
8190
8191 so->so_pcb = NULL;
8192
8193 /*
8194 * Disable upcall in the event another thread is in kev_post_msg()
8195  * appending a record to the receive socket buffer, since sbwakeup()
8196 * may release the socket lock otherwise.
8197 */
8198 so->so_rcv.sb_flags &= ~SB_UPCALL;
8199 so->so_snd.sb_flags &= ~SB_UPCALL;
8200 so->so_event = sonullevent;
8201 lck_mtx_unlock(&(ev_pcb->evp_mtx));
8202
8203 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
8204 lck_rw_lock_exclusive(&kev_rwlock);
8205 LIST_REMOVE(ev_pcb, evp_link);
8206 kevtstat.kes_pcbcount--;
8207 kevtstat.kes_gencnt++;
8208 lck_rw_done(&kev_rwlock);
8209 kev_delete(ev_pcb);
8210
8211 sofreelastref(so, 1);
8212 return 0;
8213 }
8214
8215 static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));
8216
8217 static
8218 struct kern_event_head kern_event_head;
8219
8220 static u_int32_t static_event_id = 0;
8221
8222 static KALLOC_TYPE_DEFINE(ev_pcb_zone, struct kern_event_pcb, NET_KT_DEFAULT);
8223
8224 /*
8225 * Install the protosw's for the NKE manager. Invoked at extension load time
8226 */
8227 void
8228 kern_event_init(struct domain *dp)
8229 {
8230 struct protosw *pr;
8231 int i;
8232
8233 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
8234 VERIFY(dp == systemdomain);
8235
8236 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
8237 net_add_proto(pr, dp, 1);
8238 }
8239 }
8240
8241 static int
8242 kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
8243 {
8244 int error = 0;
8245 struct kern_event_pcb *ev_pcb;
8246
8247 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
8248 if (error != 0) {
8249 return error;
8250 }
8251
8252 ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO);
8253 lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL);
8254
8255 ev_pcb->evp_socket = so;
8256 ev_pcb->evp_vendor_code_filter = 0xffffffff;
8257
8258 so->so_pcb = (caddr_t) ev_pcb;
8259 lck_rw_lock_exclusive(&kev_rwlock);
8260 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
8261 kevtstat.kes_pcbcount++;
8262 kevtstat.kes_gencnt++;
8263 lck_rw_done(&kev_rwlock);
8264
8265 return error;
8266 }
8267
8268 static void
8269 kev_delete(struct kern_event_pcb *ev_pcb)
8270 {
8271 VERIFY(ev_pcb != NULL);
8272 lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp);
8273 zfree(ev_pcb_zone, ev_pcb);
8274 }
8275
8276 static int
8277 kev_detach(struct socket *so)
8278 {
8279 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8280
8281 if (ev_pcb != NULL) {
8282 soisdisconnected(so);
8283 so->so_flags |= SOF_PCBCLEARING;
8284 }
8285
8286 return 0;
8287 }
8288
8289 /*
8290 * For now, kev_vendor_code and mbuf_tags use the same
8291 * mechanism.
8292 */
8293 errno_t
8294 kev_vendor_code_find(
8295 const char *string,
8296 u_int32_t *out_vendor_code)
8297 {
8298 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
8299 return EINVAL;
8300 }
8301 return net_str_id_find_internal(string, out_vendor_code,
8302 NSI_VENDOR_CODE, 1);
8303 }
8304
8305 errno_t
8306 kev_msg_post(struct kev_msg *event_msg)
8307 {
8308 mbuf_tag_id_t min_vendor, max_vendor;
8309
8310 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
8311
8312 if (event_msg == NULL) {
8313 return EINVAL;
8314 }
8315
8316 /*
8317 * Limit third parties to posting events for registered vendor codes
8318 * only
8319 */
8320 if (event_msg->vendor_code < min_vendor ||
8321 event_msg->vendor_code > max_vendor) {
8322 os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
8323 return EINVAL;
8324 }
8325 return kev_post_msg(event_msg);
8326 }
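
/*
 * Illustration (a hedged kernel-extension sketch, not compiled here) of
 * posting an event through the KPI above.  The vendor string, class,
 * subclass and event code values are placeholders chosen by the caller;
 * only the struct kev_msg fields and the kev_vendor_code_find() /
 * kev_msg_post() calls come from this file and <sys/kern_event.h>.
 *
 *	static errno_t
 *	post_example_event(void)
 *	{
 *		struct kev_msg msg = { .vendor_code = 0 };
 *		u_int32_t vendor;
 *		uint32_t payload = 42;          // example payload word
 *		errno_t err;
 *
 *		err = kev_vendor_code_find("com.example.driver", &vendor);
 *		if (err != 0) {
 *			return err;
 *		}
 *		msg.vendor_code = vendor;
 *		msg.kev_class = 1;              // caller-defined class
 *		msg.kev_subclass = 1;           // caller-defined subclass
 *		msg.event_code = 1;             // caller-defined event code
 *		msg.dv[0].data_length = sizeof(payload);
 *		msg.dv[0].data_ptr = &payload;  // dv[1].data_length == 0 ends the list
 *		return kev_msg_post(&msg);
 *	}
 */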
8327
8328 static int
8329 kev_post_msg_internal(struct kev_msg *event_msg, int wait)
8330 {
8331 struct mbuf *m, *m2;
8332 struct kern_event_pcb *ev_pcb;
8333 struct kern_event_msg *ev;
8334 char *tmp;
8335 u_int32_t total_size;
8336 int i;
8337
8338 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
8339 /*
8340 * Special hook for ALF state updates
8341 */
8342 if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
8343 event_msg->kev_class == KEV_NKE_CLASS &&
8344 event_msg->kev_subclass == KEV_NKE_ALF_SUBCLASS &&
8345 event_msg->event_code == KEV_NKE_ALF_STATE_CHANGED) {
8346 #if (DEBUG || DEVELOPMENT)
8347 os_log_info(OS_LOG_DEFAULT, "KEV_NKE_ALF_STATE_CHANGED posted");
8348 #endif /* DEBUG || DEVELOPMENT */
8349 net_filter_event_mark(NET_FILTER_EVENT_ALF,
8350 net_check_compatible_alf());
8351 }
8352 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
8353
8354 /* Verify the message is small enough to fit in one mbuf w/o cluster */
8355 total_size = KEV_MSG_HEADER_SIZE;
8356
8357 for (i = 0; i < 5; i++) {
8358 if (event_msg->dv[i].data_length == 0) {
8359 break;
8360 }
8361 total_size += event_msg->dv[i].data_length;
8362 }
8363
8364 if (total_size > MLEN) {
8365 os_atomic_inc(&kevtstat.kes_toobig, relaxed);
8366 return EMSGSIZE;
8367 }
8368
8369 m = m_get(wait, MT_DATA);
8370 if (m == 0) {
8371 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8372 return ENOMEM;
8373 }
8374 ev = mtod(m, struct kern_event_msg *);
8375 total_size = KEV_MSG_HEADER_SIZE;
8376
8377 tmp = (char *) &ev->event_data[0];
8378 for (i = 0; i < 5; i++) {
8379 if (event_msg->dv[i].data_length == 0) {
8380 break;
8381 }
8382
8383 total_size += event_msg->dv[i].data_length;
8384 bcopy(event_msg->dv[i].data_ptr, tmp,
8385 event_msg->dv[i].data_length);
8386 tmp += event_msg->dv[i].data_length;
8387 }
8388
8389 ev->id = ++static_event_id;
8390 ev->total_size = total_size;
8391 ev->vendor_code = event_msg->vendor_code;
8392 ev->kev_class = event_msg->kev_class;
8393 ev->kev_subclass = event_msg->kev_subclass;
8394 ev->event_code = event_msg->event_code;
8395
8396 m->m_len = total_size;
8397 lck_rw_lock_shared(&kev_rwlock);
8398 for (ev_pcb = LIST_FIRST(&kern_event_head);
8399 ev_pcb;
8400 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8401 lck_mtx_lock(&ev_pcb->evp_mtx);
8402 if (ev_pcb->evp_socket->so_pcb == NULL) {
8403 lck_mtx_unlock(&ev_pcb->evp_mtx);
8404 continue;
8405 }
8406 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
8407 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
8408 lck_mtx_unlock(&ev_pcb->evp_mtx);
8409 continue;
8410 }
8411
8412 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
8413 if (ev_pcb->evp_class_filter != ev->kev_class) {
8414 lck_mtx_unlock(&ev_pcb->evp_mtx);
8415 continue;
8416 }
8417
8418 if ((ev_pcb->evp_subclass_filter !=
8419 KEV_ANY_SUBCLASS) &&
8420 (ev_pcb->evp_subclass_filter !=
8421 ev->kev_subclass)) {
8422 lck_mtx_unlock(&ev_pcb->evp_mtx);
8423 continue;
8424 }
8425 }
8426 }
8427
8428 m2 = m_copym(m, 0, m->m_len, wait);
8429 if (m2 == 0) {
8430 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8431 m_free(m);
8432 lck_mtx_unlock(&ev_pcb->evp_mtx);
8433 lck_rw_done(&kev_rwlock);
8434 return ENOMEM;
8435 }
8436 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
8437 /*
8438 * We use "m" for the socket stats as it would be
8439 * unsafe to use "m2"
8440 */
8441 so_inc_recv_data_stat(ev_pcb->evp_socket,
8442 1, m->m_len, MBUF_TC_BE);
8443
8444 sorwakeup(ev_pcb->evp_socket);
8445 os_atomic_inc(&kevtstat.kes_posted, relaxed);
8446 } else {
8447 os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
8448 }
8449 lck_mtx_unlock(&ev_pcb->evp_mtx);
8450 }
8451 m_free(m);
8452 lck_rw_done(&kev_rwlock);
8453
8454 return 0;
8455 }
8456
8457 int
8458 kev_post_msg(struct kev_msg *event_msg)
8459 {
8460 return kev_post_msg_internal(event_msg, M_WAIT);
8461 }
8462
8463 int
8464 kev_post_msg_nowait(struct kev_msg *event_msg)
8465 {
8466 return kev_post_msg_internal(event_msg, M_NOWAIT);
8467 }
8468
8469 static int
8470 kev_control(struct socket *so,
8471 u_long cmd,
8472 caddr_t data,
8473 __unused struct ifnet *ifp,
8474 __unused struct proc *p)
8475 {
8476 struct kev_request *kev_req = (struct kev_request *) data;
8477 struct kern_event_pcb *ev_pcb;
8478 struct kev_vendor_code *kev_vendor;
8479 u_int32_t *id_value = (u_int32_t *) data;
8480
8481 switch (cmd) {
8482 case SIOCGKEVID:
8483 *id_value = static_event_id;
8484 break;
8485 case SIOCSKEVFILT:
8486 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8487 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
8488 ev_pcb->evp_class_filter = kev_req->kev_class;
8489 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
8490 break;
8491 case SIOCGKEVFILT:
8492 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8493 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
8494 kev_req->kev_class = ev_pcb->evp_class_filter;
8495 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
8496 break;
8497 case SIOCGKEVVENDOR:
8498 kev_vendor = (struct kev_vendor_code *)data;
8499 /* Make sure string is NULL terminated */
8500 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
8501 return net_str_id_find_internal(kev_vendor->vendor_string,
8502 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
8503 default:
8504 return ENOTSUP;
8505 }
8506
8507 return 0;
8508 }
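
/*
 * Illustration (a hedged userspace sketch) of the consumer side of the
 * ioctls handled above: open a PF_SYSTEM/SYSPROTO_EVENT socket, install a
 * vendor/class/subclass filter with SIOCSKEVFILT, then read struct
 * kern_event_msg records posted by kev_post_msg().
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/kern_event.h>
 *	#include <unistd.h>
 *
 *	static int
 *	open_kernel_event_socket(void)
 *	{
 *		struct kev_request req = {
 *			.vendor_code = KEV_VENDOR_APPLE,
 *			.kev_class = KEV_NETWORK_CLASS,
 *			.kev_subclass = KEV_ANY_SUBCLASS,
 *		};
 *		int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *
 *		if (s == -1) {
 *			return -1;
 *		}
 *		if (ioctl(s, SIOCSKEVFILT, &req) == -1) {
 *			close(s);
 *			return -1;
 *		}
 *		return s;  // recv() now yields matching struct kern_event_msg records
 *	}
 */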
8509
8510 int
8511 kevt_getstat SYSCTL_HANDLER_ARGS
8512 {
8513 #pragma unused(oidp, arg1, arg2)
8514 int error = 0;
8515
8516 lck_rw_lock_shared(&kev_rwlock);
8517
8518 if (req->newptr != USER_ADDR_NULL) {
8519 error = EPERM;
8520 goto done;
8521 }
8522 if (req->oldptr == USER_ADDR_NULL) {
8523 req->oldidx = sizeof(struct kevtstat);
8524 goto done;
8525 }
8526
8527 error = SYSCTL_OUT(req, &kevtstat,
8528 MIN(sizeof(struct kevtstat), req->oldlen));
8529 done:
8530 lck_rw_done(&kev_rwlock);
8531
8532 return error;
8533 }
8534
8535 __private_extern__ int
8536 kevt_pcblist SYSCTL_HANDLER_ARGS
8537 {
8538 #pragma unused(oidp, arg1, arg2)
8539 int error = 0;
8540 uint64_t n, i;
8541 struct xsystmgen xsg;
8542 void *buf = NULL;
8543 size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
8544 ROUNDUP64(sizeof(struct xsocket_n)) +
8545 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
8546 ROUNDUP64(sizeof(struct xsockstat_n));
8547 struct kern_event_pcb *ev_pcb;
8548
8549 buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO);
8550 if (buf == NULL) {
8551 return ENOMEM;
8552 }
8553
8554 lck_rw_lock_shared(&kev_rwlock);
8555
8556 n = kevtstat.kes_pcbcount;
8557
8558 if (req->oldptr == USER_ADDR_NULL) {
8559 req->oldidx = (size_t) ((n + n / 8) * item_size);
8560 goto done;
8561 }
8562 if (req->newptr != USER_ADDR_NULL) {
8563 error = EPERM;
8564 goto done;
8565 }
8566 bzero(&xsg, sizeof(xsg));
8567 xsg.xg_len = sizeof(xsg);
8568 xsg.xg_count = n;
8569 xsg.xg_gen = kevtstat.kes_gencnt;
8570 xsg.xg_sogen = so_gencnt;
8571 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8572 if (error) {
8573 goto done;
8574 }
8575 /*
8576 * We are done if there is no pcb
8577 */
8578 if (n == 0) {
8579 goto done;
8580 }
8581
8582 i = 0;
8583 for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
8584 i < n && ev_pcb != NULL;
8585 i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8586 struct xkevtpcb *xk = (struct xkevtpcb *)buf;
8587 struct xsocket_n *xso = (struct xsocket_n *)
8588 ADVANCE64(xk, sizeof(*xk));
8589 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
8590 ADVANCE64(xso, sizeof(*xso));
8591 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
8592 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
8593 struct xsockstat_n *xsostats = (struct xsockstat_n *)
8594 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
8595
8596 bzero(buf, item_size);
8597
8598 lck_mtx_lock(&ev_pcb->evp_mtx);
8599
8600 xk->kep_len = sizeof(struct xkevtpcb);
8601 xk->kep_kind = XSO_EVT;
8602 xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
8603 xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
8604 xk->kep_class_filter = ev_pcb->evp_class_filter;
8605 xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
8606
8607 sotoxsocket_n(ev_pcb->evp_socket, xso);
8608 sbtoxsockbuf_n(ev_pcb->evp_socket ?
8609 &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
8610 sbtoxsockbuf_n(ev_pcb->evp_socket ?
8611 &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
8612 sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
8613
8614 lck_mtx_unlock(&ev_pcb->evp_mtx);
8615
8616 error = SYSCTL_OUT(req, buf, item_size);
8617 }
8618
8619 if (error == 0) {
8620 /*
8621 * Give the user an updated idea of our state.
8622 * If the generation differs from what we told
8623 * her before, she knows that something happened
8624 * while we were processing this request, and it
8625 * might be necessary to retry.
8626 */
8627 bzero(&xsg, sizeof(xsg));
8628 xsg.xg_len = sizeof(xsg);
8629 xsg.xg_count = n;
8630 xsg.xg_gen = kevtstat.kes_gencnt;
8631 xsg.xg_sogen = so_gencnt;
8632 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8633 if (error) {
8634 goto done;
8635 }
8636 }
8637
8638 done:
8639 lck_rw_done(&kev_rwlock);
8640
8641 kfree_data(buf, item_size);
8642 return error;
8643 }
8644
8645 #endif /* SOCKETS */
8646
8647
8648 int
8649 fill_kqueueinfo(kqueue_t kqu, struct kqueue_info * kinfo)
8650 {
8651 struct vinfo_stat * st;
8652
8653 st = &kinfo->kq_stat;
8654
8655 st->vst_size = kqu.kq->kq_count;
8656 if (kqu.kq->kq_state & KQ_KEV_QOS) {
8657 st->vst_blksize = sizeof(struct kevent_qos_s);
8658 } else if (kqu.kq->kq_state & KQ_KEV64) {
8659 st->vst_blksize = sizeof(struct kevent64_s);
8660 } else {
8661 st->vst_blksize = sizeof(struct kevent);
8662 }
8663 st->vst_mode = S_IFIFO;
8664 st->vst_ino = (kqu.kq->kq_state & KQ_DYNAMIC) ?
8665 kqu.kqwl->kqwl_dynamicid : 0;
8666
8667 /* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
8668 #define PROC_KQUEUE_MASK (KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
8669 static_assert(PROC_KQUEUE_SLEEP == KQ_SLEEP);
8670 static_assert(PROC_KQUEUE_32 == KQ_KEV32);
8671 static_assert(PROC_KQUEUE_64 == KQ_KEV64);
8672 static_assert(PROC_KQUEUE_QOS == KQ_KEV_QOS);
8673 static_assert(PROC_KQUEUE_WORKQ == KQ_WORKQ);
8674 static_assert(PROC_KQUEUE_WORKLOOP == KQ_WORKLOOP);
8675 kinfo->kq_state = kqu.kq->kq_state & PROC_KQUEUE_MASK;
8676 if ((kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0) {
8677 if (kqu.kqf->kqf_sel.si_flags & SI_RECORDED) {
8678 kinfo->kq_state |= PROC_KQUEUE_SELECT;
8679 }
8680 }
8681
8682 return 0;
8683 }
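
/*
 * fill_kqueueinfo() backs the kqueue file-descriptor flavor exposed through
 * libproc.  A hedged userspace sketch; the flavor constant and struct
 * kqueue_fdinfo layout are assumed from <sys/proc_info.h> and <libproc.h>:
 *
 *	#include <libproc.h>
 *	#include <sys/proc_info.h>
 *
 *	static int
 *	kqueue_state_for_fd(pid_t pid, int kqfd)
 *	{
 *		struct kqueue_fdinfo info;
 *		int n = proc_pidfdinfo(pid, kqfd, PROC_PIDFDKQUEUEINFO,
 *		    &info, sizeof(info));
 *		if (n <= 0) {
 *			return -1;
 *		}
 *		// kq_state carries the PROC_KQUEUE_* flags assembled above.
 *		return (int)info.kqueueinfo.kq_state;
 *	}
 */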
8684
8685 static int
8686 fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
8687 {
8688 workq_threadreq_t kqr = &kqwl->kqwl_request;
8689 workq_threadreq_param_t trp = {};
8690 int err;
8691
8692 if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
8693 return EINVAL;
8694 }
8695
8696 if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
8697 return err;
8698 }
8699
8700 kqlock(kqwl);
8701
8702 kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
8703 kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
8704 kqdi->kqdi_request_state = kqr->tr_state;
8705 kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
8706 kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
8707 kqdi->kqdi_sync_waiters = 0;
8708 kqdi->kqdi_sync_waiter_qos = 0;
8709
8710 trp.trp_value = kqwl->kqwl_params;
8711 if (trp.trp_flags & TRP_PRIORITY) {
8712 kqdi->kqdi_pri = trp.trp_pri;
8713 } else {
8714 kqdi->kqdi_pri = 0;
8715 }
8716
8717 if (trp.trp_flags & TRP_POLICY) {
8718 kqdi->kqdi_pol = trp.trp_pol;
8719 } else {
8720 kqdi->kqdi_pol = 0;
8721 }
8722
8723 if (trp.trp_flags & TRP_CPUPERCENT) {
8724 kqdi->kqdi_cpupercent = trp.trp_cpupercent;
8725 } else {
8726 kqdi->kqdi_cpupercent = 0;
8727 }
8728
8729 kqunlock(kqwl);
8730
8731 return 0;
8732 }
8733
8734
8735 static unsigned long
8736 kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
8737 unsigned long buflen, unsigned long nknotes)
8738 {
8739 for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
8740 if (kq == knote_get_kq(kn)) {
8741 if (nknotes < buflen) {
8742 struct kevent_extinfo *info = &buf[nknotes];
8743
8744 kqlock(kq);
8745
8746 if (knote_fops(kn)->f_sanitized_copyout) {
8747 knote_fops(kn)->f_sanitized_copyout(kn, &info->kqext_kev);
8748 } else {
8749 info->kqext_kev = *(struct kevent_qos_s *)&kn->kn_kevent;
8750 }
8751
8752 if (knote_has_qos(kn)) {
8753 info->kqext_kev.qos =
8754 _pthread_priority_thread_qos_fast(kn->kn_qos);
8755 } else {
8756 info->kqext_kev.qos = kn->kn_qos_override;
8757 }
8758 info->kqext_kev.filter |= 0xff00; /* sign extend filter */
8759 info->kqext_kev.xflags = 0; /* this is where sfflags lives */
8760 info->kqext_kev.data = 0; /* this is where sdata lives */
8761 info->kqext_sdata = kn->kn_sdata;
8762 info->kqext_status = kn->kn_status;
8763 info->kqext_sfflags = kn->kn_sfflags;
8764
8765 kqunlock(kq);
8766 }
8767
8768 /* we return total number of knotes, which may be more than requested */
8769 nknotes++;
8770 }
8771 }
8772
8773 return nknotes;
8774 }
8775
8776 int
8777 kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
8778 int32_t *nkqueues_out)
8779 {
8780 proc_t p = (proc_t)proc;
8781 struct filedesc *fdp = &p->p_fd;
8782 unsigned int nkqueues = 0;
8783 unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
8784 size_t buflen, bufsize;
8785 kqueue_id_t *kq_ids = NULL;
8786 int err = 0;
8787
8788 assert(p != NULL);
8789
8790 if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
8791 err = EINVAL;
8792 goto out;
8793 }
8794
8795 buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX);
8796
8797 if (ubuflen != 0) {
8798 if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
8799 err = ERANGE;
8800 goto out;
8801 }
8802 kq_ids = (kqueue_id_t *)kalloc_data(bufsize, Z_WAITOK | Z_ZERO);
8803 if (!kq_ids) {
8804 err = ENOMEM;
8805 goto out;
8806 }
8807 }
8808
8809 kqhash_lock(fdp);
8810
8811 u_long kqhashmask = fdp->fd_kqhashmask;
8812 if (kqhashmask > 0) {
8813 for (uint32_t i = 0; i < kqhashmask + 1; i++) {
8814 struct kqworkloop *kqwl;
8815
8816 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
8817 /* report the number of kqueues, even if they don't all fit */
8818 if (nkqueues < buflen) {
8819 kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
8820 }
8821 nkqueues++;
8822 }
8823
8824 /*
8825 * Drop the kqhash lock and take it again to give some breathing room
8826 */
8827 kqhash_unlock(fdp);
8828 kqhash_lock(fdp);
8829
8830 /*
8831 * Reevaluate to see if we have raced with someone who changed this -
8832 * if we have, we should bail out with the set of info captured so far
8833 */
8834 if (fdp->fd_kqhashmask != kqhashmask) {
8835 break;
8836 }
8837 }
8838 }
8839
8840 kqhash_unlock(fdp);
8841
8842 if (kq_ids) {
8843 size_t copysize;
8844 if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), &copysize)) {
8845 err = ERANGE;
8846 goto out;
8847 }
8848
8849 assert(ubufsize >= copysize);
8850 err = copyout(kq_ids, ubuf, copysize);
8851 }
8852
8853 out:
8854 if (kq_ids) {
8855 kfree_data(kq_ids, bufsize);
8856 }
8857
8858 if (!err) {
8859 *nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
8860 }
8861 return err;
8862 }
8863
8864 int
8865 kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8866 uint32_t ubufsize, int32_t *size_out)
8867 {
8868 proc_t p = (proc_t)proc;
8869 struct kqworkloop *kqwl;
8870 int err = 0;
8871 struct kqueue_dyninfo kqdi = { };
8872
8873 assert(p != NULL);
8874
8875 if (ubufsize < sizeof(struct kqueue_info)) {
8876 return ENOBUFS;
8877 }
8878
8879 kqwl = kqworkloop_hash_lookup_and_retain(&p->p_fd, kq_id);
8880 if (!kqwl) {
8881 return ESRCH;
8882 }
8883
8884 /*
8885 * backward compatibility: allow the argument to this call to only be
8886 * a struct kqueue_info
8887 */
8888 if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
8889 ubufsize = sizeof(struct kqueue_dyninfo);
8890 err = fill_kqueue_dyninfo(kqwl, &kqdi);
8891 } else {
8892 ubufsize = sizeof(struct kqueue_info);
8893 err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
8894 }
8895 if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
8896 *size_out = ubufsize;
8897 }
8898 kqworkloop_release(kqwl);
8899 return err;
8900 }
8901
8902 int
8903 kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8904 uint32_t ubufsize, int32_t *nknotes_out)
8905 {
8906 proc_t p = (proc_t)proc;
8907 struct kqworkloop *kqwl;
8908 int err;
8909
8910 kqwl = kqworkloop_hash_lookup_and_retain(&p->p_fd, kq_id);
8911 if (!kqwl) {
8912 return ESRCH;
8913 }
8914
8915 err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
8916 kqworkloop_release(kqwl);
8917 return err;
8918 }
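
/*
 * The two copyout helpers above back the dynamic-kqueue (workloop) info
 * flavors exposed through libproc.  A heavily hedged sketch of the consumer
 * side; the entry point and flavor names below are assumptions and may not
 * match a given SDK exactly:
 *
 *	#include <libproc.h>
 *	#include <sys/proc_info.h>
 *
 *	static int
 *	dump_workloop(pid_t pid, kqueue_id_t kq_id)
 *	{
 *		struct kqueue_dyninfo dyninfo;
 *		// Assumed prototype: proc_piddynkqueueinfo(pid, flavor, kq_id, buf, bufsize)
 *		int n = proc_piddynkqueueinfo(pid, PROC_PIDDYNKQUEUE_INFO, kq_id,
 *		    &dyninfo, sizeof(dyninfo));
 *		return (n >= (int)sizeof(struct kqueue_info)) ? 0 : -1;
 *	}
 */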
8919
8920 int
8921 pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
8922 uint32_t bufsize, int32_t *retval)
8923 {
8924 struct knote *kn;
8925 int i;
8926 int err = 0;
8927 struct filedesc *fdp = &p->p_fd;
8928 unsigned long nknotes = 0;
8929 unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
8930 struct kevent_extinfo *kqext = NULL;
8931
8932 /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
8933 buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
8934
8935 kqext = (struct kevent_extinfo *)kalloc_data(buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO);
8936 if (kqext == NULL) {
8937 err = ENOMEM;
8938 goto out;
8939 }
8940
8941 proc_fdlock(p);
8942 for (i = 0; i < fdp->fd_knlistsize; i++) {
8943 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
8944 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8945 }
8946 proc_fdunlock(p);
8947
8948 knhash_lock(fdp);
8949 u_long knhashmask = fdp->fd_knhashmask;
8950
8951 if (knhashmask != 0) {
8952 for (i = 0; i < (int)knhashmask + 1; i++) {
8953 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
8954 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8955
8956 knhash_unlock(fdp);
8957 knhash_lock(fdp);
8958
8959 /*
8960 * Reevaluate to see if we have raced with someone who changed this -
8961 * if we have, we should bail out with the set of info captured so far
8962 */
8963 if (fdp->fd_knhashmask != knhashmask) {
8964 break;
8965 }
8966 }
8967 }
8968 knhash_unlock(fdp);
8969
8970 assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8971 err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8972
8973 out:
8974 kfree_data(kqext, buflen * sizeof(struct kevent_extinfo));
8975
8976 if (!err) {
8977 *retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
8978 }
8979 return err;
8980 }
8981
8982 static unsigned int
8983 klist_copy_udata(struct klist *list, uint64_t *buf,
8984 unsigned int buflen, unsigned int nknotes)
8985 {
8986 struct knote *kn;
8987 SLIST_FOREACH(kn, list, kn_link) {
8988 if (nknotes < buflen) {
8989 /*
8990 * kevent_register will always set kn_udata atomically
8991 * so that we don't have to take any kqlock here.
8992 */
8993 buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
8994 }
8995 /* we return total number of knotes, which may be more than requested */
8996 nknotes++;
8997 }
8998
8999 return nknotes;
9000 }
9001
9002 int
9003 kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize)
9004 {
9005 proc_t p = (proc_t)proc;
9006 struct filedesc *fdp = &p->p_fd;
9007 unsigned int nuptrs = 0;
9008 unsigned int buflen = bufsize / sizeof(uint64_t);
9009 struct kqworkloop *kqwl;
9010
9011 if (buflen > 0) {
9012 assert(buf != NULL);
9013 }
9014
9015 proc_fdlock(p);
9016 for (int i = 0; i < fdp->fd_knlistsize; i++) {
9017 nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
9018 }
9019 proc_fdunlock(p);
9020
9021 knhash_lock(fdp);
9022 if (fdp->fd_knhashmask != 0) {
9023 for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
9024 nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
9025 }
9026 }
9027 knhash_unlock(fdp);
9028
9029 kqhash_lock(fdp);
9030 if (fdp->fd_kqhashmask != 0) {
9031 for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
9032 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
9033 if (nuptrs < buflen) {
9034 buf[nuptrs] = kqwl->kqwl_dynamicid;
9035 }
9036 nuptrs++;
9037 }
9038 }
9039 }
9040 kqhash_unlock(fdp);
9041
9042 return (int)nuptrs;
9043 }
9044
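/*
 * Publish return-to-kernel flags to the thread's TSD slot (as located by
 * thread_rettokern_addr): if the thread is still bound to a kqueue request,
 * set R2K_WORKLOOP_PENDING_EVENTS so that userspace can tell the workloop
 * has pending events.
 */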
static void
kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
{
	uint64_t ast_addr;
	bool proc_is_64bit = !!(p->p_flag & P_LP64);
	size_t user_addr_size = proc_is_64bit ? 8 : 4;
	uint32_t ast_flags32 = 0;
	uint64_t ast_flags64 = 0;
	struct uthread *ut = get_bsdthread_info(thread);

	if (ut->uu_kqr_bound != NULL) {
		ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
	}

	if (ast_flags64 == 0) {
		return;
	}

	if (!(p->p_flag & P_LP64)) {
		ast_flags32 = (uint32_t)ast_flags64;
		assert(ast_flags64 < 0x100000000ull);
	}

	ast_addr = thread_rettokern_addr(thread);
	if (ast_addr == 0) {
		return;
	}

	if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
	    (user_addr_t)ast_addr,
	    user_addr_size) != 0) {
		printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
		    "ast_addr = %llu\n", proc_getpid(p), thread_tid(current_thread()), ast_addr);
	}
}

/*
 * Semantics of writing to the TSD value:
 *
 * 1. It is written by the kernel and cleared by userspace.
 * 2. When userspace clears the TSD field, it takes responsibility for acting
 *    on the quantum expiry notification conveyed by the kernel.
 * 3. The TSD value is always cleared upon entry into userspace and upon exit
 *    from userspace back to the kernel, to make sure that it is never leaked
 *    across thread requests.
 */
void
kevent_set_workq_quantum_expiry_user_tsd(proc_t p, thread_t thread,
    uint64_t flags)
{
	uint64_t ast_addr;
	bool proc_is_64bit = !!(p->p_flag & P_LP64);
	uint32_t ast_flags32 = 0;
	uint64_t ast_flags64 = flags;

	if (ast_flags64 == 0) {
		return;
	}

	if (!(p->p_flag & P_LP64)) {
		ast_flags32 = (uint32_t)ast_flags64;
		assert(ast_flags64 < 0x100000000ull);
	}

	ast_addr = thread_wqquantum_addr(thread);
	assert(ast_addr != 0);

	if (proc_is_64bit) {
		if (copyout_atomic64(ast_flags64, (user_addr_t)ast_addr)) {
#if DEBUG || DEVELOPMENT
			printf("pid %d (tid:%llu): copyout of workq quantum ast flags failed with "
			    "ast_addr = %llu\n", proc_getpid(p), thread_tid(thread), ast_addr);
#endif
		}
	} else {
		if (copyout_atomic32(ast_flags32, (user_addr_t)ast_addr)) {
#if DEBUG || DEVELOPMENT
			printf("pid %d (tid:%llu): copyout of workq quantum ast flags failed with "
			    "ast_addr = %llu\n", proc_getpid(p), thread_tid(thread), ast_addr);
#endif
		}
	}
}
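
/*
 * Illustrative sketch of the userspace half of the protocol described above.
 * This is not the actual libpthread implementation; `wq_quantum_tsd_slot` and
 * `handle_workq_quantum_expiry` are hypothetical names used only to show the
 * clear-on-read pattern the comment assumes:
 *
 *	uint64_t flags = atomic_exchange_explicit(wq_quantum_tsd_slot, 0,
 *	    memory_order_relaxed);
 *	if (flags != 0) {
 *		handle_workq_quantum_expiry(flags);
 *	}
 */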
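/*
 * Handle kevent-related AST bits before the thread returns to userspace:
 * redrive pending workqueue thread requests, publish return-to-kernel TSD
 * flags, and reevaluate workqueue quantum expiry, as selected by `bits`.
 */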
void
kevent_ast(thread_t thread, uint16_t bits)
{
	proc_t p = current_proc();

	if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
		workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}
	if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
		kevent_set_return_to_kernel_user_tsd(p, thread);
	}

	if (bits & AST_KEVENT_WORKQ_QUANTUM_EXPIRED) {
		workq_kern_quantum_expiry_reevaluate(p, thread);
	}
}

#if DEVELOPMENT || DEBUG

#define KEVENT_SYSCTL_BOUND_ID 1

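/*
 * Handler for kern.kevent.bound_id: reports the dynamic ID of the workloop
 * the calling thread is bound to, (uint64_t)-1 if it is bound to a workqueue
 * kqueue, or 0 if it is not bound at all. Read-only.
 */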
static int
kevent_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	uintptr_t type = (uintptr_t)arg1;
	uint64_t bound_id = 0;

	if (type != KEVENT_SYSCTL_BOUND_ID) {
		return EINVAL;
	}

	if (req->newptr) {
		return EINVAL;
	}

	struct uthread *ut = current_uthread();
	if (!ut) {
		return EFAULT;
	}

	workq_threadreq_t kqr = ut->uu_kqr_bound;
	if (kqr) {
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
		} else {
			bound_id = -1;
		}
	}

	return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
}

SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "kevent information");

SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    (void *)KEVENT_SYSCTL_BOUND_ID,
    sizeof(kqueue_id_t), kevent_sysctl, "Q",
    "get the ID of the bound kqueue");

#endif /* DEVELOPMENT || DEBUG */
