1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 *
28 */
29 /*-
30 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54 /*
55 * @(#)kern_event.c 1.0 (3/31/2000)
56 */
57 #include <stdint.h>
58 #include <machine/atomic.h>
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/filedesc.h>
63 #include <sys/kernel.h>
64 #include <sys/proc_internal.h>
65 #include <sys/kauth.h>
66 #include <sys/malloc.h>
67 #include <sys/unistd.h>
68 #include <sys/file_internal.h>
69 #include <sys/fcntl.h>
70 #include <sys/select.h>
71 #include <sys/queue.h>
72 #include <sys/event.h>
73 #include <sys/eventvar.h>
74 #include <sys/protosw.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/stat.h>
78 #include <sys/syscall.h> // SYS_* constants
79 #include <sys/sysctl.h>
80 #include <sys/uio.h>
81 #include <sys/sysproto.h>
82 #include <sys/user.h>
83 #include <sys/vnode_internal.h>
84 #include <string.h>
85 #include <sys/proc_info.h>
86 #include <sys/codesign.h>
87 #include <sys/pthread_shims.h>
88 #include <sys/kdebug.h>
89 #include <os/base.h>
90 #include <pexpert/pexpert.h>
91
92 #include <kern/thread_group.h>
93 #include <kern/locks.h>
94 #include <kern/clock.h>
95 #include <kern/cpu_data.h>
96 #include <kern/policy_internal.h>
97 #include <kern/thread_call.h>
98 #include <kern/sched_prim.h>
99 #include <kern/waitq.h>
100 #include <kern/zalloc.h>
101 #include <kern/kalloc.h>
102 #include <kern/assert.h>
103 #include <kern/ast.h>
104 #include <kern/thread.h>
105 #include <kern/kcdata.h>
106
107 #include <pthread/priority_private.h>
108 #include <pthread/workqueue_syscalls.h>
109 #include <pthread/workqueue_internal.h>
110 #include <libkern/libkern.h>
111
112 #include <os/log.h>
113
114 #include "net/net_str_id.h"
115
116 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
117 #include <skywalk/lib/net_filter_event.h>
118
119 extern bool net_check_compatible_alf(void);
120 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
121
122 #include <mach/task.h>
123 #include <libkern/section_keywords.h>
124
125 #if CONFIG_MEMORYSTATUS
126 #include <sys/kern_memorystatus.h>
127 #endif
128
129 #if DEVELOPMENT || DEBUG
130 #define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0)
131 #define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1)
132 TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
133 #endif
134
135 static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
136 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params =
137 VM_PACKING_PARAMS(KNOTE_KQ_PACKED);
138
139 extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */
140 extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */
141
142 #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code))
143
144 static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id,
145 vfs_context_t ctx);
146 static int kqueue_close(struct fileglob *fg, vfs_context_t ctx);
147 static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
148 struct kevent_qos_s *kev);
149 static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx);
150
151 static const struct fileops kqueueops = {
152 .fo_type = DTYPE_KQUEUE,
153 .fo_read = fo_no_read,
154 .fo_write = fo_no_write,
155 .fo_ioctl = fo_no_ioctl,
156 .fo_select = kqueue_select,
157 .fo_close = kqueue_close,
158 .fo_drain = kqueue_drain,
159 .fo_kqfilter = kqueue_kqfilter,
160 };
161
162 static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *);
163 static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result);
164 static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread,
165 thread_continue_t cont, struct _kevent_register *cont_args) __dead2;
166 static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2;
167 static void kevent_register_wait_cleanup(struct knote *kn);
168
169 static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn);
170 static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags);
171
172 static void kqworkq_unbind(proc_t p, workq_threadreq_t);
173 static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread);
174 static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index);
175 static void kqueue_update_iotier_override(kqueue_t kqu);
176
177 static void kqworkloop_unbind(struct kqworkloop *kwql);
178
179 enum kqwl_unbind_locked_mode {
180 KQWL_OVERRIDE_DROP_IMMEDIATELY,
181 KQWL_OVERRIDE_DROP_DELAYED,
182 };
183 static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread,
184 enum kqwl_unbind_locked_mode how);
185 static void kqworkloop_unbind_delayed_override_drop(thread_t thread);
186 static kq_index_t kqworkloop_override(struct kqworkloop *kqwl);
187 static void kqworkloop_set_overcommit(struct kqworkloop *kqwl);
188 enum {
189 KQWL_UTQ_NONE,
190 /*
191 * The wakeup qos is the qos of QUEUED knotes.
192 *
193 * This QoS is accounted for with the events override in the
194 * kqr_override_index field. It is raised each time a new knote is queued at
195 * a given QoS. The kqwl_wakeup_qos field is a superset of the non empty
196 * knote buckets and is recomputed after each event delivery.
197 */
198 KQWL_UTQ_UPDATE_WAKEUP_QOS,
199 KQWL_UTQ_RECOMPUTE_WAKEUP_QOS,
200 KQWL_UTQ_UNBINDING, /* attempt to rebind */
201 KQWL_UTQ_PARKING,
202 /*
203 * The wakeup override is for suppressed knotes that have fired again at
204 * a higher QoS than the one for which they are suppressed already.
205 * This override is cleared when the knote suppressed list becomes empty.
206 */
207 KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
208 KQWL_UTQ_RESET_WAKEUP_OVERRIDE,
209 /*
210 * The QoS is the maximum QoS of an event enqueued on this workloop in
211 * userland. It is copied from the only EVFILT_WORKLOOP knote with
212 * a NOTE_WL_THREAD_REQUEST bit set allowed on this workloop. If there is no
213 * such knote, this QoS is 0.
214 */
215 KQWL_UTQ_SET_QOS_INDEX,
216 KQWL_UTQ_REDRIVE_EVENTS,
217 };
218 static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos);
219 static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags);
220
221 static struct knote *knote_alloc(void);
222 static void knote_free(struct knote *kn);
223 static int kq_add_knote(struct kqueue *kq, struct knote *kn,
224 struct knote_lock_ctx *knlc, struct proc *p);
225 static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq,
226 struct kevent_qos_s *kev, bool is_fd, struct proc *p);
227
228 static void knote_activate(kqueue_t kqu, struct knote *kn, int result);
229 static void knote_dequeue(kqueue_t kqu, struct knote *kn);
230
231 static void knote_apply_touch(kqueue_t kqu, struct knote *kn,
232 struct kevent_qos_s *kev, int result);
233 static void knote_suppress(kqueue_t kqu, struct knote *kn);
234 static void knote_unsuppress(kqueue_t kqu, struct knote *kn);
235 static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc);
236
237 // both these functions may dequeue the knote and it is up to the caller
238 // to enqueue the knote back
239 static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result);
240 static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp);
241
242 static ZONE_DEFINE(knote_zone, "knote zone",
243 sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
244 static ZONE_DEFINE(kqfile_zone, "kqueue file zone",
245 sizeof(struct kqfile), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
246 static ZONE_DEFINE(kqworkq_zone, "kqueue workq zone",
247 sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
248 static ZONE_DEFINE(kqworkloop_zone, "kqueue workloop zone",
249 sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
250
251 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
252
253 static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev);
254 static void filt_no_detach(struct knote *kn);
255 static int filt_bad_event(struct knote *kn, long hint);
256 static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev);
257 static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev);
258
259 SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = {
260 .f_attach = filt_no_attach,
261 .f_detach = filt_no_detach,
262 .f_event = filt_bad_event,
263 .f_touch = filt_bad_touch,
264 .f_process = filt_bad_process,
265 };
266
267 #if CONFIG_MEMORYSTATUS
268 extern const struct filterops memorystatus_filtops;
269 #endif /* CONFIG_MEMORYSTATUS */
270 extern const struct filterops fs_filtops;
271 extern const struct filterops sig_filtops;
272 extern const struct filterops machport_filtops;
273 extern const struct filterops pipe_nfiltops;
274 extern const struct filterops pipe_rfiltops;
275 extern const struct filterops pipe_wfiltops;
276 extern const struct filterops ptsd_kqops;
277 extern const struct filterops ptmx_kqops;
278 extern const struct filterops soread_filtops;
279 extern const struct filterops sowrite_filtops;
280 extern const struct filterops sock_filtops;
281 extern const struct filterops soexcept_filtops;
282 extern const struct filterops spec_filtops;
283 extern const struct filterops bpfread_filtops;
284 extern const struct filterops necp_fd_rfiltops;
285 #if SKYWALK
286 extern const struct filterops skywalk_channel_rfiltops;
287 extern const struct filterops skywalk_channel_wfiltops;
288 extern const struct filterops skywalk_channel_efiltops;
289 #endif /* SKYWALK */
290 extern const struct filterops fsevent_filtops;
291 extern const struct filterops vnode_filtops;
292 extern const struct filterops tty_filtops;
293
294 const static struct filterops file_filtops;
295 const static struct filterops kqread_filtops;
296 const static struct filterops proc_filtops;
297 const static struct filterops timer_filtops;
298 const static struct filterops user_filtops;
299 const static struct filterops workloop_filtops;
300
301 /*
302 *
303 * Rules for adding new filters to the system:
304 * Public filters:
305 * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value)
306 * in the exported section of the header
307 * - Update the EVFILT_SYSCOUNT value to reflect the new addition
308 * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end
309 * of the Public Filters section in the array.
310 * Private filters:
311 * - Add a new "EVFILT_" value to bsd/sys/event.h (typically a positive value)
312 * in the XNU_KERNEL_PRIVATE section of the header
313 * - Update the EVFILTID_MAX value to reflect the new addition
314 * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of
315 * the Private filters section of the array.
316 */
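/*
 * Illustrative sketch only (not part of the original source): wiring up a
 * hypothetical private filter would follow the rules above. EVFILTID_EXAMPLE
 * and example_filtops are made-up names.
 *
 *	// bsd/sys/event.h (XNU_KERNEL_PRIVATE section): add EVFILTID_EXAMPLE
 *	// at the end of the private IDs and bump EVFILTID_MAX.
 *
 *	static int filt_example_attach(struct knote *kn, struct kevent_qos_s *kev);
 *	static void filt_example_detach(struct knote *kn);
 *	static int filt_example_event(struct knote *kn, long hint);
 *
 *	SECURITY_READ_ONLY_EARLY(static struct filterops) example_filtops = {
 *		.f_attach = filt_example_attach,
 *		.f_detach = filt_example_detach,
 *		.f_event  = filt_example_event,
 *	};
 *
 *	// sysfilt_ops, at the end of the Private filters section:
 *	//	[EVFILTID_EXAMPLE] = &example_filtops,
 */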
317 static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
318 static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = {
319 /* Public Filters */
320 [~EVFILT_READ] = &file_filtops,
321 [~EVFILT_WRITE] = &file_filtops,
322 [~EVFILT_AIO] = &bad_filtops,
323 [~EVFILT_VNODE] = &file_filtops,
324 [~EVFILT_PROC] = &proc_filtops,
325 [~EVFILT_SIGNAL] = &sig_filtops,
326 [~EVFILT_TIMER] = &timer_filtops,
327 [~EVFILT_MACHPORT] = &machport_filtops,
328 [~EVFILT_FS] = &fs_filtops,
329 [~EVFILT_USER] = &user_filtops,
330 [~EVFILT_UNUSED_11] = &bad_filtops,
331 [~EVFILT_VM] = &bad_filtops,
332 [~EVFILT_SOCK] = &file_filtops,
333 #if CONFIG_MEMORYSTATUS
334 [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops,
335 #else
336 [~EVFILT_MEMORYSTATUS] = &bad_filtops,
337 #endif
338 [~EVFILT_EXCEPT] = &file_filtops,
339 #if SKYWALK
340 [~EVFILT_NW_CHANNEL] = &file_filtops,
341 #else /* !SKYWALK */
342 [~EVFILT_NW_CHANNEL] = &bad_filtops,
343 #endif /* !SKYWALK */
344 [~EVFILT_WORKLOOP] = &workloop_filtops,
345
346 /* Private filters */
347 [EVFILTID_KQREAD] = &kqread_filtops,
348 [EVFILTID_PIPE_N] = &pipe_nfiltops,
349 [EVFILTID_PIPE_R] = &pipe_rfiltops,
350 [EVFILTID_PIPE_W] = &pipe_wfiltops,
351 [EVFILTID_PTSD] = &ptsd_kqops,
352 [EVFILTID_SOREAD] = &soread_filtops,
353 [EVFILTID_SOWRITE] = &sowrite_filtops,
354 [EVFILTID_SCK] = &sock_filtops,
355 [EVFILTID_SOEXCEPT] = &soexcept_filtops,
356 [EVFILTID_SPEC] = &spec_filtops,
357 [EVFILTID_BPFREAD] = &bpfread_filtops,
358 [EVFILTID_NECP_FD] = &necp_fd_rfiltops,
359 #if SKYWALK
360 [EVFILTID_SKYWALK_CHANNEL_W] = &skywalk_channel_wfiltops,
361 [EVFILTID_SKYWALK_CHANNEL_R] = &skywalk_channel_rfiltops,
362 [EVFILTID_SKYWALK_CHANNEL_E] = &skywalk_channel_efiltops,
363 #else /* !SKYWALK */
364 [EVFILTID_SKYWALK_CHANNEL_W] = &bad_filtops,
365 [EVFILTID_SKYWALK_CHANNEL_R] = &bad_filtops,
366 [EVFILTID_SKYWALK_CHANNEL_E] = &bad_filtops,
367 #endif /* !SKYWALK */
368 [EVFILTID_FSEVENT] = &fsevent_filtops,
369 [EVFILTID_VN] = &vnode_filtops,
370 [EVFILTID_TTY] = &tty_filtops,
371 [EVFILTID_PTMX] = &ptmx_kqops,
372
373 /* fake filter for detached knotes, keep last */
374 [EVFILTID_DETACHED] = &bad_filtops,
375 };
376
377 static inline bool
378 kqr_thread_bound(workq_threadreq_t kqr)
379 {
380 return kqr->tr_state == WORKQ_TR_STATE_BOUND;
381 }
382
383 static inline bool
384 kqr_thread_requested_pending(workq_threadreq_t kqr)
385 {
386 workq_tr_state_t tr_state = kqr->tr_state;
387 return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND;
388 }
389
390 static inline bool
391 kqr_thread_requested(workq_threadreq_t kqr)
392 {
393 return kqr->tr_state != WORKQ_TR_STATE_IDLE;
394 }
395
396 static inline thread_t
397 kqr_thread_fast(workq_threadreq_t kqr)
398 {
399 assert(kqr_thread_bound(kqr));
400 return kqr->tr_thread;
401 }
402
403 static inline thread_t
404 kqr_thread(workq_threadreq_t kqr)
405 {
406 return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL;
407 }
408
409 static inline struct kqworkloop *
410 kqr_kqworkloop(workq_threadreq_t kqr)
411 {
412 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
413 return __container_of(kqr, struct kqworkloop, kqwl_request);
414 }
415 return NULL;
416 }
417
418 static inline kqueue_t
419 kqr_kqueue(proc_t p, workq_threadreq_t kqr)
420 {
421 kqueue_t kqu;
422 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
423 kqu.kqwl = kqr_kqworkloop(kqr);
424 } else {
425 kqu.kqwq = p->p_fd.fd_wqkqueue;
426 assert(kqr >= kqu.kqwq->kqwq_request &&
427 kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS);
428 }
429 return kqu;
430 }
431
432 #if CONFIG_PREADOPT_TG
433 /* There are no guarantees about which locks are held when this is called */
434 inline thread_group_qos_t
435 kqr_preadopt_thread_group(workq_threadreq_t req)
436 {
437 struct kqworkloop *kqwl = kqr_kqworkloop(req);
438 return kqwl ? os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed) : NULL;
439 }
440
441 /* There are no guarantees about which locks are held when this is called */
442 inline _Atomic(thread_group_qos_t) *
443 kqr_preadopt_thread_group_addr(workq_threadreq_t req)
444 {
445 struct kqworkloop *kqwl = kqr_kqworkloop(req);
446 return kqwl ? (&kqwl->kqwl_preadopt_tg) : NULL;
447 }
448 #endif
449
450 /*
451 * kqueue/note lock implementations
452 *
453 * The kqueue lock guards the kq state, the state of its queues,
454 * and the kqueue-aware status and locks of individual knotes.
455 *
456 * The kqueue workq lock is used to protect state guarding the
457 * interaction of the kqueue with the workq. This state cannot
458 * be guarded by the kq lock - as it needs to be taken when we
459 * already have the waitq set lock held (during the waitq hook
460 * callback). It might be better to use the waitq lock itself
461 * for this, but the IRQ requirements make that difficult.
462 *
463 * Knote flags, filter flags, and associated data are protected
464 * by the underlying object lock - and are only ever looked at
465 * by calling the filter to get a [consistent] snapshot of that
466 * data.
467 */
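/*
 * Illustrative pattern (not part of the original source): code below that
 * manipulates knote or queue state typically brackets it with the kq lock,
 * assuming `kq` and `kn` are a valid kqueue/knote pair:
 *
 *	kqlock(kq);
 *	// inspect or update kn->kn_status and the kq queues
 *	kqunlock(kq);
 *
 * while filter-owned state (kn_fflags, kn_sdata, ...) is only examined
 * through the filter's f_* hooks under the monitored object's own lock.
 */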
468
469 static inline void
470 kqlock(kqueue_t kqu)
471 {
472 lck_spin_lock(&kqu.kq->kq_lock);
473 }
474
475 static inline void
476 kqlock_held(__assert_only kqueue_t kqu)
477 {
478 LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED);
479 }
480
481 static inline void
482 kqunlock(kqueue_t kqu)
483 {
484 lck_spin_unlock(&kqu.kq->kq_lock);
485 }
486
487 static inline void
488 knhash_lock(struct filedesc *fdp)
489 {
490 lck_mtx_lock(&fdp->fd_knhashlock);
491 }
492
493 static inline void
494 knhash_unlock(struct filedesc *fdp)
495 {
496 lck_mtx_unlock(&fdp->fd_knhashlock);
497 }
498
499 /* wait event for knote locks */
500 static inline event_t
501 knote_lock_wev(struct knote *kn)
502 {
503 return (event_t)(&kn->kn_hook);
504 }
505
506 /* wait event for kevent_register_wait_* */
507 static inline event64_t
508 knote_filt_wev64(struct knote *kn)
509 {
510 /* kdp_workloop_sync_wait_find_owner knows about this */
511 return CAST_EVENT64_T(kn);
512 }
513
514 /* wait event for knote_post/knote_drop */
515 static inline event_t
516 knote_post_wev(struct knote *kn)
517 {
518 return &kn->kn_kevent;
519 }
520
521 /*!
522 * @function knote_has_qos
523 *
524 * @brief
525 * Whether the knote has a regular QoS.
526 *
527 * @discussion
528 * kn_qos_override is:
529 * - 0 on kqfiles
530 * - THREAD_QOS_LAST for special buckets (manager)
531 *
532 * Other values mean the knote participates in QoS propagation.
533 */
534 static inline bool
535 knote_has_qos(struct knote *kn)
536 {
537 return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST;
538 }
539
540 #pragma mark knote locks
541
542 /*
543 * Enum used by the knote_lock_* functions.
544 *
545 * KNOTE_KQ_LOCK_ALWAYS
546 * The function will always return with the kq lock held.
547 *
548 * KNOTE_KQ_LOCK_ON_SUCCESS
549 * The function will return with the kq lock held if it was successful
550 * (knote_lock() is the only function that can fail).
551 *
552 * KNOTE_KQ_LOCK_ON_FAILURE
553 * The function will return with the kq lock held if it was unsuccessful
554 * (knote_lock() is the only function that can fail).
555 *
556 * KNOTE_KQ_UNLOCK:
557 * The function returns with the kq unlocked.
558 */
559 enum kqlocking {
560 KNOTE_KQ_LOCK_ALWAYS,
561 KNOTE_KQ_LOCK_ON_SUCCESS,
562 KNOTE_KQ_LOCK_ON_FAILURE,
563 KNOTE_KQ_UNLOCK,
564 };
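/*
 * Illustrative sketch only (not from the original source): a caller that
 * wants to keep the kq lock on success might do:
 *
 *	struct knote_lock_ctx knlc;	// initialized the way callers below do
 *
 *	kqlock(kq);
 *	if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
 *		// the knote was dropped while we waited; kq is unlocked here
 *		return;
 *	}
 *	// ... operate on the knote with the kq lock held ...
 *	knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 */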
565
566 static struct knote_lock_ctx *
567 knote_lock_ctx_find(kqueue_t kqu, struct knote *kn)
568 {
569 struct knote_lock_ctx *ctx;
570 LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) {
571 if (ctx->knlc_knote == kn) {
572 return ctx;
573 }
574 }
575 panic("knote lock context not found: %p", kn);
576 __builtin_trap();
577 }
578
579 /* slowpath of knote_lock() */
580 __attribute__((noinline))
581 static bool __result_use_check
582 knote_lock_slow(kqueue_t kqu, struct knote *kn,
583 struct knote_lock_ctx *knlc, int kqlocking)
584 {
585 struct knote_lock_ctx *owner_lc;
586 struct uthread *uth = current_uthread();
587 wait_result_t wr;
588
589 kqlock_held(kqu);
590
591 owner_lc = knote_lock_ctx_find(kqu, kn);
592 #if DEBUG || DEVELOPMENT
593 knlc->knlc_state = KNOTE_LOCK_CTX_WAITING;
594 #endif
595 owner_lc->knlc_waiters++;
596
597 /*
598 * Make our lock context visible to knote_unlock()
599 */
600 uth->uu_knlock = knlc;
601
602 wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
603 knote_lock_wev(kn), owner_lc->knlc_thread,
604 THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER);
605
606 if (wr == THREAD_RESTART) {
607 /*
608 * We haven't been woken up by knote_unlock() but by knote_unlock_cancel().
609 * We need to clean up the state since no one else did.
610 */
611 uth->uu_knlock = NULL;
612 #if DEBUG || DEVELOPMENT
613 assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING);
614 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
615 #endif
616
617 if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
618 kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
619 kqlock(kqu);
620 }
621 return false;
622 } else {
623 if (kqlocking == KNOTE_KQ_LOCK_ALWAYS ||
624 kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) {
625 kqlock(kqu);
626 #if DEBUG || DEVELOPMENT
627 /*
628 * This state is set under the lock so we can't
629 * really assert this unless we hold the lock.
630 */
631 assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
632 #endif
633 }
634 return true;
635 }
636 }
637
638 /*
639 * Attempts to take the "knote" lock.
640 *
641 * Called with the kqueue lock held.
642 *
643 * Returns true if the knote lock is acquired, false if it has been dropped
644 */
645 static bool __result_use_check
646 knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc,
647 enum kqlocking kqlocking)
648 {
649 kqlock_held(kqu);
650
651 #if DEBUG || DEVELOPMENT
652 assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED);
653 #endif
654 knlc->knlc_knote = kn;
655 knlc->knlc_thread = current_thread();
656 knlc->knlc_waiters = 0;
657
658 if (__improbable(kn->kn_status & KN_LOCKED)) {
659 return knote_lock_slow(kqu, kn, knlc, kqlocking);
660 }
661
662 /*
663 * When the knote will be dropped, the knote lock is taken before
664 * KN_DROPPING is set, and then the knote will be removed from any
665 * hash table that references it before the lock is canceled.
666 */
667 assert((kn->kn_status & KN_DROPPING) == 0);
668 LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link);
669 kn->kn_status |= KN_LOCKED;
670 #if DEBUG || DEVELOPMENT
671 knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
672 #endif
673
674 if (kqlocking == KNOTE_KQ_UNLOCK ||
675 kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) {
676 kqunlock(kqu);
677 }
678 return true;
679 }
680
681 /*
682 * Unlocks a knote successfully locked with knote_lock().
683 *
684 * Called with the kqueue lock held.
685 *
686 * Returns with the kqueue lock held according to KNOTE_KQ_* mode.
687 */
688 static void
689 knote_unlock(kqueue_t kqu, struct knote *kn,
690 struct knote_lock_ctx *knlc, enum kqlocking kqlocking)
691 {
692 kqlock_held(kqu);
693
694 assert(knlc->knlc_knote == kn);
695 assert(kn->kn_status & KN_LOCKED);
696 #if DEBUG || DEVELOPMENT
697 assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED);
698 #endif
699
700 LIST_REMOVE(knlc, knlc_link);
701
702 if (knlc->knlc_waiters) {
703 thread_t thread = THREAD_NULL;
704
705 wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
706 LCK_WAKE_DEFAULT, &thread);
707
708 /*
709 * knote_lock_slow() publishes the lock context of waiters
710 * in uthread::uu_knlock.
711 *
712 * Reach out and make this context the new owner.
713 */
714 struct uthread *ut = get_bsdthread_info(thread);
715 struct knote_lock_ctx *next_owner_lc = ut->uu_knlock;
716
717 assert(next_owner_lc->knlc_knote == kn);
718 next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1;
719 LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link);
720 #if DEBUG || DEVELOPMENT
721 next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED;
722 #endif
723 ut->uu_knlock = NULL;
724 thread_deallocate_safe(thread);
725 } else {
726 kn->kn_status &= ~KN_LOCKED;
727 }
728
729 if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) {
730 /*
731 * No f_event() in flight anymore, we can leave QoS "Merge" mode
732 *
733 * See knote_adjust_qos()
734 */
735 kn->kn_status &= ~KN_MERGE_QOS;
736 }
737 if (kqlocking == KNOTE_KQ_UNLOCK) {
738 kqunlock(kqu);
739 }
740 #if DEBUG || DEVELOPMENT
741 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
742 #endif
743 }
744
745 /*
746 * Aborts all waiters for a knote lock, and unlock the knote.
747 *
748 * Called with the kqueue lock held.
749 *
750 * Returns with the kqueue unlocked.
751 */
752 static void
753 knote_unlock_cancel(struct kqueue *kq, struct knote *kn,
754 struct knote_lock_ctx *knlc)
755 {
756 kqlock_held(kq);
757
758 assert(knlc->knlc_knote == kn);
759 assert(kn->kn_status & KN_LOCKED);
760 assert(kn->kn_status & KN_DROPPING);
761
762 LIST_REMOVE(knlc, knlc_link);
763 kn->kn_status &= ~KN_LOCKED;
764 kqunlock(kq);
765
766 if (knlc->knlc_waiters) {
767 wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
768 }
769 #if DEBUG || DEVELOPMENT
770 knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED;
771 #endif
772 }
773
774 /*
775 * Call the f_event hook of a given filter.
776 *
777 * Takes a use count to protect against concurrent drops.
778 */
779 static void
780 knote_post(struct knote *kn, long hint)
781 {
782 struct kqueue *kq = knote_get_kq(kn);
783 int dropping, result;
784
785 kqlock(kq);
786
787 /*
788 * The select fallback is special: if KNOTE() is called,
789 * the contract is that kn->kn_hook _HAS_ to become NULL.
790 *
791 * The f_event() hook might not be called if we're dropping,
792 * so we hardcode it here, which is a little distasteful,
793 * but the select fallback is kinda magical in the first place.
794 */
795 if (kn->kn_filtid == EVFILTID_SPEC) {
796 kn->kn_hook = NULL;
797 }
798
799 if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) {
800 return kqunlock(kq);
801 }
802
803 if (__improbable(kn->kn_status & KN_POSTING)) {
804 panic("KNOTE() called concurrently on knote %p", kn);
805 }
806
807 kn->kn_status |= KN_POSTING;
808
809 kqunlock(kq);
810 result = filter_call(knote_fops(kn), f_event(kn, hint));
811 kqlock(kq);
812
813 /* Someone dropped the knote or the monitored object vanished while we
814 * were in f_event; swallow the side effects of the post.
815 */
816 dropping = (kn->kn_status & (KN_DROPPING | KN_VANISHED));
817
818 if (!dropping && (result & FILTER_ADJUST_EVENT_IOTIER_BIT)) {
819 kqueue_update_iotier_override(kq);
820 }
821
822 if (!dropping && (result & FILTER_ACTIVE)) {
823 knote_activate(kq, kn, result);
824 }
825
826 if ((kn->kn_status & KN_LOCKED) == 0) {
827 /*
828 * There's no other f_* call in flight, we can leave QoS "Merge" mode.
829 *
830 * See knote_adjust_qos()
831 */
832 kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS);
833 } else {
834 kn->kn_status &= ~KN_POSTING;
835 }
836
837 if (__improbable(dropping)) {
838 thread_wakeup(knote_post_wev(kn));
839 }
840
841 kqunlock(kq);
842 }
843
844 /*
845 * Called by knote_drop() and knote_fdclose() to wait for the last f_event()
846 * caller to be done.
847 *
848 * - kq locked at entry
849 * - kq unlocked at exit
850 */
851 static void
852 knote_wait_for_post(struct kqueue *kq, struct knote *kn)
853 {
854 kqlock_held(kq);
855
856 assert(kn->kn_status & (KN_DROPPING | KN_VANISHED));
857
858 if (kn->kn_status & KN_POSTING) {
859 lck_spin_sleep(&kq->kq_lock, LCK_SLEEP_UNLOCK, knote_post_wev(kn),
860 THREAD_UNINT | THREAD_WAIT_NOREPORT);
861 } else {
862 kqunlock(kq);
863 }
864 }
865
866 #pragma mark knote helpers for filters
867
868 OS_ALWAYS_INLINE
869 void
870 knote_set_error(struct knote *kn, int error)
871 {
872 kn->kn_flags |= EV_ERROR;
873 kn->kn_sdata = error;
874 }
875
876 OS_ALWAYS_INLINE
877 int64_t
878 knote_low_watermark(const struct knote *kn)
879 {
880 return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1;
881 }
882
883 /*!
884 * @function knote_fill_kevent_with_sdata
885 *
886 * @brief
887 * Fills in a kevent from the current content of a knote.
888 *
889 * @discussion
890 * This is meant to be called from filter's f_event hooks.
891 * The kevent data is filled with kn->kn_sdata.
892 *
893 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
894 *
895 * Using knote_fill_kevent is typically preferred.
896 */
897 OS_ALWAYS_INLINE
898 void
899 knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
900 {
901 #define knote_assert_aliases(name1, offs1, name2) \
902 static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
903 offsetof(struct kevent_internal_s, name2), \
904 "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 "need to alias")
905 /*
906 * All the code makes assumptions on these aliasing,
907 * so make sure we fail the build if we ever ever ever break them.
908 */
909 knote_assert_aliases(ident, 0, kei_ident);
910 #ifdef __LITTLE_ENDIAN__
911 knote_assert_aliases(filter, 0, kei_filter); // non trivial overlap
912 knote_assert_aliases(filter, 1, kei_filtid); // non trivial overlap
913 #else
914 knote_assert_aliases(filter, 0, kei_filtid); // non trivial overlap
915 knote_assert_aliases(filter, 1, kei_filter); // non trivial overlap
916 #endif
917 knote_assert_aliases(flags, 0, kei_flags);
918 knote_assert_aliases(qos, 0, kei_qos);
919 knote_assert_aliases(udata, 0, kei_udata);
920 knote_assert_aliases(fflags, 0, kei_fflags);
921 knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
922 knote_assert_aliases(data, 0, kei_sdata); // non trivial overlap
923 knote_assert_aliases(ext, 0, kei_ext);
924 #undef knote_assert_aliases
925
926 /*
927 * Fix the differences between kevent_qos_s and kevent_internal_s:
928 * - xflags is where kn_sfflags lives, we need to zero it
929 * - fixup the high bits of `filter` where kn_filtid lives
930 */
931 *kev = *(struct kevent_qos_s *)&kn->kn_kevent;
932 kev->xflags = 0;
933 kev->filter |= 0xff00;
934 if (kn->kn_flags & EV_CLEAR) {
935 kn->kn_fflags = 0;
936 }
937 }
938
939 /*!
940 * @function knote_fill_kevent
941 *
942 * @brief
943 * Fills in a kevent from the current content of a knote.
944 *
945 * @discussion
946 * This is meant to be called from filter's f_event hooks.
947 * The kevent data is filled with the passed in data.
948 *
949 * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
950 */
951 OS_ALWAYS_INLINE
952 void
953 knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
954 {
955 knote_fill_kevent_with_sdata(kn, kev);
956 kev->filter = kn->kn_filter;
957 kev->data = data;
958 }
959
960
961 #pragma mark file_filtops
962
963 static int
964 filt_fileattach(struct knote *kn, struct kevent_qos_s *kev)
965 {
966 return fo_kqfilter(kn->kn_fp, kn, kev);
967 }
968
969 SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = {
970 .f_isfd = 1,
971 .f_attach = filt_fileattach,
972 };
973
974 #pragma mark kqread_filtops
975
976 #define f_flag fp_glob->fg_flag
977 #define f_ops fp_glob->fg_ops
978 #define f_lflags fp_glob->fg_lflags
979
980 static void
981 filt_kqdetach(struct knote *kn)
982 {
983 struct kqfile *kqf = (struct kqfile *)fp_get_data(kn->kn_fp);
984 struct kqueue *kq = &kqf->kqf_kqueue;
985
986 kqlock(kq);
987 KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
988 kqunlock(kq);
989 }
990
991 static int
992 filt_kqueue(struct knote *kn, __unused long hint)
993 {
994 struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
995
996 return kq->kq_count > 0;
997 }
998
999 static int
1000 filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev)
1001 {
1002 #pragma unused(kev)
1003 struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1004 int res;
1005
1006 kqlock(kq);
1007 res = (kq->kq_count > 0);
1008 kqunlock(kq);
1009
1010 return res;
1011 }
1012
1013 static int
1014 filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev)
1015 {
1016 struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1017 int res = 0;
1018
1019 kqlock(kq);
1020 if (kq->kq_count) {
1021 knote_fill_kevent(kn, kev, kq->kq_count);
1022 res = 1;
1023 }
1024 kqunlock(kq);
1025
1026 return res;
1027 }
1028
1029 SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = {
1030 .f_isfd = 1,
1031 .f_detach = filt_kqdetach,
1032 .f_event = filt_kqueue,
1033 .f_touch = filt_kqtouch,
1034 .f_process = filt_kqprocess,
1035 };
1036
1037 #pragma mark proc_filtops
1038
1039 static int
1040 filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1041 {
1042 struct proc *p;
1043
1044 assert(PID_MAX < NOTE_PDATAMASK);
1045
1046 if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) {
1047 knote_set_error(kn, ENOTSUP);
1048 return 0;
1049 }
1050
1051 p = proc_find((int)kn->kn_id);
1052 if (p == NULL) {
1053 knote_set_error(kn, ESRCH);
1054 return 0;
1055 }
1056
1057 const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
1058
1059 if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) {
1060 do {
1061 pid_t selfpid = proc_selfpid();
1062
1063 if (p->p_ppid == selfpid) {
1064 break; /* parent => ok */
1065 }
1066 if ((p->p_lflag & P_LTRACED) != 0 &&
1067 (p->p_oppid == selfpid)) {
1068 break; /* parent-in-waiting => ok */
1069 }
1070 if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) {
1071 break; /* allowed to signal => ok */
1072 }
1073 proc_rele(p);
1074 knote_set_error(kn, EACCES);
1075 return 0;
1076 } while (0);
1077 }
1078
1079 kn->kn_proc = p;
1080 kn->kn_flags |= EV_CLEAR; /* automatically set */
1081 kn->kn_sdata = 0; /* incoming data is ignored */
1082
1083 proc_klist_lock();
1084
1085 KNOTE_ATTACH(&p->p_klist, kn);
1086
1087 proc_klist_unlock();
1088
1089 proc_rele(p);
1090
1091 /*
1092 * only captures edge-triggered events after this point
1093 * so it can't already be fired.
1094 */
1095 return 0;
1096 }
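/*
 * For illustration only (userspace, not kernel code): the NOTE_EXITSTATUS
 * gate above is what a watcher like the following runs into when it is
 * neither the parent of `pid` nor allowed to signal it; the registration is
 * then rejected with EACCES. `kq_fd` is assumed to be a kqueue descriptor.
 *
 *	struct kevent64_s kev;
 *	EV_SET64(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_EXITSTATUS, 0, 0, 0, 0);
 *	kevent64(kq_fd, &kev, 1, NULL, 0, 0, NULL);
 */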
1097
1098
1099 /*
1100 * The knote may be attached to a different process, which may exit,
1101 * leaving nothing for the knote to be attached to. In that case,
1102 * the pointer to the process will have already been nulled out.
1103 */
1104 static void
1105 filt_procdetach(struct knote *kn)
1106 {
1107 struct proc *p;
1108
1109 proc_klist_lock();
1110
1111 p = kn->kn_proc;
1112 if (p != PROC_NULL) {
1113 kn->kn_proc = PROC_NULL;
1114 KNOTE_DETACH(&p->p_klist, kn);
1115 }
1116
1117 proc_klist_unlock();
1118 }
1119
1120 static int
1121 filt_procevent(struct knote *kn, long hint)
1122 {
1123 u_int event;
1124
1125 /* ALWAYS CALLED WITH proc_klist_lock */
1126
1127 /*
1128 * Note: a lot of bits in hint may be obtained from the knote
1129 * To free some of those bits, see <rdar://problem/12592988> Freeing up
1130 * bits in hint for filt_procevent
1131 *
1132 * mask off extra data
1133 */
1134 event = (u_int)hint & NOTE_PCTRLMASK;
1135
1136 /*
1137 * termination lifecycle events can happen while a debugger
1138 * has reparented a process, in which case notifications
1139 * should be quashed except to the tracing parent. When
1140 * the debugger reaps the child (either via wait4(2) or
1141 * process exit), the child will be reparented to the original
1142 * parent and these knotes re-fired.
1143 */
1144 if (event & NOTE_EXIT) {
1145 if ((kn->kn_proc->p_oppid != 0)
1146 && (proc_getpid(knote_get_kq(kn)->kq_p) != kn->kn_proc->p_ppid)) {
1147 /*
1148 * This knote is not for the current ptrace(2) parent, ignore.
1149 */
1150 return 0;
1151 }
1152 }
1153
1154 /*
1155 * if the user is interested in this event, record it.
1156 */
1157 if (kn->kn_sfflags & event) {
1158 kn->kn_fflags |= event;
1159 }
1160
1161 #pragma clang diagnostic push
1162 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1163 if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) {
1164 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1165 }
1166 #pragma clang diagnostic pop
1167
1168
1169 /*
1170 * The kernel has a wrapper in place that returns the same data
1171 * as is collected here, in kn_hook32. Any changes to how
1172 * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected
1173 * should also be reflected in the proc_pidnoteexit() wrapper.
1174 */
1175 if (event == NOTE_EXIT) {
1176 kn->kn_hook32 = 0;
1177 if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) {
1178 kn->kn_fflags |= NOTE_EXITSTATUS;
1179 kn->kn_hook32 |= (hint & NOTE_PDATAMASK);
1180 }
1181 if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) {
1182 kn->kn_fflags |= NOTE_EXIT_DETAIL;
1183 if ((kn->kn_proc->p_lflag &
1184 P_LTERM_DECRYPTFAIL) != 0) {
1185 kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL;
1186 }
1187 if ((kn->kn_proc->p_lflag &
1188 P_LTERM_JETSAM) != 0) {
1189 kn->kn_hook32 |= NOTE_EXIT_MEMORY;
1190 switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) {
1191 case P_JETSAM_VMPAGESHORTAGE:
1192 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE;
1193 break;
1194 case P_JETSAM_VMTHRASHING:
1195 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING;
1196 break;
1197 case P_JETSAM_FCTHRASHING:
1198 kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING;
1199 break;
1200 case P_JETSAM_VNODE:
1201 kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE;
1202 break;
1203 case P_JETSAM_HIWAT:
1204 kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT;
1205 break;
1206 case P_JETSAM_PID:
1207 kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID;
1208 break;
1209 case P_JETSAM_IDLEEXIT:
1210 kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE;
1211 break;
1212 }
1213 }
1214 if ((proc_getcsflags(kn->kn_proc) &
1215 CS_KILLED) != 0) {
1216 kn->kn_hook32 |= NOTE_EXIT_CSERROR;
1217 }
1218 }
1219 }
1220
1221 /* if we have any matching state, activate the knote */
1222 return kn->kn_fflags != 0;
1223 }
1224
1225 static int
1226 filt_proctouch(struct knote *kn, struct kevent_qos_s *kev)
1227 {
1228 int res;
1229
1230 proc_klist_lock();
1231
1232 /* accept new filter flags and mask off output events no longer interesting */
1233 kn->kn_sfflags = kev->fflags;
1234
1235 /* restrict the current results to the (smaller?) set of new interest */
1236 /*
1237 * For compatibility with previous implementations, we leave kn_fflags
1238 * as they were before.
1239 */
1240 //kn->kn_fflags &= kn->kn_sfflags;
1241
1242 res = (kn->kn_fflags != 0);
1243
1244 proc_klist_unlock();
1245
1246 return res;
1247 }
1248
1249 static int
1250 filt_procprocess(struct knote *kn, struct kevent_qos_s *kev)
1251 {
1252 int res = 0;
1253
1254 proc_klist_lock();
1255 if (kn->kn_fflags) {
1256 knote_fill_kevent(kn, kev, kn->kn_hook32);
1257 kn->kn_hook32 = 0;
1258 res = 1;
1259 }
1260 proc_klist_unlock();
1261 return res;
1262 }
1263
1264 SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = {
1265 .f_attach = filt_procattach,
1266 .f_detach = filt_procdetach,
1267 .f_event = filt_procevent,
1268 .f_touch = filt_proctouch,
1269 .f_process = filt_procprocess,
1270 };
1271
1272 #pragma mark timer_filtops
1273
1274 struct filt_timer_params {
1275 uint64_t deadline; /* deadline in abs/cont time
1276 * (or 0 if NOTE_ABSOLUTE and deadline is in past) */
1277 uint64_t leeway; /* leeway in abstime, or 0 if none */
1278 uint64_t interval; /* interval in abstime or 0 if non-repeating timer */
1279 };
1280
1281 /*
1282 * Values stored in the knote at rest (using Mach absolute time units)
1283 *
1284 * kn->kn_thcall where the thread_call object is stored
1285 * kn->kn_ext[0] next deadline or 0 if immediate expiration
1286 * kn->kn_ext[1] leeway value
1287 * kn->kn_sdata interval timer: the interval
1288 * absolute/deadline timer: 0
1289 * kn->kn_hook32 timer state (with gencount)
1290 *
1291 * TIMER_IDLE:
1292 * The timer has either never been scheduled or been cancelled.
1293 * It is safe to schedule a new one in this state.
1294 *
1295 * TIMER_ARMED:
1296 * The timer has been scheduled
1297 *
1298 * TIMER_FIRED
1299 * The timer has fired and an event needs to be delivered.
1300 * When in this state, the callout may still be running.
1301 *
1302 * TIMER_IMMEDIATE
1303 * The timer has fired at registration time, and the callout was never
1304 * dispatched.
1305 */
1306 #define TIMER_IDLE 0x0
1307 #define TIMER_ARMED 0x1
1308 #define TIMER_FIRED 0x2
1309 #define TIMER_IMMEDIATE 0x3
1310 #define TIMER_STATE_MASK 0x3
1311 #define TIMER_GEN_INC 0x4
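/*
 * Worked example (illustrative, not from the original source) of how the
 * state word evolves: the low two bits hold the TIMER_* state, the upper
 * bits hold a generation count that is bumped on every arm.
 *
 *	state = 0x8 | TIMER_IDLE;		// gen 2, idle
 *	// filt_timerarm(): new generation, armed
 *	state = (state & ~TIMER_STATE_MASK) + TIMER_GEN_INC + TIMER_ARMED;
 *						// 0xc | TIMER_ARMED == 0xd
 *	// filt_timerexpire() with the matching "armed" snapshot:
 *	state = 0xc | TIMER_FIRED;		// 0xe
 *
 * A late filt_timerexpire() carrying an older generation fails its
 * cmpxchg against kn_hook32 and is ignored.
 */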
1312
1313 static void
1314 filt_timer_set_params(struct knote *kn, struct filt_timer_params *params)
1315 {
1316 kn->kn_ext[0] = params->deadline;
1317 kn->kn_ext[1] = params->leeway;
1318 kn->kn_sdata = params->interval;
1319 }
1320
1321 /*
1322 * filt_timervalidate - process data from user
1323 *
1324 * Sets up the deadline, interval, and leeway from the provided user data
1325 *
1326 * Input:
1327 * kn_sdata timer deadline or interval time
1328 * kn_sfflags style of timer, unit of measurement
1329 *
1330 * Output:
1331 * struct filt_timer_params to apply to the filter with
1332 * filt_timer_set_params when changes are ready to be committed.
1333 *
1334 * Returns:
1335 * EINVAL Invalid user data parameters
1336 * ERANGE Various overflows with the parameters
1337 *
1338 * Called with timer filter lock held.
1339 */
1340 static int
1341 filt_timervalidate(const struct kevent_qos_s *kev,
1342 struct filt_timer_params *params)
1343 {
1344 /*
1345 * There are 5 knobs that need to be chosen for a timer registration:
1346 *
1347 * A) Units of time (what is the time duration of the specified number)
1348 * Absolute and interval take:
1349 * NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME
1350 * Defaults to milliseconds if not specified
1351 *
1352 * B) Clock epoch (what is the zero point of the specified number)
1353 * For interval, there is none
1354 * For absolute, defaults to the gettimeofday/calendar epoch
1355 * With NOTE_MACHTIME, uses mach_absolute_time()
1356 * With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time()
1357 *
1358 * C) The knote's behavior on delivery
1359 * Interval timer causes the knote to arm for the next interval unless one-shot is set
1360 * Absolute is a forced one-shot timer which deletes on delivery
1361 * TODO: Add a way for absolute to be not forced one-shot
1362 *
1363 * D) Whether the time duration is relative to now or absolute
1364 * Interval fires at now + duration when it is set up
1365 * Absolute fires at now + difference between now walltime and passed in walltime
1366 * With NOTE_MACHTIME it fires at an absolute MAT or MCT.
1367 *
1368 * E) Whether the timer continues to tick across sleep
1369 * By default all three do not.
1370 * For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep
1371 * With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME:
1372 * expires when mach_continuous_time() is > the passed in value.
1373 */
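/*
 * For illustration only (userspace, not kernel code), assuming `kq_fd`
 * is a kqueue descriptor:
 *
 *	struct kevent64_s kev;
 *
 *	// repeating 500ms timer (default unit is milliseconds):
 *	EV_SET64(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, 0, 0, 0);
 *	kevent64(kq_fd, &kev, 1, NULL, 0, 0, NULL);
 *
 *	// one-shot absolute timer ~5 seconds from now that keeps
 *	// ticking across sleep:
 *	EV_SET64(&kev, 2, EVFILT_TIMER, EV_ADD,
 *	    NOTE_ABSOLUTE | NOTE_SECONDS | NOTE_MACH_CONTINUOUS_TIME,
 *	    time(NULL) + 5, 0, 0, 0);
 *	kevent64(kq_fd, &kev, 1, NULL, 0, 0, NULL);
 */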
1374
1375 uint64_t multiplier;
1376
1377 boolean_t use_abstime = FALSE;
1378
1379 switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) {
1380 case NOTE_SECONDS:
1381 multiplier = NSEC_PER_SEC;
1382 break;
1383 case NOTE_USECONDS:
1384 multiplier = NSEC_PER_USEC;
1385 break;
1386 case NOTE_NSECONDS:
1387 multiplier = 1;
1388 break;
1389 case NOTE_MACHTIME:
1390 multiplier = 0;
1391 use_abstime = TRUE;
1392 break;
1393 case 0: /* milliseconds (default) */
1394 multiplier = NSEC_PER_SEC / 1000;
1395 break;
1396 default:
1397 return EINVAL;
1398 }
1399
1400 /* transform the leeway in kn_ext[1] to same time scale */
1401 if (kev->fflags & NOTE_LEEWAY) {
1402 uint64_t leeway_abs;
1403
1404 if (use_abstime) {
1405 leeway_abs = (uint64_t)kev->ext[1];
1406 } else {
1407 uint64_t leeway_ns;
1408 if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) {
1409 return ERANGE;
1410 }
1411
1412 nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
1413 }
1414
1415 params->leeway = leeway_abs;
1416 } else {
1417 params->leeway = 0;
1418 }
1419
1420 if (kev->fflags & NOTE_ABSOLUTE) {
1421 uint64_t deadline_abs;
1422
1423 if (use_abstime) {
1424 deadline_abs = (uint64_t)kev->data;
1425 } else {
1426 uint64_t calendar_deadline_ns;
1427
1428 if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) {
1429 return ERANGE;
1430 }
1431
1432 /* calendar_deadline_ns is in nanoseconds since the epoch */
1433
1434 clock_sec_t seconds;
1435 clock_nsec_t nanoseconds;
1436
1437 /*
1438 * Note that the conversion through wall-time is only done once.
1439 *
1440 * If the relationship between MAT and gettimeofday changes,
1441 * the underlying timer does not update.
1442 *
1443 * TODO: build a wall-time denominated timer_call queue
1444 * and a flag to request DTRTing with wall-time timers
1445 */
1446 clock_get_calendar_nanotime(&seconds, &nanoseconds);
1447
1448 uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds;
1449
1450 /* if deadline is in the future */
1451 if (calendar_now_ns < calendar_deadline_ns) {
1452 uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns;
1453 uint64_t interval_abs;
1454
1455 nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1456
1457 /*
1458 * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only
1459 * causes the timer to keep ticking across sleep, but
1460 * it does not change the calendar timebase.
1461 */
1462
1463 if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1464 clock_continuoustime_interval_to_deadline(interval_abs,
1465 &deadline_abs);
1466 } else {
1467 clock_absolutetime_interval_to_deadline(interval_abs,
1468 &deadline_abs);
1469 }
1470 } else {
1471 deadline_abs = 0; /* cause immediate expiration */
1472 }
1473 }
1474
1475 params->deadline = deadline_abs;
1476 params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */
1477 } else if (kev->data < 0) {
1478 /*
1479 * Negative interval timers fire immediately, once.
1480 *
1481 * Ideally a negative interval would be an error, but certain clients
1482 * pass negative values by accident, and expect an event back.
1483 *
1484 * In the old implementation the timer would repeat with no delay
1485 * N times until mach_absolute_time() + (N * interval) underflowed,
1486 * then it would wait ~forever by accidentally arming a timer for the far future.
1487 *
1488 * We now skip the power-wasting hot spin phase and go straight to the idle phase.
1489 */
1490
1491 params->deadline = 0; /* expire immediately */
1492 params->interval = 0; /* non-repeating */
1493 } else {
1494 uint64_t interval_abs = 0;
1495
1496 if (use_abstime) {
1497 interval_abs = (uint64_t)kev->data;
1498 } else {
1499 uint64_t interval_ns;
1500 if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) {
1501 return ERANGE;
1502 }
1503
1504 nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1505 }
1506
1507 uint64_t deadline = 0;
1508
1509 if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) {
1510 clock_continuoustime_interval_to_deadline(interval_abs, &deadline);
1511 } else {
1512 clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
1513 }
1514
1515 params->deadline = deadline;
1516 params->interval = interval_abs;
1517 }
1518
1519 return 0;
1520 }
1521
1522 /*
1523 * filt_timerexpire - the timer callout routine
1524 */
1525 static void
1526 filt_timerexpire(void *knx, void *state_on_arm)
1527 {
1528 struct knote *kn = knx;
1529
1530 uint32_t state = (uint32_t)(uintptr_t)state_on_arm;
1531 uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED;
1532
1533 if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) {
1534 // our f_event always would say FILTER_ACTIVE,
1535 // so be leaner and just do it.
1536 struct kqueue *kq = knote_get_kq(kn);
1537 kqlock(kq);
1538 knote_activate(kq, kn, FILTER_ACTIVE);
1539 kqunlock(kq);
1540 } else {
1541 /*
1542 * The timer has been reprogrammed or canceled since it was armed,
1543 * and this is a late firing for the timer, just ignore it.
1544 */
1545 }
1546 }
1547
1548 /*
1549 * Does this deadline need a timer armed for it, or has it expired?
1550 */
1551 static bool
1552 filt_timer_is_ready(struct knote *kn)
1553 {
1554 uint64_t now, deadline = kn->kn_ext[0];
1555
1556 if (deadline == 0) {
1557 return true;
1558 }
1559
1560 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1561 now = mach_continuous_time();
1562 } else {
1563 now = mach_absolute_time();
1564 }
1565 return deadline <= now;
1566 }
1567
1568 /*
1569 * Arm a timer
1570 *
1571 * It is the responsibility of the caller to make sure the timer call
1572 * has completed or been cancelled properly prior to arming it.
1573 */
1574 static void
1575 filt_timerarm(struct knote *kn)
1576 {
1577 uint64_t deadline = kn->kn_ext[0];
1578 uint64_t leeway = kn->kn_ext[1];
1579 uint32_t state;
1580
1581 int filter_flags = kn->kn_sfflags;
1582 unsigned int timer_flags = 0;
1583
1584 if (filter_flags & NOTE_CRITICAL) {
1585 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL;
1586 } else if (filter_flags & NOTE_BACKGROUND) {
1587 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND;
1588 } else {
1589 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL;
1590 }
1591
1592 if (filter_flags & NOTE_LEEWAY) {
1593 timer_flags |= THREAD_CALL_DELAY_LEEWAY;
1594 }
1595
1596 if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) {
1597 timer_flags |= THREAD_CALL_CONTINUOUS;
1598 }
1599
1600 /*
1601 * Move to ARMED.
1602 *
1603 * We increase the gencount, and setup the thread call with this expected
1604 * state. It means that if there was a previous generation of the timer in
1605 * flight that needs to be ignored, then 3 things are possible:
1606 *
1607 * - the timer fires first, filt_timerexpire() and sets the state to FIRED
1608 * but we clobber it with ARMED and a new gencount. The knote will still
1609 * be activated, but filt_timerprocess() which is serialized with this
1610 * call will not see the FIRED bit set and will not deliver an event.
1611 *
1612 * - this code runs first, but filt_timerexpire() comes second. Because it
1613 * knows an old gencount, it will debounce and not activate the knote.
1614 *
1615 * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below
1616 * will just cancel it properly.
1617 *
1618 * This is important as userspace expects to never be woken up for past
1619 * timers after filt_timertouch ran.
1620 */
1621 state = os_atomic_load(&kn->kn_hook32, relaxed);
1622 state &= ~TIMER_STATE_MASK;
1623 state += TIMER_GEN_INC + TIMER_ARMED;
1624 os_atomic_store(&kn->kn_hook32, state, relaxed);
1625
1626 thread_call_enter_delayed_with_leeway(kn->kn_thcall,
1627 (void *)(uintptr_t)state, deadline, leeway, timer_flags);
1628 }
1629
1630 /*
1631 * Mark a timer as "already fired" when it is being reprogrammed
1632 *
1633 * If there is a timer in flight, this will do a best effort at canceling it,
1634 * but will not wait. If the thread call was in flight, having set the
1635 * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this
1636 * cancelation.
1637 */
1638 static void
1639 filt_timerfire_immediate(struct knote *kn)
1640 {
1641 uint32_t state;
1642
1643 static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK,
1644 "validate that this atomic or will transition to IMMEDIATE");
1645 state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1646
1647 if ((state & TIMER_STATE_MASK) == TIMER_ARMED) {
1648 thread_call_cancel(kn->kn_thcall);
1649 }
1650 }
1651
1652 /*
1653 * Allocate a thread call for the knote's lifetime, and kick off the timer.
1654 */
1655 static int
1656 filt_timerattach(struct knote *kn, struct kevent_qos_s *kev)
1657 {
1658 thread_call_t callout;
1659 struct filt_timer_params params;
1660 int error;
1661
1662 if ((error = filt_timervalidate(kev, &params)) != 0) {
1663 knote_set_error(kn, error);
1664 return 0;
1665 }
1666
1667 callout = thread_call_allocate_with_options(filt_timerexpire,
1668 (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH,
1669 THREAD_CALL_OPTIONS_ONCE);
1670
1671 if (NULL == callout) {
1672 knote_set_error(kn, ENOMEM);
1673 return 0;
1674 }
1675
1676 filt_timer_set_params(kn, &params);
1677 kn->kn_thcall = callout;
1678 kn->kn_flags |= EV_CLEAR;
1679 os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed);
1680
1681 /* NOTE_ABSOLUTE implies EV_ONESHOT */
1682 if (kn->kn_sfflags & NOTE_ABSOLUTE) {
1683 kn->kn_flags |= EV_ONESHOT;
1684 }
1685
1686 if (filt_timer_is_ready(kn)) {
1687 os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed);
1688 return FILTER_ACTIVE;
1689 } else {
1690 filt_timerarm(kn);
1691 return 0;
1692 }
1693 }
1694
1695 /*
1696 * Shut down the timer if it's running, and free the callout.
1697 */
1698 static void
1699 filt_timerdetach(struct knote *kn)
1700 {
1701 __assert_only boolean_t freed;
1702
1703 /*
1704 * Unconditionally cancel to make sure there can't be any filt_timerexpire()
1705 * running anymore.
1706 */
1707 thread_call_cancel_wait(kn->kn_thcall);
1708 freed = thread_call_free(kn->kn_thcall);
1709 assert(freed);
1710 }
1711
1712 /*
1713 * filt_timertouch - update timer knote with new user input
1714 *
1715 * Cancel and restart the timer based on new user data. When
1716 * the user picks up a knote, clear the count of how many timer
1717 * pops have gone off (in kn_data).
1718 */
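/*
 * Illustrative note (not from the original source): from userspace a
 * "touch" is simply a second EV_ADD on the same ident, e.g. re-issuing
 *
 *	EV_SET64(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 250, 0, 0, 0);
 *
 * for an already-registered timer, which funnels into this routine to
 * reprogram the existing knote.
 */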
1719 static int
1720 filt_timertouch(struct knote *kn, struct kevent_qos_s *kev)
1721 {
1722 struct filt_timer_params params;
1723 uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags);
1724 int error;
1725
1726 if (kev->qos && (knote_get_kq(kn)->kq_state & KQ_WORKLOOP) &&
1727 !_pthread_priority_thread_qos(kev->qos)) {
1728 /* validate usage of FILTER_UPDATE_REQ_QOS */
1729 kev->flags |= EV_ERROR;
1730 kev->data = ERANGE;
1731 return 0;
1732 }
1733
1734 if (changed_flags & NOTE_ABSOLUTE) {
1735 kev->flags |= EV_ERROR;
1736 kev->data = EINVAL;
1737 return 0;
1738 }
1739
1740 if ((error = filt_timervalidate(kev, &params)) != 0) {
1741 kev->flags |= EV_ERROR;
1742 kev->data = error;
1743 return 0;
1744 }
1745
1746 /* capture the new values used to compute deadline */
1747 filt_timer_set_params(kn, &params);
1748 kn->kn_sfflags = kev->fflags;
1749
1750 if (filt_timer_is_ready(kn)) {
1751 filt_timerfire_immediate(kn);
1752 return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS;
1753 } else {
1754 filt_timerarm(kn);
1755 return FILTER_UPDATE_REQ_QOS;
1756 }
1757 }
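/*
 * Usage sketch (userspace, illustrative): re-adding an existing identifier
 * reaches filt_timertouch() above, which revalidates the parameters,
 * best-effort cancels any armed thread call, and re-arms with the new
 * interval (500ms here, the default unit when no NOTE_*SECONDS flag is set):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Pops of the previous configuration that were never processed are
 * discarded, matching the debounce guarantees described in filt_timerarm().
 */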
1758
1759 /*
1760 * filt_timerprocess - query state of knote and snapshot event data
1761 *
1762 * Determine if the timer has fired in the past, snapshot the state
1763 * of the kevent for returning to user-space, and clear pending event
1764 * counters for the next time.
1765 */
1766 static int
1767 filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev)
1768 {
1769 uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed);
1770
1771 /*
1772 * filt_timerprocess is serialized with any filter routine except for
1773 * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED
1774 * transition, and on success, activates the knote.
1775 *
1776 * Hence, we don't need atomic modifications of the state, only to peek at
1777 * whether we see any of the "FIRED" state, and if we do, it is safe to
1778 * do simple state machine transitions.
1779 */
1780 switch (state & TIMER_STATE_MASK) {
1781 case TIMER_IDLE:
1782 case TIMER_ARMED:
1783 /*
1784 * This can happen if a touch resets a timer that had fired
1785 * without being processed
1786 */
1787 return 0;
1788 }
1789
1790 os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed);
1791
1792 /*
1793 * Copy out the interesting kevent state,
1794 * but don't leak out the raw time calculations.
1795 *
1796 * TODO: potential enhancements - tell the user about:
1797 * - deadline to which this timer thought it was expiring
1798 * - return kn_sfflags in the fflags field so the client can know
1799 * under what flags the timer fired
1800 */
1801 knote_fill_kevent(kn, kev, 1);
1802 kev->ext[0] = 0;
1803 /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */
1804
1805 if (kn->kn_sdata != 0) {
1806 /*
1807 * This is a 'repeating' timer, so we have to emit
1808 * how many intervals expired between the arm
1809 * and the process.
1810 *
1811 * A very strange style of interface, because
1812 * this could easily be done in the client...
1813 */
1814
1815 uint64_t now;
1816
1817 if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) {
1818 now = mach_continuous_time();
1819 } else {
1820 now = mach_absolute_time();
1821 }
1822
1823 uint64_t first_deadline = kn->kn_ext[0];
1824 uint64_t interval_abs = kn->kn_sdata;
1825 uint64_t orig_arm_time = first_deadline - interval_abs;
1826
1827 assert(now > orig_arm_time);
1828 assert(now > first_deadline);
1829
1830 uint64_t elapsed = now - orig_arm_time;
1831
1832 uint64_t num_fired = elapsed / interval_abs;
1833
1834 /*
1835 * To reach this code, we must have seen the timer pop
1836 * and be in repeating mode, so therefore it must have been
1837 * more than 'interval' time since the attach or last
1838 * successful touch.
1839 */
1840 assert(num_fired > 0);
1841
1842 /* report how many intervals have elapsed to the user */
1843 kev->data = (int64_t)num_fired;
1844
1845 /* We only need to re-arm the timer if it's not about to be destroyed */
1846 if ((kn->kn_flags & EV_ONESHOT) == 0) {
1847 /* fire at the end of the next interval */
1848 uint64_t new_deadline = first_deadline + num_fired * interval_abs;
1849
1850 assert(new_deadline > now);
1851
1852 kn->kn_ext[0] = new_deadline;
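/*
 * Worked example (illustrative numbers): with first_deadline = 1000,
 * interval_abs = 100 and now = 1342 (all in mach time units),
 * orig_arm_time = 900, elapsed = 442, num_fired = 4 and
 * new_deadline = 1000 + 4 * 100 = 1400, i.e. the end of the interval
 * currently in progress, which is strictly after `now`.
 */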
1853
1854 /*
1855 * This can't shortcut setting up the thread call, because
1856 * knote_process deactivates EV_CLEAR knotes unconditionally.
1857 */
1858 filt_timerarm(kn);
1859 }
1860 }
1861
1862 return FILTER_ACTIVE;
1863 }
1864
1865 SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = {
1866 .f_extended_codes = true,
1867 .f_attach = filt_timerattach,
1868 .f_detach = filt_timerdetach,
1869 .f_event = filt_bad_event,
1870 .f_touch = filt_timertouch,
1871 .f_process = filt_timerprocess,
1872 };
1873
1874 #pragma mark user_filtops
1875
1876 static int
1877 filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev)
1878 {
1879 if (kn->kn_sfflags & NOTE_TRIGGER) {
1880 kn->kn_hook32 = FILTER_ACTIVE;
1881 } else {
1882 kn->kn_hook32 = 0;
1883 }
1884 return kn->kn_hook32;
1885 }
1886
1887 static int
1888 filt_usertouch(struct knote *kn, struct kevent_qos_s *kev)
1889 {
1890 uint32_t ffctrl;
1891 int fflags;
1892
1893 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1894 fflags = kev->fflags & NOTE_FFLAGSMASK;
1895 switch (ffctrl) {
1896 case NOTE_FFNOP:
1897 break;
1898 case NOTE_FFAND:
1899 kn->kn_sfflags &= fflags;
1900 break;
1901 case NOTE_FFOR:
1902 kn->kn_sfflags |= fflags;
1903 break;
1904 case NOTE_FFCOPY:
1905 kn->kn_sfflags = fflags;
1906 break;
1907 }
1908 kn->kn_sdata = kev->data;
1909
1910 if (kev->fflags & NOTE_TRIGGER) {
1911 kn->kn_hook32 = FILTER_ACTIVE;
1912 }
1913 return (int)kn->kn_hook32;
1914 }
1915
1916 static int
1917 filt_userprocess(struct knote *kn, struct kevent_qos_s *kev)
1918 {
1919 int result = (int)kn->kn_hook32;
1920
1921 if (result) {
1922 /* EVFILT_USER returns the data that was passed in */
1923 knote_fill_kevent_with_sdata(kn, kev);
1924 kev->fflags = kn->kn_sfflags;
1925 if (kn->kn_flags & EV_CLEAR) {
1926 /* knote_fill_kevent cleared kn_fflags */
1927 kn->kn_hook32 = 0;
1928 }
1929 }
1930
1931 return result;
1932 }
1933
1934 SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = {
1935 .f_extended_codes = true,
1936 .f_attach = filt_userattach,
1937 .f_detach = filt_no_detach,
1938 .f_event = filt_bad_event,
1939 .f_touch = filt_usertouch,
1940 .f_process = filt_userprocess,
1941 };
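/*
 * Usage sketch (userspace, illustrative): an EVFILT_USER knote is
 * registered once and later fired by a NOTE_TRIGGER touch, which is what
 * filt_usertouch() above handles:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	// register
 *
 *	EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	// fire it
 *
 * NOTE_FFAND / NOTE_FFOR / NOTE_FFCOPY in the control bits select how the
 * saved fflags are combined with the incoming ones, per the switch above.
 */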
1942
1943 #pragma mark workloop_filtops
1944
1945 #define EPREEMPTDISABLED (-1)
1946
1947 static inline void
1948 filt_wllock(struct kqworkloop *kqwl)
1949 {
1950 lck_spin_lock(&kqwl->kqwl_statelock);
1951 }
1952
1953 static inline void
1954 filt_wlunlock(struct kqworkloop *kqwl)
1955 {
1956 lck_spin_unlock(&kqwl->kqwl_statelock);
1957 }
1958
1959 /*
1960 * Returns true when the interlock for the turnstile is the workqueue lock
1961 *
1962 * When this is the case, all turnstiles operations are delegated
1963 * to the workqueue subsystem.
1964 *
1965 * This is required because kqueue_threadreq_bind_prepost only holds the
1966 * workqueue lock but needs to move the inheritor from the workloop turnstile
1967 * away from the creator thread, so that this now fulfilled request cannot be
1968 * picked anymore by other threads.
1969 */
1970 static inline bool
1971 filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl)
1972 {
1973 return kqr_thread_requested_pending(&kqwl->kqwl_request);
1974 }
1975
1976 static void
1977 filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts,
1978 turnstile_update_flags_t flags)
1979 {
1980 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1981 workq_threadreq_t kqr = &kqwl->kqwl_request;
1982
1983 /*
1984 * binding to the workq should always happen through
1985 * workq_kern_threadreq_update_inheritor()
1986 */
1987 assert(!filt_wlturnstile_interlock_is_workq(kqwl));
1988
1989 if ((inheritor = kqwl->kqwl_owner)) {
1990 flags |= TURNSTILE_INHERITOR_THREAD;
1991 } else if ((inheritor = kqr_thread(kqr))) {
1992 flags |= TURNSTILE_INHERITOR_THREAD;
1993 }
1994
1995 turnstile_update_inheritor(ts, inheritor, flags);
1996 }
1997
1998 #define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100
1999 #define FILT_WLATTACH 0
2000 #define FILT_WLTOUCH 1
2001 #define FILT_WLDROP 2
2002
2003 __result_use_check
2004 static int
2005 filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn,
2006 struct kevent_qos_s *kev, kq_index_t qos_index, int op)
2007 {
2008 user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]);
2009 workq_threadreq_t kqr = &kqwl->kqwl_request;
2010 thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
2011 kq_index_t cur_override = THREAD_QOS_UNSPECIFIED;
2012 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2013 int action = KQWL_UTQ_NONE, error = 0;
2014 bool wl_inheritor_updated = false, needs_wake = false;
2015 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2016 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2017 uint64_t udata = 0;
2018 struct turnstile *ts = TURNSTILE_NULL;
2019
2020 filt_wllock(kqwl);
2021
2022 again:
2023 new_owner = cur_owner = kqwl->kqwl_owner;
2024
2025 /*
2026 * Phase 1:
2027 *
2028 * If asked, load the uint64 value at the user provided address and compare
2029 * it against the passed in mask and expected value.
2030 *
2031 * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as
2032 * a thread reference.
2033 *
2034 * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is
2035 * the current thread, then end ownership.
2036 *
2037 * Lastly decide whether we need to perform a QoS update.
2038 */
2039 if (uaddr) {
2040 /*
2041 * Until <rdar://problem/24999882> exists,
2042 * disabling preemption copyin forces any
2043 * vm_fault we encounter to fail.
2044 */
2045 error = copyin_atomic64(uaddr, &udata);
2046
2047 /*
2048 * If we get EFAULT, drop locks, and retry.
2049 * If we still get an error report it,
2050 * else assume the memory has been faulted
2051 * and attempt to copyin under lock again.
2052 */
2053 switch (error) {
2054 case 0:
2055 break;
2056 case EFAULT:
2057 if (efault_retry-- > 0) {
2058 filt_wlunlock(kqwl);
2059 error = copyin_atomic64(uaddr, &udata);
2060 filt_wllock(kqwl);
2061 if (error == 0) {
2062 goto again;
2063 }
2064 }
2065 OS_FALLTHROUGH;
2066 default:
2067 goto out;
2068 }
2069
2070 /* Update state as copied in. */
2071 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2072
2073 if ((udata & mask) != (kdata & mask)) {
2074 error = ESTALE;
2075 } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) {
2076 /*
2077 * Decipher the owner port name, and translate accordingly.
2078 * The low 2 bits were borrowed for other flags, so mask them off.
2079 *
2080 * Then attempt translation to a thread reference or fail.
2081 */
2082 mach_port_name_t name = (mach_port_name_t)udata & ~0x3;
2083 if (name != MACH_PORT_NULL) {
2084 name = ipc_entry_name_mask(name);
2085 extra_thread_ref = port_name_to_thread(name,
2086 PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2087 if (extra_thread_ref == THREAD_NULL) {
2088 error = EOWNERDEAD;
2089 goto out;
2090 }
2091 new_owner = extra_thread_ref;
2092 }
2093 }
2094 }
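/*
 * Illustrative example of the debounce (made-up values): if userspace last
 * observed the state word as kdata = 0x1803 with mask = ~0x3ull, but
 * another thread has since stored 0x2003 at uaddr, then
 * (udata & mask) == 0x2000 != 0x1800 == (kdata & mask), so we fail with
 * ESTALE and userspace gets to re-read the word and retry (or hide the
 * error with NOTE_WL_IGNORE_ESTALE).
 */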
2095
2096 if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) {
2097 new_owner = THREAD_NULL;
2098 }
2099
2100 if (error == 0) {
2101 if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) {
2102 action = KQWL_UTQ_SET_QOS_INDEX;
2103 } else if (qos_index && kqr->tr_kq_qos_index != qos_index) {
2104 action = KQWL_UTQ_SET_QOS_INDEX;
2105 }
2106
2107 if (op == FILT_WLTOUCH) {
2108 /*
2109 * Save off any additional fflags/data we just accepted.
2110 * But only keep the last round of "update" bits we acted on, which helps
2111 * debugging a lot.
2112 */
2113 kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK;
2114 kn->kn_sfflags |= kev->fflags;
2115 if (kev->fflags & NOTE_WL_SYNC_WAKE) {
2116 needs_wake = (kn->kn_thread != THREAD_NULL);
2117 }
2118 } else if (op == FILT_WLDROP) {
2119 if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) ==
2120 NOTE_WL_SYNC_WAIT) {
2121 /*
2122 * When deleting a SYNC_WAIT knote that hasn't been woken up
2123 * explicitly, issue a wake up.
2124 */
2125 kn->kn_sfflags |= NOTE_WL_SYNC_WAKE;
2126 needs_wake = (kn->kn_thread != THREAD_NULL);
2127 }
2128 }
2129 }
2130
2131 /*
2132 * Phase 2:
2133 *
2134 * Commit ownership and QoS changes if any, possibly wake up waiters
2135 */
2136
2137 if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) {
2138 goto out;
2139 }
2140
2141 kqlock(kqwl);
2142
2143 /* If already tracked as servicer, don't track as owner */
2144 if (new_owner == kqr_thread(kqr)) {
2145 new_owner = THREAD_NULL;
2146 }
2147
2148 if (cur_owner != new_owner) {
2149 kqwl->kqwl_owner = new_owner;
2150 if (new_owner == extra_thread_ref) {
2151 /* we just transferred this ref to kqwl_owner */
2152 extra_thread_ref = THREAD_NULL;
2153 }
2154 cur_override = kqworkloop_override(kqwl);
2155
2156 if (new_owner) {
2157 /* override it before we drop the old */
2158 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2159 thread_add_kevent_override(new_owner, cur_override);
2160 }
2161 if (kqr_thread_requested_pending(kqr)) {
2162 if (action == KQWL_UTQ_NONE) {
2163 action = KQWL_UTQ_REDRIVE_EVENTS;
2164 }
2165 }
2166 } else if (action == KQWL_UTQ_NONE &&
2167 !kqr_thread_requested(kqr) &&
2168 kqwl->kqwl_wakeup_qos) {
2169 action = KQWL_UTQ_REDRIVE_EVENTS;
2170 }
2171 }
2172
2173 if (action != KQWL_UTQ_NONE) {
2174 kqworkloop_update_threads_qos(kqwl, action, qos_index);
2175 }
2176
2177 ts = kqwl->kqwl_turnstile;
2178 if (cur_owner != new_owner && ts) {
2179 if (action == KQWL_UTQ_REDRIVE_EVENTS) {
2180 /*
2181 * Note that when action is KQWL_UTQ_REDRIVE_EVENTS,
2182 * the code went through workq_kern_threadreq_initiate()
2183 * and the workqueue has set the inheritor already
2184 */
2185 assert(filt_wlturnstile_interlock_is_workq(kqwl));
2186 } else if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2187 workq_kern_threadreq_lock(kqwl->kqwl_p);
2188 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner,
2189 ts, TURNSTILE_IMMEDIATE_UPDATE);
2190 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2191 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2192 /*
2193 * If the workq is no longer the interlock, then
2194 * workq_kern_threadreq_update_inheritor() has finished a bind
2195 * and we need to fallback to the regular path.
2196 */
2197 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2198 }
2199 wl_inheritor_updated = true;
2200 } else {
2201 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
2202 wl_inheritor_updated = true;
2203 }
2204
2205 /*
2206 * We need a turnstile reference because we are dropping the interlock
2207 * and the caller has not called turnstile_prepare.
2208 */
2209 if (wl_inheritor_updated) {
2210 turnstile_reference(ts);
2211 }
2212 }
2213
2214 if (needs_wake && ts) {
2215 waitq_wakeup64_thread(&ts->ts_waitq, knote_filt_wev64(kn),
2216 kn->kn_thread, THREAD_AWAKENED);
2217 if (op == FILT_WLATTACH || op == FILT_WLTOUCH) {
2218 disable_preemption();
2219 error = EPREEMPTDISABLED;
2220 }
2221 }
2222
2223 kqunlock(kqwl);
2224
2225 out:
2226 /*
2227 * Phase 3:
2228 *
2229 * Unlock and cleanup various lingering references and things.
2230 */
2231 filt_wlunlock(kqwl);
2232
2233 #if CONFIG_WORKLOOP_DEBUG
2234 KQWL_HISTORY_WRITE_ENTRY(kqwl, {
2235 .updater = current_thread(),
2236 .servicer = kqr_thread(kqr), /* Note: racy */
2237 .old_owner = cur_owner,
2238 .new_owner = new_owner,
2239
2240 .kev_ident = kev->ident,
2241 .error = (int16_t)error,
2242 .kev_flags = kev->flags,
2243 .kev_fflags = kev->fflags,
2244
2245 .kev_mask = mask,
2246 .kev_value = kdata,
2247 .in_value = udata,
2248 });
2249 #endif // CONFIG_WORKLOOP_DEBUG
2250
2251 if (wl_inheritor_updated) {
2252 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2253 turnstile_deallocate_safe(ts);
2254 }
2255
2256 if (cur_owner && new_owner != cur_owner) {
2257 if (cur_override != THREAD_QOS_UNSPECIFIED) {
2258 thread_drop_kevent_override(cur_owner);
2259 }
2260 thread_deallocate_safe(cur_owner);
2261 }
2262 if (extra_thread_ref) {
2263 thread_deallocate_safe(extra_thread_ref);
2264 }
2265 return error;
2266 }
2267
2268 /*
2269 * Remembers the last update that came in from userspace for debugging reasons.
2270 * - fflags is mirrored from the userspace kevent
2271 * - ext[i, i != VALUE] is mirrored from the userspace kevent
2272 * - ext[VALUE] is set to what the kernel loaded atomically
2273 * - data is set to the error if any
2274 */
2275 static inline void
2276 filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev,
2277 int error)
2278 {
2279 kn->kn_fflags = kev->fflags;
2280 kn->kn_sdata = error;
2281 memcpy(kn->kn_ext, kev->ext, sizeof(kev->ext));
2282 }
2283
2284 static int
2285 filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn,
2286 struct kevent_qos_s *kev, int op)
2287 {
2288 user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR];
2289 uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE];
2290 uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK];
2291 uint64_t udata = 0;
2292 int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT;
2293 int error = 0;
2294
2295 if (op == FILT_WLATTACH) {
2296 (void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue);
2297 } else if (uaddr == 0) {
2298 return 0;
2299 }
2300
2301 filt_wllock(kqwl);
2302
2303 again:
2304
2305 /*
2306 * Do the debounce thing; the lock serializing the state is the knote lock.
2307 */
2308 if (uaddr) {
2309 /*
2310 * Until <rdar://problem/24999882> exists,
2311 * disabling preemption copyin forces any
2312 * vm_fault we encounter to fail.
2313 */
2314 error = copyin_atomic64(uaddr, &udata);
2315
2316 /*
2317 * If we get EFAULT, drop locks, and retry.
2318 * If we still get an error report it,
2319 * else assume the memory has been faulted
2320 * and attempt to copyin under lock again.
2321 */
2322 switch (error) {
2323 case 0:
2324 break;
2325 case EFAULT:
2326 if (efault_retry-- > 0) {
2327 filt_wlunlock(kqwl);
2328 error = copyin_atomic64(uaddr, &udata);
2329 filt_wllock(kqwl);
2330 if (error == 0) {
2331 goto again;
2332 }
2333 }
2334 OS_FALLTHROUGH;
2335 default:
2336 goto out;
2337 }
2338
2339 kev->ext[EV_EXTIDX_WL_VALUE] = udata;
2340 kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata;
2341
2342 if ((udata & mask) != (kdata & mask)) {
2343 error = ESTALE;
2344 goto out;
2345 }
2346 }
2347
2348 if (op == FILT_WLATTACH) {
2349 error = filt_wlattach_sync_ipc(kn);
2350 if (error == 0) {
2351 disable_preemption();
2352 error = EPREEMPTDISABLED;
2353 }
2354 }
2355
2356 out:
2357 filt_wlunlock(kqwl);
2358 return error;
2359 }
2360
2361 static int
2362 filt_wlattach(struct knote *kn, struct kevent_qos_s *kev)
2363 {
2364 struct kqueue *kq = knote_get_kq(kn);
2365 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2366 int error = 0, result = 0;
2367 kq_index_t qos_index = 0;
2368
2369 if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) {
2370 error = ENOTSUP;
2371 goto out;
2372 }
2373
2374 uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK);
2375 switch (command) {
2376 case NOTE_WL_THREAD_REQUEST:
2377 if (kn->kn_id != kqwl->kqwl_dynamicid) {
2378 error = EINVAL;
2379 goto out;
2380 }
2381 qos_index = _pthread_priority_thread_qos(kn->kn_qos);
2382 if (qos_index == THREAD_QOS_UNSPECIFIED) {
2383 error = ERANGE;
2384 goto out;
2385 }
2386 if (kqwl->kqwl_request.tr_kq_qos_index) {
2387 /*
2388 * There already is a thread request, and well, you're only allowed
2389 * one per workloop, so fail the attach.
2390 */
2391 error = EALREADY;
2392 goto out;
2393 }
2394 break;
2395 case NOTE_WL_SYNC_WAIT:
2396 case NOTE_WL_SYNC_WAKE:
2397 if (kn->kn_id == kqwl->kqwl_dynamicid) {
2398 error = EINVAL;
2399 goto out;
2400 }
2401 if ((kn->kn_flags & EV_DISABLE) == 0) {
2402 error = EINVAL;
2403 goto out;
2404 }
2405 if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) {
2406 error = EINVAL;
2407 goto out;
2408 }
2409 break;
2410
2411 case NOTE_WL_SYNC_IPC:
2412 if ((kn->kn_flags & EV_DISABLE) == 0) {
2413 error = EINVAL;
2414 goto out;
2415 }
2416 if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) {
2417 error = EINVAL;
2418 goto out;
2419 }
2420 break;
2421 default:
2422 error = EINVAL;
2423 goto out;
2424 }
2425
2426 if (command == NOTE_WL_SYNC_IPC) {
2427 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH);
2428 } else {
2429 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH);
2430 }
2431
2432 if (error == EPREEMPTDISABLED) {
2433 error = 0;
2434 result = FILTER_THREADREQ_NODEFEER;
2435 }
2436 out:
2437 if (error) {
2438 /* If userland wants ESTALE to be hidden, fail the attach anyway */
2439 if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) {
2440 error = 0;
2441 }
2442 knote_set_error(kn, error);
2443 return result;
2444 }
2445 if (command == NOTE_WL_SYNC_WAIT) {
2446 return kevent_register_wait_prepare(kn, kev, result);
2447 }
2448 /* Just attaching the thread request successfully will fire it */
2449 if (command == NOTE_WL_THREAD_REQUEST) {
2450 /*
2451 * Thread Request knotes need an explicit touch to be active again,
2452 * so delivering an event needs to also consume it.
2453 */
2454 kn->kn_flags |= EV_CLEAR;
2455 return result | FILTER_ACTIVE;
2456 }
2457 return result;
2458 }
2459
2460 static void __dead2
2461 filt_wlwait_continue(void *parameter, wait_result_t wr)
2462 {
2463 struct _kevent_register *cont_args = parameter;
2464 struct kqworkloop *kqwl = cont_args->kqwl;
2465
2466 kqlock(kqwl);
2467 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2468 workq_kern_threadreq_lock(kqwl->kqwl_p);
2469 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2470 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2471 } else {
2472 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL, TURNSTILE_WORKLOOPS);
2473 }
2474 kqunlock(kqwl);
2475
2476 turnstile_cleanup();
2477
2478 if (wr == THREAD_INTERRUPTED) {
2479 cont_args->kev.flags |= EV_ERROR;
2480 cont_args->kev.data = EINTR;
2481 } else if (wr != THREAD_AWAKENED) {
2482 panic("Unexpected wait result: %d", wr);
2483 }
2484
2485 kevent_register_wait_return(cont_args);
2486 }
2487
2488 /*
2489 * Called with the workloop mutex held, most of the time never returns as it
2490 * calls filt_wlwait_continue through a continuation.
2491 */
2492 static void __dead2
2493 filt_wlpost_register_wait(struct uthread *uth, struct knote *kn,
2494 struct _kevent_register *cont_args)
2495 {
2496 struct kqworkloop *kqwl = cont_args->kqwl;
2497 workq_threadreq_t kqr = &kqwl->kqwl_request;
2498 struct turnstile *ts;
2499 bool workq_locked = false;
2500
2501 kqlock_held(kqwl);
2502
2503 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
2504 workq_kern_threadreq_lock(kqwl->kqwl_p);
2505 workq_locked = true;
2506 }
2507
2508 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
2509 TURNSTILE_NULL, TURNSTILE_WORKLOOPS);
2510
2511 if (workq_locked) {
2512 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
2513 &kqwl->kqwl_request, kqwl->kqwl_owner, ts,
2514 TURNSTILE_DELAYED_UPDATE);
2515 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
2516 /*
2517 * if the interlock is no longer the workqueue lock,
2518 * then we don't need to hold it anymore.
2519 */
2520 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2521 workq_locked = false;
2522 }
2523 }
2524 if (!workq_locked) {
2525 /*
2526 * If the interlock is the workloop's, then it's our responsibility to
2527 * call update_inheritor, so just do it.
2528 */
2529 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE);
2530 }
2531
2532 thread_set_pending_block_hint(get_machthread(uth), kThreadWaitWorkloopSyncWait);
2533 waitq_assert_wait64(&ts->ts_waitq, knote_filt_wev64(kn),
2534 THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER);
2535
2536 if (workq_locked) {
2537 workq_kern_threadreq_unlock(kqwl->kqwl_p);
2538 }
2539
2540 thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr);
2541 if (thread) {
2542 thread_reference(thread);
2543 }
2544
2545 kevent_register_wait_block(ts, thread, filt_wlwait_continue, cont_args);
2546 }
2547
2548 /* called in stackshot context to report the thread responsible for blocking this thread */
2549 void
2550 kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread,
2551 event64_t event, thread_waitinfo_t *waitinfo)
2552 {
2553 struct knote *kn = (struct knote *)event;
2554
2555 zone_require(knote_zone, kn);
2556
2557 assert(kn->kn_thread == thread);
2558
2559 struct kqueue *kq = knote_get_kq(kn);
2560
2561 zone_require(kqworkloop_zone, kq);
2562 assert(kq->kq_state & KQ_WORKLOOP);
2563
2564 struct kqworkloop *kqwl = (struct kqworkloop *)kq;
2565 workq_threadreq_t kqr = &kqwl->kqwl_request;
2566
2567 thread_t kqwl_owner = kqwl->kqwl_owner;
2568
2569 if (kqwl_owner != THREAD_NULL) {
2570 thread_require(kqwl_owner);
2571 waitinfo->owner = thread_tid(kqwl->kqwl_owner);
2572 } else if ((kqr->tr_state >= WORKQ_TR_STATE_BINDING) && (kqr->tr_thread != NULL)) {
2573 thread_require(kqr->tr_thread);
2574 waitinfo->owner = thread_tid(kqr->tr_thread);
2575 } else if (kqr_thread_requested_pending(kqr)) { /* > idle, < bound */
2576 waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED;
2577 } else {
2578 waitinfo->owner = 0;
2579 }
2580
2581 waitinfo->context = kqwl->kqwl_dynamicid;
2582 }
2583
2584 static void
2585 filt_wldetach(struct knote *kn)
2586 {
2587 if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) {
2588 filt_wldetach_sync_ipc(kn);
2589 } else if (kn->kn_thread) {
2590 kevent_register_wait_cleanup(kn);
2591 }
2592 }
2593
2594 static int
2595 filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev,
2596 thread_qos_t *qos_index)
2597 {
2598 uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK;
2599 uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK;
2600
2601 if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) {
2602 return EINVAL;
2603 }
2604 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2605 if (kev->flags & EV_DELETE) {
2606 return EINVAL;
2607 }
2608 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2609 return EINVAL;
2610 }
2611 if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) {
2612 return ERANGE;
2613 }
2614 }
2615
2616 switch (new_commands) {
2617 case NOTE_WL_THREAD_REQUEST:
2618 /* thread requests can only update themselves */
2619 if (sav_commands != NOTE_WL_THREAD_REQUEST) {
2620 return EINVAL;
2621 }
2622 break;
2623
2624 case NOTE_WL_SYNC_WAIT:
2625 if (kev->fflags & NOTE_WL_END_OWNERSHIP) {
2626 return EINVAL;
2627 }
2628 goto sync_checks;
2629
2630 case NOTE_WL_SYNC_WAKE:
2631 sync_checks:
2632 if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
2633 return EINVAL;
2634 }
2635 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2636 return EINVAL;
2637 }
2638 break;
2639
2640 case NOTE_WL_SYNC_IPC:
2641 if (sav_commands != NOTE_WL_SYNC_IPC) {
2642 return EINVAL;
2643 }
2644 if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
2645 return EINVAL;
2646 }
2647 break;
2648
2649 default:
2650 return EINVAL;
2651 }
2652 return 0;
2653 }
2654
2655 static int
2656 filt_wltouch(struct knote *kn, struct kevent_qos_s *kev)
2657 {
2658 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2659 thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED;
2660 int result = 0;
2661
2662 int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index);
2663 if (error) {
2664 goto out;
2665 }
2666
2667 uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK;
2668 if (command == NOTE_WL_SYNC_IPC) {
2669 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH);
2670 } else {
2671 error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH);
2672 filt_wlremember_last_update(kn, kev, error);
2673 }
2674 if (error == EPREEMPTDISABLED) {
2675 error = 0;
2676 result = FILTER_THREADREQ_NODEFEER;
2677 }
2678
2679 out:
2680 if (error) {
2681 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2682 /* If userland wants ESTALE to be hidden, do not activate */
2683 return result;
2684 }
2685 kev->flags |= EV_ERROR;
2686 kev->data = error;
2687 return result;
2688 }
2689 if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) {
2690 return kevent_register_wait_prepare(kn, kev, result);
2691 }
2692 /* Just touching the thread request successfully will fire it */
2693 if (command == NOTE_WL_THREAD_REQUEST) {
2694 if (kev->fflags & NOTE_WL_UPDATE_QOS) {
2695 result |= FILTER_UPDATE_REQ_QOS;
2696 }
2697 result |= FILTER_ACTIVE;
2698 }
2699 return result;
2700 }
2701
2702 static bool
2703 filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev)
2704 {
2705 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2706
2707 int error = filt_wlvalidate_kev_flags(kn, kev, NULL);
2708 if (error) {
2709 goto out;
2710 }
2711
2712 uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK);
2713 if (command == NOTE_WL_SYNC_IPC) {
2714 error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP);
2715 } else {
2716 error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP);
2717 filt_wlremember_last_update(kn, kev, error);
2718 }
2719 assert(error != EPREEMPTDISABLED);
2720
2721 out:
2722 if (error) {
2723 if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) {
2724 return false;
2725 }
2726 kev->flags |= EV_ERROR;
2727 kev->data = error;
2728 return false;
2729 }
2730 return true;
2731 }
2732
2733 static int
2734 filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev)
2735 {
2736 struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn);
2737 int rc = 0;
2738
2739 assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST);
2740
2741 kqlock(kqwl);
2742
2743 if (kqwl->kqwl_owner) {
2744 /*
2745 * <rdar://problem/33584321> userspace sometimes, due to events being
2746 * delivered but not triggering a drain session, can cause a process
2747 * of the thread request knote.
2748 *
2749 * When that happens, the automatic deactivation due to process
2750 * would swallow the event, so we have to activate the knote again.
2751 */
2752 knote_activate(kqwl, kn, FILTER_ACTIVE);
2753 } else {
2754 #if DEBUG || DEVELOPMENT
2755 if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) {
2756 /*
2757 * see src/queue_internal.h in libdispatch
2758 */
2759 #define DISPATCH_QUEUE_ENQUEUED 0x1ull
2760 user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]);
2761 task_t t = current_task();
2762 uint64_t val;
2763 if (addr && task_is_active(t) && !task_is_halting(t) &&
2764 copyin_atomic64(addr, &val) == 0 &&
2765 val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 &&
2766 (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) {
2767 panic("kevent: workloop %#016llx is not enqueued "
2768 "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)",
2769 kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]);
2770 }
2771 }
2772 #endif
2773 knote_fill_kevent(kn, kev, 0);
2774 kev->fflags = kn->kn_sfflags;
2775 rc |= FILTER_ACTIVE;
2776 }
2777
2778 kqunlock(kqwl);
2779
2780 if (rc & FILTER_ACTIVE) {
2781 workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request);
2782 }
2783 return rc;
2784 }
2785
2786 SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = {
2787 .f_extended_codes = true,
2788 .f_attach = filt_wlattach,
2789 .f_detach = filt_wldetach,
2790 .f_event = filt_bad_event,
2791 .f_touch = filt_wltouch,
2792 .f_process = filt_wlprocess,
2793 .f_allow_drop = filt_wlallow_drop,
2794 .f_post_register_wait = filt_wlpost_register_wait,
2795 };
2796
2797 #pragma mark - kqueues allocation and deallocation
2798
2799 OS_NOINLINE
2800 static void
2801 kqworkloop_dealloc(struct kqworkloop *, bool hash_remove);
2802
2803 static inline bool
2804 kqworkloop_try_retain(struct kqworkloop *kqwl)
2805 {
2806 return os_ref_retain_try_raw(&kqwl->kqwl_retains, NULL);
2807 }
2808
2809 static inline void
2810 kqworkloop_retain(struct kqworkloop *kqwl)
2811 {
2812 return os_ref_retain_raw(&kqwl->kqwl_retains, NULL);
2813 }
2814
2815 OS_ALWAYS_INLINE
2816 static inline void
2817 kqueue_retain(kqueue_t kqu)
2818 {
2819 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2820 kqworkloop_retain(kqu.kqwl);
2821 }
2822 }
2823
2824 OS_ALWAYS_INLINE
2825 static inline void
2826 kqworkloop_release_live(struct kqworkloop *kqwl)
2827 {
2828 os_ref_release_live_raw(&kqwl->kqwl_retains, NULL);
2829 }
2830
2831 OS_ALWAYS_INLINE
2832 static inline void
2833 kqueue_release_live(kqueue_t kqu)
2834 {
2835 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2836 kqworkloop_release_live(kqu.kqwl);
2837 }
2838 }
2839
2840 OS_ALWAYS_INLINE
2841 static inline void
2842 kqworkloop_release(struct kqworkloop *kqwl)
2843 {
2844 if (os_ref_release_raw(&kqwl->kqwl_retains, NULL) == 0) {
2845 kqworkloop_dealloc(kqwl, true);
2846 }
2847 }
2848
2849 OS_ALWAYS_INLINE
2850 static inline void
2851 kqueue_release(kqueue_t kqu)
2852 {
2853 if (kqu.kq->kq_state & KQ_DYNAMIC) {
2854 kqworkloop_release(kqu.kqwl);
2855 }
2856 }
2857
2858 /*!
2859 * @function kqueue_destroy
2860 *
2861 * @brief
2862 * Common part to all kqueue dealloc functions.
2863 */
2864 OS_NOINLINE
2865 static void
2866 kqueue_destroy(kqueue_t kqu, zone_t zone)
2867 {
2868 lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp);
2869
2870 zfree(zone, kqu.kq);
2871 }
2872
2873 /*!
2874 * @function kqueue_init
2875 *
2876 * @brief
2877 * Common part to all kqueue alloc functions.
2878 */
2879 static kqueue_t
2880 kqueue_init(kqueue_t kqu)
2881 {
2882 lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL);
2883 return kqu;
2884 }
2885
2886 #pragma mark kqfile allocation and deallocation
2887
2888 /*!
2889 * @function kqueue_dealloc
2890 *
2891 * @brief
2892 * Detach all knotes from a kqfile and free it.
2893 *
2894 * @discussion
2895 * We walk each list looking for knotes referencing
2896 * this kqueue. If we find one, we try to drop it. But
2897 * if we fail to get a drop reference, that will wait
2898 * until it is dropped. So, we can just restart again
2899 * safe in the assumption that the list will eventually
2900 * not contain any more references to this kqueue (either
2901 * we dropped them all, or someone else did).
2902 *
2903 * Assumes no new events are being added to the kqueue.
2904 * Nothing locked on entry or exit.
2905 */
2906 void
2907 kqueue_dealloc(struct kqueue *kq)
2908 {
2909 KNOTE_LOCK_CTX(knlc);
2910 struct proc *p = kq->kq_p;
2911 struct filedesc *fdp = &p->p_fd;
2912 struct knote *kn;
2913
2914 assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
2915
2916 proc_fdlock(p);
2917 for (int i = 0; i < fdp->fd_knlistsize; i++) {
2918 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2919 while (kn != NULL) {
2920 if (kq == knote_get_kq(kn)) {
2921 kqlock(kq);
2922 proc_fdunlock(p);
2923 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2924 knote_drop(kq, kn, &knlc);
2925 }
2926 proc_fdlock(p);
2927 /* start over at beginning of list */
2928 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
2929 continue;
2930 }
2931 kn = SLIST_NEXT(kn, kn_link);
2932 }
2933 }
2934
2935 knhash_lock(fdp);
2936 proc_fdunlock(p);
2937
2938 if (fdp->fd_knhashmask != 0) {
2939 for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
2940 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2941 while (kn != NULL) {
2942 if (kq == knote_get_kq(kn)) {
2943 kqlock(kq);
2944 knhash_unlock(fdp);
2945 if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
2946 knote_drop(kq, kn, &knlc);
2947 }
2948 knhash_lock(fdp);
2949 /* start over at beginning of list */
2950 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
2951 continue;
2952 }
2953 kn = SLIST_NEXT(kn, kn_link);
2954 }
2955 }
2956 }
2957 knhash_unlock(fdp);
2958
2959 kqueue_destroy(kq, kqfile_zone);
2960 }
2961
2962 /*!
2963 * @function kqueue_alloc
2964 *
2965 * @brief
2966 * Allocate a kqfile.
2967 */
2968 struct kqueue *
2969 kqueue_alloc(struct proc *p)
2970 {
2971 struct kqfile *kqf;
2972
2973 /*
2974 * kqfiles are created with kqueue() so we need to wait for
2975 * the first kevent syscall to know which bit among
2976 * KQ_KEV_{32,64,QOS} will be set in kqf_state
2977 */
2978 kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO);
2979 kqf->kqf_p = p;
2980 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue);
2981 TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed);
2982
2983 return kqueue_init(kqf).kq;
2984 }
2985
2986 /*!
2987 * @function kqueue_internal
2988 *
2989 * @brief
2990 * Core implementation for kqueue and guarded_kqueue_np()
2991 */
2992 int
2993 kqueue_internal(struct proc *p, fp_initfn_t fp_init, void *initarg, int32_t *retval)
2994 {
2995 struct kqueue *kq;
2996 struct fileproc *fp;
2997 int fd, error;
2998
2999 error = falloc_withinit(p, &fp, &fd, vfs_context_current(),
3000 fp_init, initarg);
3001 if (error) {
3002 return error;
3003 }
3004
3005 kq = kqueue_alloc(p);
3006 if (kq == NULL) {
3007 fp_free(p, fd, fp);
3008 return ENOMEM;
3009 }
3010
3011 fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
3012 fp->f_flag = FREAD | FWRITE;
3013 fp->f_ops = &kqueueops;
3014 fp_set_data(fp, kq);
3015 fp->f_lflags |= FG_CONFINED;
3016
3017 proc_fdlock(p);
3018 procfdtbl_releasefd(p, fd, NULL);
3019 fp_drop(p, fd, fp, 1);
3020 proc_fdunlock(p);
3021
3022 *retval = fd;
3023 return error;
3024 }
3025
3026 /*!
3027 * @function kqueue
3028 *
3029 * @brief
3030 * The kqueue syscall.
3031 */
3032 int
3033 kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
3034 {
3035 return kqueue_internal(p, NULL, NULL, retval);
3036 }
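/*
 * Usage sketch (userspace, illustrative): this syscall backs the libc
 * kqueue() wrapper, so the common sequence is simply
 *
 *	int kq = kqueue();	// close-on-exec, per FP_CLOEXEC above
 *	if (kq == -1)
 *		err(1, "kqueue");
 *
 * followed by kevent() / kevent64() calls against the returned descriptor.
 */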
3037
3038 #pragma mark kqworkq allocation and deallocation
3039
3040 /*!
3041 * @function kqworkq_dealloc
3042 *
3043 * @brief
3044 * Deallocates a workqueue kqueue.
3045 *
3046 * @discussion
3047 * This only happens at process death, or for races with concurrent
3048 * kevent_get_kqwq calls, hence we don't have to care about knotes referencing
3049 * this kqueue: either there are none, or someone else took care of them.
3050 */
3051 void
3052 kqworkq_dealloc(struct kqworkq *kqwq)
3053 {
3054 kqueue_destroy(kqwq, kqworkq_zone);
3055 }
3056
3057 /*!
3058 * @function kqworkq_alloc
3059 *
3060 * @brief
3061 * Allocates a workqueue kqueue.
3062 *
3063 * @discussion
3064 * This is the slow path of kevent_get_kqwq.
3065 * This takes care of making sure procs have a single workq kqueue.
3066 */
3067 OS_NOINLINE
3068 static struct kqworkq *
3069 kqworkq_alloc(struct proc *p, unsigned int flags)
3070 {
3071 struct kqworkq *kqwq, *tmp;
3072
3073 kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO);
3074
3075 assert((flags & KEVENT_FLAG_LEGACY32) == 0);
3076 if (flags & KEVENT_FLAG_LEGACY64) {
3077 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64;
3078 } else {
3079 kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS;
3080 }
3081 kqwq->kqwq_p = p;
3082
3083 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3084 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]);
3085 TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]);
3086 }
3087 for (int i = 0; i < KQWQ_NBUCKETS; i++) {
3088 /*
3089 * Because of how the bucketized system works, we mix overcommit
3090 * sources with not overcommit: each time we move a knote from
3091 * one bucket to the next due to overrides, we'd have to track
3092 * overcommitness, and it's really not worth it in the
3093 * workloop-enabled world to track this faithfully.
3094 *
3095 * Incidentally, this behaves like the original manager-based
3096 * kqwq where event delivery always happened (hence is
3097 * "overcommit")
3098 */
3099 kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE;
3100 kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT;
3101 if (i != KQWQ_QOS_MANAGER) {
3102 kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
3103 }
3104 kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i + 1;
3105 }
3106
3107 kqueue_init(kqwq);
3108
3109 if (!os_atomic_cmpxchgv(&p->p_fd.fd_wqkqueue, NULL, kqwq, &tmp, release)) {
3110 kqworkq_dealloc(kqwq);
3111 return tmp;
3112 }
3113
3114 return kqwq;
3115 }
3116
3117 #pragma mark kqworkloop allocation and deallocation
3118
3119 #define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
3120 #define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE
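/*
 * Illustrative example: KQ_HASH folds the second byte of the dynamic id
 * into the low byte before masking, e.g. with a 64-bucket table
 * (mask 0x3f):
 *
 *	KQ_HASH(0x1234, 0x3f) == ((0x1234 ^ 0x12) & 0x3f) == 0x26
 *
 * so the second byte of the id is mixed into the low bits instead of being
 * discarded by the mask.
 */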
3121
3122 OS_ALWAYS_INLINE
3123 static inline void
3124 kqhash_lock(struct filedesc *fdp)
3125 {
3126 lck_mtx_lock_spin_always(&fdp->fd_kqhashlock);
3127 }
3128
3129 OS_ALWAYS_INLINE
3130 static inline void
3131 kqhash_unlock(struct filedesc *fdp)
3132 {
3133 lck_mtx_unlock(&fdp->fd_kqhashlock);
3134 }
3135
3136 OS_ALWAYS_INLINE
3137 static inline void
3138 kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id,
3139 struct kqworkloop *kqwl)
3140 {
3141 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3142 LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink);
3143 }
3144
3145 OS_ALWAYS_INLINE
3146 static inline struct kqworkloop *
3147 kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id)
3148 {
3149 struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)];
3150 struct kqworkloop *kqwl;
3151
3152 LIST_FOREACH(kqwl, list, kqwl_hashlink) {
3153 if (kqwl->kqwl_dynamicid == id) {
3154 return kqwl;
3155 }
3156 }
3157 return NULL;
3158 }
3159
3160 static struct kqworkloop *
3161 kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id)
3162 {
3163 struct kqworkloop *kqwl = NULL;
3164
3165 kqhash_lock(fdp);
3166 if (__probable(fdp->fd_kqhash)) {
3167 kqwl = kqworkloop_hash_lookup_locked(fdp, kq_id);
3168 if (kqwl && !kqworkloop_try_retain(kqwl)) {
3169 kqwl = NULL;
3170 }
3171 }
3172 kqhash_unlock(fdp);
3173 return kqwl;
3174 }
3175
3176 OS_NOINLINE
3177 static void
3178 kqworkloop_hash_init(struct filedesc *fdp)
3179 {
3180 struct kqwllist *alloc_hash;
3181 u_long alloc_mask;
3182
3183 kqhash_unlock(fdp);
3184 alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, &alloc_mask);
3185 kqhash_lock(fdp);
3186
3187 /* See if we won the race */
3188 if (__probable(fdp->fd_kqhashmask == 0)) {
3189 fdp->fd_kqhash = alloc_hash;
3190 fdp->fd_kqhashmask = alloc_mask;
3191 } else {
3192 kqhash_unlock(fdp);
3193 hashdestroy(alloc_hash, M_KQUEUE, alloc_mask);
3194 kqhash_lock(fdp);
3195 }
3196 }
3197
3198 /*
3199 * kqueue iotier override is only supported for a kqueue that has
3200 * exactly one port as a mach port source. Updating the iotier
3201 * override on the mach port source will update the override
3202 * on the kqueue as well. Since a kqueue with an iotier override
3203 * will only have one port attached, there is no saturation logic
3204 * as there is for QoS overrides; the iotier override of the mach
3205 * port source is reflected directly in the kevent iotier override.
3206 */
3207 void
3208 kqueue_set_iotier_override(kqueue_t kqu, uint8_t iotier_override)
3209 {
3210 if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3211 return;
3212 }
3213
3214 struct kqworkloop *kqwl = kqu.kqwl;
3215 os_atomic_store(&kqwl->kqwl_iotier_override, iotier_override, relaxed);
3216 }
3217
3218 uint8_t
3219 kqueue_get_iotier_override(kqueue_t kqu)
3220 {
3221 if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3222 return THROTTLE_LEVEL_END;
3223 }
3224
3225 struct kqworkloop *kqwl = kqu.kqwl;
3226 return os_atomic_load(&kqwl->kqwl_iotier_override, relaxed);
3227 }
3228
3229 #if CONFIG_PREADOPT_TG
3230 /*
3231 * This function is called with a borrowed reference on the thread group,
3232 * without the kq lock held but with the mqueue lock held. It may or may not
3233 * have the knote lock (called from both fevent as well as fattach/ftouch).
3234 * Upon success, an additional reference on the TG is taken
3235 */
3236 void
3237 kqueue_set_preadopted_thread_group(kqueue_t kqu, struct thread_group *tg, thread_qos_t qos)
3238 {
3239 if (!(kqu.kq->kq_state & KQ_WORKLOOP)) {
3240 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_PREADOPT_NA),
3241 (uintptr_t)thread_tid(current_thread()), 0, 0, 0);
3242 return;
3243 }
3244
3245 struct kqworkloop *kqwl = kqu.kqwl;
3246
3247 assert(qos < THREAD_QOS_LAST);
3248
3249 thread_group_retain(tg);
3250
3251 thread_group_qos_t old_tg; thread_group_qos_t new_tg;
3252 int ret = os_atomic_rmw_loop(&kqwl->kqwl_preadopt_tg, old_tg, new_tg, relaxed, {
3253 if (!KQWL_CAN_ADOPT_PREADOPT_TG(old_tg)) {
3254 os_atomic_rmw_loop_give_up(break);
3255 }
3256
3257 if (old_tg != KQWL_PREADOPTED_TG_NULL) {
3258 /*
3259 * Note that old_tg could be a NULL TG pointer but with a QoS
3260 * set. See also workq_thread_reset_pri.
3261 *
3262 * Compare the QoS of existing preadopted tg with new one and
3263 * only overwrite the thread group if we have one with a higher
3264 * QoS.
3265 */
3266 thread_qos_t existing_qos = KQWL_GET_PREADOPTED_TG_QOS(old_tg);
3267 if (existing_qos >= qos) {
3268 os_atomic_rmw_loop_give_up(break);
3269 }
3270 }
3271
3272 // Transfer the ref taken earlier in the function to the kqwl
3273 new_tg = KQWL_ENCODE_PREADOPTED_TG_QOS(tg, qos);
3274 });
3275
3276 if (ret) {
3277 KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_INCOMING_IPC, old_tg, tg);
3278
3279 if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
3280 thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(old_tg));
3281 }
3282
3283 os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE, release);
3284 } else {
3285 // We failed to write to the kqwl_preadopt_tg, drop the ref we took
3286 // earlier in the function
3287 thread_group_deallocate_safe(tg);
3288 }
3289 }
3290
3291 /*
3292 * Called from fprocess of EVFILT_MACHPORT without the kqueue lock held.
3293 */
3294 bool
3295 kqueue_process_preadopt_thread_group(thread_t thread, struct kqueue *kq, struct thread_group *tg)
3296 {
3297 bool success = false;
3298 if (kq->kq_state & KQ_WORKLOOP) {
3299 struct kqworkloop *kqwl = (struct kqworkloop *) kq;
3300 thread_group_qos_t old_tg;
3301 success = os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg,
3302 KQWL_PREADOPTED_TG_SENTINEL, KQWL_PREADOPTED_TG_PROCESSED,
3303 &old_tg, relaxed);
3304 if (success) {
3305 thread_set_preadopt_thread_group(thread, tg);
3306 }
3307
3308 __assert_only thread_group_qos_t preadopt_tg;
3309 preadopt_tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed);
3310 assert(preadopt_tg == KQWL_PREADOPTED_TG_PROCESSED ||
3311 preadopt_tg == KQWL_PREADOPTED_TG_NEVER);
3312 }
3313
3314 return success;
3315 }
3316 #endif
3317
3318 /*!
3319 * @function kqworkloop_dealloc
3320 *
3321 * @brief
3322 * Deallocates a workloop kqueue.
3323 *
3324 * @discussion
3325 * Knotes hold references on the workloop, so we can't really reach this
3326 * function unless all of these are already gone.
3327 *
3328 * Nothing locked on entry or exit.
3329 *
3330 * @param hash_remove
3331 * Whether to remove the workloop from its hash table.
3332 */
3333 static void
3334 kqworkloop_dealloc(struct kqworkloop *kqwl, bool hash_remove)
3335 {
3336 thread_t cur_owner;
3337
3338 cur_owner = kqwl->kqwl_owner;
3339 if (cur_owner) {
3340 if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) {
3341 thread_drop_kevent_override(cur_owner);
3342 }
3343 thread_deallocate(cur_owner);
3344 kqwl->kqwl_owner = THREAD_NULL;
3345 }
3346
3347 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
3348 struct turnstile *ts;
3349 turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
3350 &ts, TURNSTILE_WORKLOOPS);
3351 turnstile_cleanup();
3352 turnstile_deallocate(ts);
3353 }
3354
3355 if (hash_remove) {
3356 struct filedesc *fdp = &kqwl->kqwl_p->p_fd;
3357
3358 kqhash_lock(fdp);
3359 LIST_REMOVE(kqwl, kqwl_hashlink);
3360 kqhash_unlock(fdp);
3361 }
3362
3363 #if CONFIG_PREADOPT_TG
3364 thread_group_qos_t tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed);
3365 if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
3366 thread_group_release(KQWL_GET_PREADOPTED_TG(tg));
3367 }
3368 #endif
3369
3370 assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed));
3371 assert(kqwl->kqwl_owner == THREAD_NULL);
3372 assert(kqwl->kqwl_turnstile == TURNSTILE_NULL);
3373
3374 lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp);
3375 kqueue_destroy(kqwl, kqworkloop_zone);
3376 }
3377
3378 /*!
3379 * @function kqworkloop_init
3380 *
3381 * @brief
3382 * Initializes a freshly allocated workloop kqueue.
3383 */
3384 static void
3385 kqworkloop_init(struct kqworkloop *kqwl, proc_t p,
3386 kqueue_id_t id, workq_threadreq_param_t *trp)
3387 {
3388 kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS;
3389 os_ref_init_raw(&kqwl->kqwl_retains, NULL);
3390 kqwl->kqwl_dynamicid = id;
3391 kqwl->kqwl_p = p;
3392 if (trp) {
3393 kqwl->kqwl_params = trp->trp_value;
3394 }
3395
3396 workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP;
3397 if (trp) {
3398 if (trp->trp_flags & TRP_PRIORITY) {
3399 tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS;
3400 }
3401 if (trp->trp_flags) {
3402 tr_flags |= WORKQ_TR_FLAG_WL_PARAMS;
3403 }
3404 }
3405 kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE;
3406 kqwl->kqwl_request.tr_flags = tr_flags;
3407 os_atomic_store(&kqwl->kqwl_iotier_override, (uint8_t)THROTTLE_LEVEL_END, relaxed);
3408 #if CONFIG_PREADOPT_TG
3409 if (task_is_app(current_task())) {
3410 /* Apps will never adopt a thread group that is not their own. This is a
3411 * gross hack to simulate the post-process that is done in the voucher
3412 * subsystem today for thread groups */
3413 os_atomic_store(&kqwl->kqwl_preadopt_tg, KQWL_PREADOPTED_TG_NEVER, relaxed);
3414 }
3415 #endif
3416
3417 for (int i = 0; i < KQWL_NBUCKETS; i++) {
3418 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]);
3419 }
3420 TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed);
3421
3422 lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL);
3423
3424 kqueue_init(kqwl);
3425 }
3426
3427 /*!
3428 * @function kqworkloop_get_or_create
3429 *
3430 * @brief
3431 * Wrapper around kqworkloop_alloc that handles the uniquing of workloops.
3432 *
3433 * @returns
3434 * 0: success
3435 * EINVAL: invalid parameters
3436 * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists.
3437 * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found.
3438 * ENOMEM: allocation failed
3439 */
3440 static int
3441 kqworkloop_get_or_create(struct proc *p, kqueue_id_t id,
3442 workq_threadreq_param_t *trp, unsigned int flags, struct kqworkloop **kqwlp)
3443 {
3444 struct filedesc *fdp = &p->p_fd;
3445 struct kqworkloop *alloc_kqwl = NULL;
3446 struct kqworkloop *kqwl = NULL;
3447 int error = 0;
3448
3449 assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST));
3450
3451 if (id == 0 || id == (kqueue_id_t)-1) {
3452 return EINVAL;
3453 }
3454
3455 for (;;) {
3456 kqhash_lock(fdp);
3457 if (__improbable(fdp->fd_kqhash == NULL)) {
3458 kqworkloop_hash_init(fdp);
3459 }
3460
3461 kqwl = kqworkloop_hash_lookup_locked(fdp, id);
3462 if (kqwl) {
3463 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
3464 /*
3465 * If MUST_NOT_EXIST was passed, even if we would have failed
3466 * the try_retain, it could have gone the other way, and
3467 * userspace can't tell. Let'em fix their race.
3468 */
3469 error = EEXIST;
3470 break;
3471 }
3472
3473 if (__probable(kqworkloop_try_retain(kqwl))) {
3474 /*
3475 * This is a valid live workloop !
3476 */
3477 *kqwlp = kqwl;
3478 error = 0;
3479 break;
3480 }
3481 }
3482
3483 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) {
3484 error = ENOENT;
3485 break;
3486 }
3487
3488 /*
3489 * We didn't find what we were looking for.
3490 *
3491 * If this is the second time we reach this point (alloc_kqwl != NULL),
3492 * then we're done.
3493 *
3494 * If this is the first time we reach this point (alloc_kqwl == NULL),
3495 * then try to allocate one without blocking.
3496 */
3497 if (__probable(alloc_kqwl == NULL)) {
3498 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO);
3499 }
3500 if (__probable(alloc_kqwl)) {
3501 kqworkloop_init(alloc_kqwl, p, id, trp);
3502 kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl);
3503 kqhash_unlock(fdp);
3504 *kqwlp = alloc_kqwl;
3505 return 0;
3506 }
3507
3508 /*
3509 * We have to block to allocate a workloop, drop the lock,
3510 * allocate one, but then we need to retry lookups as someone
3511 * else could race with us.
3512 */
3513 kqhash_unlock(fdp);
3514
3515 alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO);
3516 }
3517
3518 kqhash_unlock(fdp);
3519
3520 if (__improbable(alloc_kqwl)) {
3521 zfree(kqworkloop_zone, alloc_kqwl);
3522 }
3523
3524 return error;
3525 }
3526
3527 #pragma mark - knotes
3528
3529 static int
3530 filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
3531 {
3532 knote_set_error(kn, ENOTSUP);
3533 return 0;
3534 }
3535
3536 static void
3537 filt_no_detach(__unused struct knote *kn)
3538 {
3539 }
3540
3541 static int __dead2
3542 filt_bad_event(struct knote *kn, long hint)
3543 {
3544 panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint);
3545 }
3546
3547 static int __dead2
3548 filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev)
3549 {
3550 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3551 }
3552
3553 static int __dead2
3554 filt_bad_process(struct knote *kn, struct kevent_qos_s *kev)
3555 {
3556 panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev);
3557 }
3558
3559 /*
3560 * knotes_dealloc - detach all knotes for the process and drop them
3561 *
3562 * Process is in such a state that it will not try to allocate
3563 * any more knotes during this process (stopped for exit or exec).
3564 */
3565 void
3566 knotes_dealloc(proc_t p)
3567 {
3568 struct filedesc *fdp = &p->p_fd;
3569 struct kqueue *kq;
3570 struct knote *kn;
3571 struct klist *kn_hash = NULL;
3572 u_long kn_hashmask;
3573 int i;
3574
3575 proc_fdlock(p);
3576
3577 /* Close all the fd-indexed knotes up front */
3578 if (fdp->fd_knlistsize > 0) {
3579 for (i = 0; i < fdp->fd_knlistsize; i++) {
3580 while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) {
3581 kq = knote_get_kq(kn);
3582 kqlock(kq);
3583 proc_fdunlock(p);
3584 knote_drop(kq, kn, NULL);
3585 proc_fdlock(p);
3586 }
3587 }
3588 /* free the table */
3589 kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist);
3590 }
3591 fdp->fd_knlistsize = 0;
3592
3593 proc_fdunlock(p);
3594
3595 knhash_lock(fdp);
3596
3597 /* Clean out all the hashed knotes as well */
3598 if (fdp->fd_knhashmask != 0) {
3599 for (i = 0; i <= (int)fdp->fd_knhashmask; i++) {
3600 while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) {
3601 kq = knote_get_kq(kn);
3602 kqlock(kq);
3603 knhash_unlock(fdp);
3604 knote_drop(kq, kn, NULL);
3605 knhash_lock(fdp);
3606 }
3607 }
3608 kn_hash = fdp->fd_knhash;
3609 kn_hashmask = fdp->fd_knhashmask;
3610 fdp->fd_knhashmask = 0;
3611 fdp->fd_knhash = NULL;
3612 }
3613
3614 knhash_unlock(fdp);
3615
3616 if (kn_hash) {
3617 hashdestroy(kn_hash, M_KQUEUE, kn_hashmask);
3618 }
3619 }
3620
3621 /*
3622 * kqworkloops_dealloc - rebalance retains on kqworkloops created with
3623 * scheduling parameters
3624 *
3625 * Process is in such a state that it will not try to allocate
3626 * any more knotes during this process (stopped for exit or exec).
3627 */
3628 void
3629 kqworkloops_dealloc(proc_t p)
3630 {
3631 struct filedesc *fdp = &p->p_fd;
3632 struct kqworkloop *kqwl, *kqwln;
3633 struct kqwllist tofree;
3634
3635 if (!fdt_flag_test(fdp, FD_WORKLOOP)) {
3636 return;
3637 }
3638
3639 kqhash_lock(fdp);
3640
3641 if (fdp->fd_kqhashmask == 0) {
3642 kqhash_unlock(fdp);
3643 return;
3644 }
3645
3646 LIST_INIT(&tofree);
3647
3648 for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) {
3649 LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) {
3650 /*
3651 * kqworkloops that have scheduling parameters have an
3652 * implicit retain from kqueue_workloop_ctl that needs
3653 * to be balanced on process exit.
3654 */
3655 assert(kqwl->kqwl_params);
3656 LIST_REMOVE(kqwl, kqwl_hashlink);
3657 LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink);
3658 }
3659 }
3660
3661 kqhash_unlock(fdp);
3662
3663 LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) {
3664 uint32_t ref = os_ref_get_count_raw(&kqwl->kqwl_retains);
3665 if (ref != 1) {
3666 panic("kq(%p) invalid refcount %d", kqwl, ref);
3667 }
3668 kqworkloop_dealloc(kqwl, false);
3669 }
3670 }
3671
3672 static int
3673 kevent_register_validate_priority(struct kqueue *kq, struct knote *kn,
3674 struct kevent_qos_s *kev)
3675 {
3676 /* We don't care about the priority of a disabled or deleted knote */
3677 if (kev->flags & (EV_DISABLE | EV_DELETE)) {
3678 return 0;
3679 }
3680
3681 if (kq->kq_state & KQ_WORKLOOP) {
3682 /*
3683 * Workloops need valid priorities with a QOS (excluding manager) for
3684 * any enabled knote.
3685 *
3686 * When it is pre-existing, just make sure it has a valid QoS as
3687 * kevent_register() will not use the incoming priority (filters who do
3688 * have the responsibility to validate it again, see filt_wltouch).
3689 *
3690 * If the knote is being made, validate the incoming priority.
3691 */
3692 if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) {
3693 return ERANGE;
3694 }
3695 }
3696
3697 return 0;
3698 }
3699
3700 /*
3701 * Prepare a filter for waiting after register.
3702 *
3703 * The f_post_register_wait hook will be called later by kevent_register()
3704 * and should call kevent_register_wait_block()
3705 */
3706 static int
3707 kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc)
3708 {
3709 thread_t thread = current_thread();
3710
3711 assert(knote_fops(kn)->f_extended_codes);
3712
3713 if (kn->kn_thread == NULL) {
3714 thread_reference(thread);
3715 kn->kn_thread = thread;
3716 } else if (kn->kn_thread != thread) {
3717 /*
3718 * kn_thread may be set from a previous aborted wait.
3719 * However, it has to be from the same thread.
3720 */
3721 kev->flags |= EV_ERROR;
3722 kev->data = EXDEV;
3723 return 0;
3724 }
3725
3726 return FILTER_REGISTER_WAIT | rc;
3727 }
3728
3729 /*
3730 * Cleanup a kevent_register_wait_prepare() effect for threads that have been
3731 * aborted instead of properly woken up with thread_wakeup_thread().
3732 */
3733 static void
3734 kevent_register_wait_cleanup(struct knote *kn)
3735 {
3736 thread_t thread = kn->kn_thread;
3737 kn->kn_thread = NULL;
3738 thread_deallocate(thread);
3739 }
3740
3741 /*
3742 * Must be called at the end of a f_post_register_wait call from a filter.
3743 */
3744 static void
3745 kevent_register_wait_block(struct turnstile *ts, thread_t thread,
3746 thread_continue_t cont, struct _kevent_register *cont_args)
3747 {
3748 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
3749 kqunlock(cont_args->kqwl);
3750 cont_args->handoff_thread = thread;
3751 thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE);
3752 }
3753
3754 /*
3755 * Called by Filters using a f_post_register_wait to return from their wait.
3756 */
3757 static void
3758 kevent_register_wait_return(struct _kevent_register *cont_args)
3759 {
3760 struct kqworkloop *kqwl = cont_args->kqwl;
3761 struct kevent_qos_s *kev = &cont_args->kev;
3762 int error = 0;
3763
3764 if (cont_args->handoff_thread) {
3765 thread_deallocate(cont_args->handoff_thread);
3766 }
3767
3768 if (kev->flags & (EV_ERROR | EV_RECEIPT)) {
3769 if ((kev->flags & EV_ERROR) == 0) {
3770 kev->flags |= EV_ERROR;
3771 kev->data = 0;
3772 }
3773 error = kevent_modern_copyout(kev, &cont_args->ueventlist);
3774 if (error == 0) {
3775 cont_args->eventout++;
3776 }
3777 }
3778
3779 kqworkloop_release(kqwl);
3780 if (error == 0) {
3781 *(int32_t *)&current_uthread()->uu_rval = cont_args->eventout;
3782 }
3783 unix_syscall_return(error);
3784 }
3785
3786 /*
3787 * kevent_register - add a new event to a kqueue
3788 *
3789 * Creates a mapping between the event source and
3790 * the kqueue via a knote data structure.
3791 *
3792 * Because many/most of the event sources are file
3793 * descriptor related, the knote is linked off
3794 * the filedescriptor table for quick access.
3795 *
3796 * called with nothing locked
3797 * caller holds a reference on the kqueue
3798 */
3799
3800 int
3801 kevent_register(struct kqueue *kq, struct kevent_qos_s *kev,
3802 struct knote **kn_out)
3803 {
3804 struct proc *p = kq->kq_p;
3805 const struct filterops *fops;
3806 struct knote *kn = NULL;
3807 int result = 0, error = 0;
3808 unsigned short kev_flags = kev->flags;
3809 KNOTE_LOCK_CTX(knlc);
3810
3811 if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) {
3812 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
3813 } else {
3814 error = EINVAL;
3815 goto out;
3816 }
3817
3818 /* restrict EV_VANISHED to adding udata-specific dispatch kevents */
3819 if (__improbable((kev->flags & EV_VANISHED) &&
3820 (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) {
3821 error = EINVAL;
3822 goto out;
3823 }
3824
3825 /* Simplify the flags - delete and disable overrule */
3826 if (kev->flags & EV_DELETE) {
3827 kev->flags &= ~EV_ADD;
3828 }
3829 if (kev->flags & EV_DISABLE) {
3830 kev->flags &= ~EV_ENABLE;
3831 }
3832
3833 if (kq->kq_state & KQ_WORKLOOP) {
3834 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER),
3835 ((struct kqworkloop *)kq)->kqwl_dynamicid,
3836 kev->udata, kev->flags, kev->filter);
3837 } else if (kq->kq_state & KQ_WORKQ) {
3838 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER),
3839 0, kev->udata, kev->flags, kev->filter);
3840 } else {
3841 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER),
3842 VM_KERNEL_UNSLIDE_OR_PERM(kq),
3843 kev->udata, kev->flags, kev->filter);
3844 }
3845
3846 restart:
3847 /* find the matching knote from the fd tables/hashes */
3848 kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p);
3849 error = kevent_register_validate_priority(kq, kn, kev);
3850 result = 0;
3851 if (error) {
3852 if (kn) {
3853 kqunlock(kq);
3854 }
3855 goto out;
3856 }
3857
3858 if (kn == NULL && (kev->flags & EV_ADD) == 0) {
3859 /*
3860 * No knote found, EV_ADD wasn't specified
3861 */
3862
3863 if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) &&
3864 (kq->kq_state & KQ_WORKLOOP)) {
3865 /*
3866 * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete
3867 * that doesn't care about ENOENT, so just pretend the deletion
3868 * happened.
3869 */
3870 } else {
3871 error = ENOENT;
3872 }
3873 goto out;
3874 } else if (kn == NULL) {
3875 /*
3876 * No knote found, need to attach a new one (attach)
3877 */
3878
3879 struct fileproc *knote_fp = NULL;
3880
3881 /* grab a file reference for the new knote */
3882 if (fops->f_isfd) {
3883 if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) {
3884 goto out;
3885 }
3886 }
3887
3888 kn = knote_alloc();
3889 kn->kn_fp = knote_fp;
3890 kn->kn_is_fd = fops->f_isfd;
3891 kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED);
3892 kn->kn_status = 0;
3893
3894 /* was vanish support requested */
3895 if (kev->flags & EV_VANISHED) {
3896 kev->flags &= ~EV_VANISHED;
3897 kn->kn_status |= KN_REQVANISH;
3898 }
3899
3900 /* snapshot matching/dispatching protocol flags into knote */
3901 if (kev->flags & EV_DISABLE) {
3902 kn->kn_status |= KN_DISABLED;
3903 }
3904
3905 /*
3906 * copy the kevent state into knote
3907 * protocol is that fflags and data
3908 * are saved off, and cleared before
3909 * calling the attach routine.
3910 *
3911 * - kn->kn_sfflags aliases with kev->xflags
3912 * - kn->kn_sdata aliases with kev->data
3913 * - kn->kn_filter is the top 8 bits of kev->filter
3914 */
3915 kn->kn_kevent = *(struct kevent_internal_s *)kev;
3916 kn->kn_sfflags = kev->fflags;
3917 kn->kn_filtid = (uint8_t)~kev->filter;
3918 kn->kn_fflags = 0;
3919 knote_reset_priority(kq, kn, kev->qos);
3920
3921 /* Add the knote for lookup thru the fd table */
3922 error = kq_add_knote(kq, kn, &knlc, p);
3923 if (error) {
3924 knote_free(kn);
3925 if (knote_fp != NULL) {
3926 fp_drop(p, (int)kev->ident, knote_fp, 0);
3927 }
3928
3929 if (error == ERESTART) {
3930 goto restart;
3931 }
3932 goto out;
3933 }
3934
3935 /* fp reference count now applies to knote */
3936
3937 /*
3938 * we can't use filter_call() because f_attach can change the filter ops
3939 * for a filter that supports f_extended_codes, so we need to reload
3940 * knote_fops() and not use `fops`.
3941 */
3942 result = fops->f_attach(kn, kev);
3943 if (result && !knote_fops(kn)->f_extended_codes) {
3944 result = FILTER_ACTIVE;
3945 }
3946
3947 kqlock(kq);
3948
3949 if (result & FILTER_THREADREQ_NODEFEER) {
3950 enable_preemption();
3951 }
3952
3953 if (kn->kn_flags & EV_ERROR) {
3954 /*
3955 * Failed to attach correctly, so drop.
3956 */
3957 kn->kn_filtid = EVFILTID_DETACHED;
3958 error = (int)kn->kn_sdata;
3959 knote_drop(kq, kn, &knlc);
3960 result = 0;
3961 goto out;
3962 }
3963
3964 /*
3965 * end "attaching" phase - now just attached
3966 *
3967 * Mark the thread request overcommit, if apropos
3968 *
3969 * If the attach routine indicated that an
3970 * event is already fired, activate the knote.
3971 */
3972 if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) &&
3973 (kq->kq_state & KQ_WORKLOOP)) {
3974 kqworkloop_set_overcommit((struct kqworkloop *)kq);
3975 }
3976 } else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
3977 /*
3978 * The knote was dropped while we were waiting for the lock,
3979 * we need to re-evaluate entirely
3980 */
3981
3982 goto restart;
3983 } else if (kev->flags & EV_DELETE) {
3984 /*
3985 * Deletion of a knote (drop)
3986 *
3987 * If the filter wants to filter drop events, let it do so.
3988 *
3989 * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote,
3990 * we must wait for the knote to be re-enabled (unless it is being
3991 * re-enabled atomically here).
3992 */
3993
3994 if (knote_fops(kn)->f_allow_drop) {
3995 bool drop;
3996
3997 kqunlock(kq);
3998 drop = knote_fops(kn)->f_allow_drop(kn, kev);
3999 kqlock(kq);
4000
4001 if (!drop) {
4002 goto out_unlock;
4003 }
4004 }
4005
4006 if ((kev->flags & EV_ENABLE) == 0 &&
4007 (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4008 (kn->kn_status & KN_DISABLED) != 0) {
4009 kn->kn_status |= KN_DEFERDELETE;
4010 error = EINPROGRESS;
4011 goto out_unlock;
4012 }
4013
4014 knote_drop(kq, kn, &knlc);
4015 goto out;
4016 } else {
4017 /*
4018 * Regular update of a knote (touch)
4019 *
4020 * Call touch routine to notify filter of changes in filter values
4021 * (and to re-determine if any events are fired).
4022 *
4023 * If the knote is in defer-delete, avoid calling the filter touch
4024 * routine (it has delivered its last event already).
4025 *
4026 * If the touch routine had no failure,
4027 * apply the requested side effects to the knote.
4028 */
4029
4030 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4031 if (kev->flags & EV_ENABLE) {
4032 result = FILTER_ACTIVE;
4033 }
4034 } else {
4035 kqunlock(kq);
4036 result = filter_call(knote_fops(kn), f_touch(kn, kev));
4037 kqlock(kq);
4038 if (result & FILTER_THREADREQ_NODEFEER) {
4039 enable_preemption();
4040 }
4041 }
4042
4043 if (kev->flags & EV_ERROR) {
4044 result = 0;
4045 goto out_unlock;
4046 }
4047
4048 if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 &&
4049 kn->kn_udata != kev->udata) {
4050 // this allows klist_copy_udata() not to take locks
4051 os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed);
4052 }
4053 if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) {
4054 kn->kn_status |= KN_DISABLED;
4055 knote_dequeue(kq, kn);
4056 }
4057 }
4058
4059 /* accept new kevent state */
4060 knote_apply_touch(kq, kn, kev, result);
4061
4062 out_unlock:
4063 /*
4064 * When the filter asked for a post-register wait,
4065 * we leave the kqueue locked for kevent_register()
4066 * to call the filter's f_post_register_wait hook.
4067 */
4068 if (result & FILTER_REGISTER_WAIT) {
4069 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4070 *kn_out = kn;
4071 } else {
4072 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4073 }
4074
4075 out:
4076 /* output local errors through the kevent */
4077 if (error) {
4078 kev->flags |= EV_ERROR;
4079 kev->data = error;
4080 }
4081 return result;
4082 }
4083
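/*
 * Userspace-side sketch (not compiled here): kevent_register() above is
 * the kernel half of each changelist entry passed to kevent(2) and its
 * variants.  A minimal registration of a read knote looks roughly like
 * this; error handling is abbreviated.
 */
#if 0
#include <sys/event.h>
#include <unistd.h>

int
watch_fd_readable(int fd)
{
	int kq = kqueue();
	if (kq < 0) {
		return -1;
	}

	struct kevent kev;
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);

	/* nevents == 0: this call only runs the registration (EV_ADD) path */
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
		close(kq);
		return -1;
	}
	return kq;
}
#endif
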
4084 /*
4085 * knote_process - process a triggered event
4086 *
4087 * Validate that it is really still a triggered event
4088 * by calling the filter routines (if necessary). Hold
4089 * a use reference on the knote to avoid it being detached.
4090 *
4091 * If it is still considered triggered, we will have taken
4092 * a copy of the state under the filter lock. We use that
4093 * snapshot to dispatch the knote for future processing (or
4094 * not, if this was a lost event).
4095 *
4096 * Our caller assures us that nobody else can be processing
4097 * events from this knote during the whole operation. But
4098 * others can be touching or posting events to the knote
4099 * interspersed with our processing it.
4100 *
4101 * caller holds a reference on the kqueue.
4102 * kqueue locked on entry and exit - but may be dropped
4103 */
4104 static int
4105 knote_process(struct knote *kn, kevent_ctx_t kectx,
4106 kevent_callback_t callback)
4107 {
4108 struct kevent_qos_s kev;
4109 struct kqueue *kq = knote_get_kq(kn);
4110 KNOTE_LOCK_CTX(knlc);
4111 int result = FILTER_ACTIVE;
4112 int error = 0;
4113 bool drop = false;
4114
4115 /*
4116 * Must be active
4117 * Must be queued and not disabled/suppressed or dropping
4118 */
4119 assert(kn->kn_status & KN_QUEUED);
4120 assert(kn->kn_status & KN_ACTIVE);
4121 assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING)));
4122
4123 if (kq->kq_state & KQ_WORKLOOP) {
4124 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS),
4125 ((struct kqworkloop *)kq)->kqwl_dynamicid,
4126 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4127 kn->kn_filtid);
4128 } else if (kq->kq_state & KQ_WORKQ) {
4129 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS),
4130 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4131 kn->kn_filtid);
4132 } else {
4133 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS),
4134 VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata,
4135 kn->kn_status | (kn->kn_id << 32), kn->kn_filtid);
4136 }
4137
4138 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) {
4139 /*
4140 * When the knote is dropping or has dropped,
4141 * then there's nothing we want to process.
4142 */
4143 return EJUSTRETURN;
4144 }
4145
4146 /*
4147 * While waiting for the knote lock, we may have dropped the kq lock,
4148 * and a touch may have disabled and dequeued the knote.
4149 */
4150 if (!(kn->kn_status & KN_QUEUED)) {
4151 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4152 return EJUSTRETURN;
4153 }
4154
4155 /*
4156 * For deferred-drop or vanished events, we just create a fake
4157 * event to acknowledge end-of-life. Otherwise, we call the
4158 * filter's process routine to snapshot the kevent state under
4159 * the filter's locking protocol.
4160 *
4161 * suppress knotes to avoid returning the same event multiple times in
4162 * a single call.
4163 */
4164 knote_suppress(kq, kn);
4165
4166 if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) {
4167 uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT;
4168 if (kn->kn_status & KN_DEFERDELETE) {
4169 kev_flags |= EV_DELETE;
4170 } else {
4171 kev_flags |= EV_VANISHED;
4172 }
4173
4174 /* create fake event */
4175 kev = (struct kevent_qos_s){
4176 .filter = kn->kn_filter,
4177 .ident = kn->kn_id,
4178 .flags = kev_flags,
4179 .udata = kn->kn_udata,
4180 };
4181 } else {
4182 kqunlock(kq);
4183 kev = (struct kevent_qos_s) { };
4184 result = filter_call(knote_fops(kn), f_process(kn, &kev));
4185 kqlock(kq);
4186 }
4187
4188 /*
4189 * Determine how to dispatch the knote for future event handling.
4190 * not-fired: just return (do not callout, leave deactivated).
4191 * One-shot: If dispatch2, enter deferred-delete mode (unless this is
4192 * the deferred delete event delivery itself). Otherwise,
4193 * drop it.
4194 * Dispatch: don't clear state, just mark it disabled.
4195 * Cleared: just leave it deactivated.
4196 * Others: re-activate as there may be more events to handle.
4197 * This will not wake up more handlers right now, but
4198 * at the completion of handling events it may trigger
4199 * more handler threads (TODO: optimize based on more than
4200 * just this one event being detected by the filter).
4201 */
4202 if ((result & FILTER_ACTIVE) == 0) {
4203 if ((kn->kn_status & KN_ACTIVE) == 0) {
4204 /*
4205 * Some knotes (like EVFILT_WORKLOOP) can be reactivated from
4206 * within f_process() but that doesn't necessarily make them
4207 * ready to process, so we should leave them be.
4208 *
4209 * For other knotes, since we will not return an event,
4210 * there's no point keeping the knote suppressed.
4211 */
4212 knote_unsuppress(kq, kn);
4213 }
4214 knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS);
4215 return EJUSTRETURN;
4216 }
4217
4218 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
4219 knote_adjust_qos(kq, kn, result);
4220 }
4221
4222 if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) {
4223 kqueue_update_iotier_override(kq);
4224 }
4225
4226 kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override);
4227
4228 if (kev.flags & EV_ONESHOT) {
4229 if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 &&
4230 (kn->kn_status & KN_DEFERDELETE) == 0) {
4231 /* defer dropping non-delete oneshot dispatch2 events */
4232 kn->kn_status |= KN_DEFERDELETE | KN_DISABLED;
4233 } else {
4234 drop = true;
4235 }
4236 } else if (kn->kn_flags & EV_DISPATCH) {
4237 /* disable all dispatch knotes */
4238 kn->kn_status |= KN_DISABLED;
4239 } else if ((kn->kn_flags & EV_CLEAR) == 0) {
4240 /* re-activate in case there are more events */
4241 knote_activate(kq, kn, FILTER_ACTIVE);
4242 }
4243
4244 /*
4245 * callback to handle each event as we find it.
4246 * If we have to detach and drop the knote, do
4247 * it while we have the kq unlocked.
4248 */
4249 if (drop) {
4250 knote_drop(kq, kn, &knlc);
4251 } else {
4252 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
4253 }
4254
4255 if (kev.flags & EV_VANISHED) {
4256 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED),
4257 kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
4258 kn->kn_filtid);
4259 }
4260
4261 error = (callback)(&kev, kectx);
4262 kqlock(kq);
4263 return error;
4264 }
4265
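/*
 * Userspace-side sketch (not compiled here): knote_process() above
 * disables an EV_DISPATCH knote after delivering its event, so the
 * consumer re-arms it with EV_ENABLE once it is done handling.  `kq` and
 * `fd` are assumed to exist; handle_readable() is a hypothetical handler.
 */
#if 0
struct kevent kev;
EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
(void)kevent(kq, &kev, 1, NULL, 0, NULL);

for (;;) {
	struct kevent out;
	if (kevent(kq, NULL, 0, &out, 1, NULL) <= 0) {
		break;
	}
	handle_readable((int)out.ident);

	/* the knote is now disabled; re-enable to receive the next event */
	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
}
#endif
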
4266 /*
4267 * Returns -1 if the kqueue was unbound and processing should not happen
4268 */
4269 #define KQWQAE_BEGIN_PROCESSING 1
4270 #define KQWQAE_END_PROCESSING 2
4271 #define KQWQAE_UNBIND 3
4272 static int
4273 kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr,
4274 int kevent_flags, int kqwqae_op)
4275 {
4276 struct knote *kn;
4277 int rc = 0;
4278 bool unbind;
4279 struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index - 1];
4280 struct kqtailq *queue = &kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1];
4281
4282 kqlock_held(&kqwq->kqwq_kqueue);
4283
4284 /*
4285 * Return suppressed knotes to their original state.
4286 * For workq kqueues, suppressed ones that are still
4287 * truly active (not just forced into the queue) will
4288 * set flags we check below to see if anything got
4289 * woken up.
4290 */
4291 while ((kn = TAILQ_FIRST(suppressq)) != NULL) {
4292 knote_unsuppress(kqwq, kn);
4293 }
4294
4295 if (kqwqae_op == KQWQAE_UNBIND) {
4296 unbind = true;
4297 } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) {
4298 unbind = false;
4299 } else {
4300 unbind = TAILQ_EMPTY(queue);
4301 }
4302 if (unbind) {
4303 thread_t thread = kqr_thread_fast(kqr);
4304 thread_qos_t old_override;
4305
4306 #if DEBUG || DEVELOPMENT
4307 thread_t self = current_thread();
4308 struct uthread *ut = get_bsdthread_info(self);
4309
4310 assert(thread == self);
4311 assert(ut->uu_kqr_bound == kqr);
4312 #endif // DEBUG || DEVELOPMENT
4313
4314 old_override = kqworkq_unbind_locked(kqwq, kqr, thread);
4315 if (!TAILQ_EMPTY(queue)) {
4316 /*
4317 * Request a new thread if we didn't process the whole
4318 * queue.
4319 */
4320 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr,
4321 kqr->tr_kq_qos_index, 0);
4322 }
4323 if (old_override) {
4324 thread_drop_kevent_override(thread);
4325 }
4326 rc = -1;
4327 }
4328
4329 return rc;
4330 }
4331
4332 /*
4333 * Return 0 to indicate that processing should proceed,
4334 * -1 if there is nothing to process.
4335 *
4336 * Called with kqueue locked and returns the same way,
4337 * but may drop lock temporarily.
4338 */
4339 static int
4340 kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4341 int kevent_flags)
4342 {
4343 int rc = 0;
4344
4345 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START,
4346 0, kqr->tr_kq_qos_index);
4347
4348 rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4349 KQWQAE_BEGIN_PROCESSING);
4350
4351 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END,
4352 thread_tid(kqr_thread(kqr)),
4353 !TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
4354
4355 return rc;
4356 }
4357
4358 static thread_qos_t
4359 kqworkloop_acknowledge_events(struct kqworkloop *kqwl)
4360 {
4361 kq_index_t qos = THREAD_QOS_UNSPECIFIED;
4362 struct knote *kn, *tmp;
4363
4364 kqlock_held(kqwl);
4365
4366 TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) {
4367 /*
4368 * If a knote that can adjust QoS is disabled because of the automatic
4369 * behavior of EV_DISPATCH, the knotes should stay suppressed so that
4370 * further overrides keep pushing.
4371 */
4372 if (knote_fops(kn)->f_adjusts_qos &&
4373 (kn->kn_status & KN_DISABLED) != 0 &&
4374 (kn->kn_status & KN_DROPPING) == 0 &&
4375 (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) {
4376 qos = MAX(qos, kn->kn_qos_override);
4377 continue;
4378 }
4379 knote_unsuppress(kqwl, kn);
4380 }
4381
4382 return qos;
4383 }
4384
4385 static int
4386 kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags)
4387 {
4388 workq_threadreq_t kqr = &kqwl->kqwl_request;
4389 struct kqueue *kq = &kqwl->kqwl_kqueue;
4390 int rc = 0, op = KQWL_UTQ_NONE;
4391
4392 kqlock_held(kq);
4393
4394 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START,
4395 kqwl->kqwl_dynamicid, 0, 0);
4396
4397 /* nobody else should still be processing */
4398 assert((kq->kq_state & KQ_PROCESSING) == 0);
4399
4400 kq->kq_state |= KQ_PROCESSING;
4401
4402 if (kevent_flags & KEVENT_FLAG_PARKING) {
4403 /*
4404 * When "parking" we want to process events and if no events are found
4405 * unbind.
4406 *
4407 * However, non-overcommit threads sometimes park even when they have
4408 * more work so that the pool can narrow. For these, we need to unbind
4409 * early, so that calling kqworkloop_update_threads_qos() can ask the
4410 * workqueue subsystem whether the thread should park despite having
4411 * pending events.
4412 */
4413 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
4414 op = KQWL_UTQ_PARKING;
4415 } else {
4416 op = KQWL_UTQ_UNBINDING;
4417 }
4418 } else if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
4419 op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE;
4420 }
4421
4422 if (op != KQWL_UTQ_NONE) {
4423 thread_qos_t qos_override;
4424 thread_t thread = kqr_thread_fast(kqr);
4425
4426 qos_override = kqworkloop_acknowledge_events(kqwl);
4427
4428 if (op == KQWL_UTQ_UNBINDING) {
4429 kqworkloop_unbind_locked(kqwl, thread,
4430 KQWL_OVERRIDE_DROP_IMMEDIATELY);
4431 kqworkloop_release_live(kqwl);
4432 }
4433 kqworkloop_update_threads_qos(kqwl, op, qos_override);
4434 if (op == KQWL_UTQ_PARKING &&
4435 (!kqwl->kqwl_count || kqwl->kqwl_owner)) {
4436 kqworkloop_unbind_locked(kqwl, thread,
4437 KQWL_OVERRIDE_DROP_DELAYED);
4438 kqworkloop_release_live(kqwl);
4439 rc = -1;
4440 } else if (op == KQWL_UTQ_UNBINDING &&
4441 kqr_thread(kqr) != thread) {
4442 rc = -1;
4443 }
4444
4445 if (rc == -1) {
4446 kq->kq_state &= ~KQ_PROCESSING;
4447 kqworkloop_unbind_delayed_override_drop(thread);
4448 }
4449 }
4450
4451 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END,
4452 kqwl->kqwl_dynamicid, 0, 0);
4453
4454 return rc;
4455 }
4456
4457 /*
4458 * Return 0 to indicate that processing should proceed,
4459 * -1 if there is nothing to process.
4460 * EBADF if the kqueue is draining
4461 *
4462 * Called with kqueue locked and returns the same way,
4463 * but may drop lock temporarily.
4464 * May block.
4465 */
4466 static int
4467 kqfile_begin_processing(struct kqfile *kq)
4468 {
4469 kqlock_held(kq);
4470
4471 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4472 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START,
4473 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4474
4475 /* wait to become the exclusive processing thread */
4476 while ((kq->kqf_state & (KQ_PROCESSING | KQ_DRAIN)) == KQ_PROCESSING) {
4477 kq->kqf_state |= KQ_PROCWAIT;
4478 lck_spin_sleep(&kq->kqf_lock, LCK_SLEEP_DEFAULT,
4479 &kq->kqf_suppressed, THREAD_UNINT | THREAD_WAIT_NOREPORT);
4480 }
4481
4482 if (kq->kqf_state & KQ_DRAIN) {
4483 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4484 VM_KERNEL_UNSLIDE_OR_PERM(kq), 2);
4485 return EBADF;
4486 }
4487
4488 /* Nobody else processing */
4489
4490 /* anything left to process? */
4491 if (kq->kqf_count == 0) {
4492 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4493 VM_KERNEL_UNSLIDE_OR_PERM(kq), 1);
4494 return -1;
4495 }
4496
4497 /* convert to processing mode */
4498 kq->kqf_state |= KQ_PROCESSING;
4499
4500 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END,
4501 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4502 return 0;
4503 }
4504
4505 /*
4506 * Try to end the processing, only called when a workq thread is attempting to
4507 * park (KEVENT_FLAG_PARKING is set).
4508 *
4509 * When returning -1, the kqworkq is setup again so that it is ready to be
4510 * processed.
4511 */
4512 static int
4513 kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr,
4514 int kevent_flags)
4515 {
4516 if (kevent_flags & KEVENT_FLAG_PARKING) {
4517 /*
4518 * if acknowledge events "succeeds" it means there are events,
4519 * which is a failure condition for end_processing.
4520 */
4521 int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags,
4522 KQWQAE_END_PROCESSING);
4523 if (rc == 0) {
4524 return -1;
4525 }
4526 }
4527
4528 return 0;
4529 }
4530
4531 /*
4532 * Try to end the processing, only called when a workq thread is attempting to
4533 * park (KEVENT_FLAG_PARKING is set).
4534 *
4535 * When returning -1, the kqworkq is setup again so that it is ready to be
4536 * processed (as if kqworkloop_begin_processing had just been called).
4537 *
4538 * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags,
4539 * the kqworkloop is unbound from its servicer as a side effect.
4540 */
4541 static int
4542 kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags)
4543 {
4544 struct kqueue *kq = &kqwl->kqwl_kqueue;
4545 workq_threadreq_t kqr = &kqwl->kqwl_request;
4546 int rc = 0;
4547
4548 kqlock_held(kq);
4549
4550 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START,
4551 kqwl->kqwl_dynamicid, 0, 0);
4552
4553 if (kevent_flags & KEVENT_FLAG_PARKING) {
4554 thread_t thread = kqr_thread_fast(kqr);
4555 thread_qos_t qos_override;
4556
4557 /*
4558 * When KEVENT_FLAG_PARKING is set, we need to attempt
4559 * an unbind while still under the lock.
4560 *
4561 * So we do everything kqworkloop_unbind() would do, but because
4562 * we're inside kqueue_process(), if the workloop actually
4563 * received events while our locks were dropped, we have
4564 * the opportunity to fail the end processing and loop again.
4565 *
4566 * This avoids going through the process-wide workqueue lock
4567 * hence scales better.
4568 */
4569 assert(flags & KQ_PROCESSING);
4570 qos_override = kqworkloop_acknowledge_events(kqwl);
4571 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override);
4572
4573 if (kqwl->kqwl_wakeup_qos && !kqwl->kqwl_owner) {
4574 rc = -1;
4575 } else {
4576 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
4577 kqworkloop_release_live(kqwl);
4578 kq->kq_state &= ~flags;
4579 kqworkloop_unbind_delayed_override_drop(thread);
4580 }
4581 } else {
4582 kq->kq_state &= ~flags;
4583 kq->kq_state |= KQ_R2K_ARMED;
4584 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0);
4585 }
4586
4587 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END,
4588 kqwl->kqwl_dynamicid, 0, 0);
4589
4590 return rc;
4591 }
4592
4593 /*
4594 * Called with kqueue lock held.
4595 *
4596 * 0: no more events
4597 * -1: has more events
4598 * EBADF: kqueue is in draining mode
4599 */
4600 static int
4601 kqfile_end_processing(struct kqfile *kq)
4602 {
4603 struct knote *kn;
4604 int procwait;
4605
4606 kqlock_held(kq);
4607
4608 assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
4609
4610 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END),
4611 VM_KERNEL_UNSLIDE_OR_PERM(kq), 0);
4612
4613 /*
4614 * Return suppressed knotes to their original state.
4615 */
4616 while ((kn = TAILQ_FIRST(&kq->kqf_suppressed)) != NULL) {
4617 knote_unsuppress(kq, kn);
4618 }
4619
4620 procwait = (kq->kqf_state & KQ_PROCWAIT);
4621 kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT);
4622
4623 if (procwait) {
4624 /* first wake up any thread already waiting to process */
4625 thread_wakeup(&kq->kqf_suppressed);
4626 }
4627
4628 if (kq->kqf_state & KQ_DRAIN) {
4629 return EBADF;
4630 }
4631 return kq->kqf_count != 0 ? -1 : 0;
4632 }
4633
4634 static int
4635 kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options,
4636 struct kqueue_workloop_params *params, int *retval)
4637 {
4638 int error = 0;
4639 struct kqworkloop *kqwl;
4640 struct filedesc *fdp = &p->p_fd;
4641 workq_threadreq_param_t trp = { };
4642
4643 switch (cmd) {
4644 case KQ_WORKLOOP_CREATE:
4645 if (!params->kqwlp_flags) {
4646 error = EINVAL;
4647 break;
4648 }
4649
4650 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) &&
4651 (params->kqwlp_sched_pri < 1 ||
4652 params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) {
4653 error = EINVAL;
4654 break;
4655 }
4656
4657 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) &&
4658 invalid_policy(params->kqwlp_sched_pol)) {
4659 error = EINVAL;
4660 break;
4661 }
4662
4663 if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) &&
4664 (params->kqwlp_cpu_percent <= 0 ||
4665 params->kqwlp_cpu_percent > 100 ||
4666 params->kqwlp_cpu_refillms <= 0 ||
4667 params->kqwlp_cpu_refillms > 0x00ffffff)) {
4668 error = EINVAL;
4669 break;
4670 }
4671
4672 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) {
4673 trp.trp_flags |= TRP_PRIORITY;
4674 trp.trp_pri = (uint8_t)params->kqwlp_sched_pri;
4675 }
4676 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) {
4677 trp.trp_flags |= TRP_POLICY;
4678 trp.trp_pol = (uint8_t)params->kqwlp_sched_pol;
4679 }
4680 if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) {
4681 trp.trp_flags |= TRP_CPUPERCENT;
4682 trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent;
4683 trp.trp_refillms = params->kqwlp_cpu_refillms;
4684 }
4685
4686 error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp,
4687 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4688 KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl);
4689 if (error) {
4690 break;
4691 }
4692
4693 if (!fdt_flag_test(fdp, FD_WORKLOOP)) {
4694 /* FD_WORKLOOP indicates we've ever created a workloop
4695 * via this syscall; it's only ever added to a process, never
4696 * removed.
4697 */
4698 proc_fdlock(p);
4699 fdt_flag_set(fdp, FD_WORKLOOP);
4700 proc_fdunlock(p);
4701 }
4702 break;
4703 case KQ_WORKLOOP_DESTROY:
4704 error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL,
4705 KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP |
4706 KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl);
4707 if (error) {
4708 break;
4709 }
4710 kqlock(kqwl);
4711 trp.trp_value = kqwl->kqwl_params;
4712 if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) {
4713 trp.trp_flags |= TRP_RELEASED;
4714 kqwl->kqwl_params = trp.trp_value;
4715 kqworkloop_release_live(kqwl);
4716 } else {
4717 error = EINVAL;
4718 }
4719 kqunlock(kqwl);
4720 kqworkloop_release(kqwl);
4721 break;
4722 }
4723 *retval = 0;
4724 return error;
4725 }
4726
4727 int
4728 kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval)
4729 {
4730 struct kqueue_workloop_params params = {
4731 .kqwlp_id = 0,
4732 };
4733 if (uap->sz < sizeof(params.kqwlp_version)) {
4734 return EINVAL;
4735 }
4736
4737 size_t copyin_sz = MIN(sizeof(params), uap->sz);
4738 int rv = copyin(uap->addr, &params, copyin_sz);
4739 if (rv) {
4740 return rv;
4741 }
4742
4743 if (params.kqwlp_version != (int)uap->sz) {
4744 return EINVAL;
4745 }
4746
4747 return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params,
4748 retval);
4749 }
4750
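/*
 * Illustrative sketch (not compiled here): a KQ_WORKLOOP_CREATE request
 * consistent with the validation in kqueue_workloop_ctl_internal() above.
 * In practice this private syscall is issued by libpthread/libdispatch on
 * behalf of an application; the id value below is hypothetical.
 */
#if 0
struct kqueue_workloop_params params = {
	.kqwlp_version   = sizeof(params),      /* must equal the sz argument */
	.kqwlp_id        = 0x1234,              /* hypothetical dynamic workloop id */
	.kqwlp_flags     = KQ_WORKLOOP_CREATE_SCHED_PRI,
	.kqwlp_sched_pri = 47,                  /* must stay within 1..MAXPRI_USER */
};
/* issued with cmd == KQ_WORKLOOP_CREATE, addr == &params, sz == sizeof(params) */
#endif
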
4751 static int
4752 kqueue_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx)
4753 {
4754 struct kqfile *kq = (struct kqfile *)fp_get_data(fp);
4755 int retnum = 0;
4756
4757 assert((kq->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4758
4759 if (which == FREAD) {
4760 kqlock(kq);
4761 if (kqfile_begin_processing(kq) == 0) {
4762 retnum = kq->kqf_count;
4763 kqfile_end_processing(kq);
4764 } else if ((kq->kqf_state & KQ_DRAIN) == 0) {
4765 selrecord(kq->kqf_p, &kq->kqf_sel, wql);
4766 }
4767 kqunlock(kq);
4768 }
4769 return retnum;
4770 }
4771
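/*
 * Userspace-side sketch (not compiled here): a kqueue file descriptor can
 * itself be multiplexed with select(2); kqueue_select() above reports it
 * readable whenever events are pending.  `kq` is assumed to be an
 * existing kqueue descriptor.
 */
#if 0
#include <sys/select.h>

fd_set rfds;
FD_ZERO(&rfds);
FD_SET(kq, &rfds);
if (select(kq + 1, &rfds, NULL, NULL, NULL) > 0 && FD_ISSET(kq, &rfds)) {
	/* at least one kevent is pending; drain it with kevent(2) */
}
#endif
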
4772 /*
4773 * kqueue_close -
4774 */
4775 static int
4776 kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx)
4777 {
4778 struct kqfile *kqf = fg_get_data(fg);
4779
4780 assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4781 kqlock(kqf);
4782 selthreadclear(&kqf->kqf_sel);
4783 kqunlock(kqf);
4784 kqueue_dealloc(&kqf->kqf_kqueue);
4785 fg_set_data(fg, NULL);
4786 return 0;
4787 }
4788
4789 /*
4790 * Max depth of the nested kq path that can be created.
4791 * Note that this has to be less than the size of kq_level
4792 * to avoid wrapping around and mislabeling the level. We also
4793 * want to be aggressive about this so that we don't overflow the
4794 * kernel stack while posting kevents
4795 */
4796 #define MAX_NESTED_KQ 10
4797
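/*
 * Userspace-side sketch (not compiled here): "nesting" means registering
 * one kqueue's descriptor inside another with EVFILT_READ, which is what
 * kqueue_kqfilter() below services; the level ceiling above bounds how
 * deep such chains can get.
 */
#if 0
int parent = kqueue();
int child  = kqueue();

struct kevent kev;
EV_SET(&kev, child, EVFILT_READ, EV_ADD, 0, 0, NULL);
(void)kevent(parent, &kev, 1, NULL, 0, NULL);
/* `parent` now reports `child` readable whenever `child` has pending events */
#endif
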
4798 /*
4799 * The caller has taken a use-count reference on this kqueue and will donate it
4800 * to the kqueue we are being added to. This keeps the kqueue from closing until
4801 * that relationship is torn down.
4802 */
4803 static int
4804 kqueue_kqfilter(struct fileproc *fp, struct knote *kn,
4805 __unused struct kevent_qos_s *kev)
4806 {
4807 struct kqfile *kqf = (struct kqfile *)fp_get_data(fp);
4808 struct kqueue *kq = &kqf->kqf_kqueue;
4809 struct kqueue *parentkq = knote_get_kq(kn);
4810
4811 assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4812
4813 if (parentkq == kq || kn->kn_filter != EVFILT_READ) {
4814 knote_set_error(kn, EINVAL);
4815 return 0;
4816 }
4817
4818 /*
4819 * We have to avoid creating a cycle when nesting kqueues
4820 * inside another. Rather than trying to walk the whole
4821 * potential DAG of nested kqueues, we just use a simple
4822 * ceiling protocol. When a kqueue is inserted into another,
4823 * we check that the (future) parent is not already nested
4824 * into another kqueue at a lower level than the potential
4825 * child (because it could indicate a cycle). If that test
4826 * passes, we just mark the nesting levels accordingly.
4827 *
4828 * Only up to MAX_NESTED_KQ can be nested.
4829 *
4830 * Note: kqworkq and kqworkloop cannot be nested and have reused their
4831 * kq_level field, so ignore these as parent.
4832 */
4833
4834 kqlock(parentkq);
4835
4836 if ((parentkq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
4837 if (parentkq->kq_level > 0 &&
4838 parentkq->kq_level < kq->kq_level) {
4839 kqunlock(parentkq);
4840 knote_set_error(kn, EINVAL);
4841 return 0;
4842 }
4843
4844 /* set parent level appropriately */
4845 uint16_t plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level;
4846 if (plevel < kq->kq_level + 1) {
4847 if (kq->kq_level + 1 > MAX_NESTED_KQ) {
4848 kqunlock(parentkq);
4849 knote_set_error(kn, EINVAL);
4850 return 0;
4851 }
4852 plevel = kq->kq_level + 1;
4853 }
4854
4855 parentkq->kq_level = plevel;
4856 }
4857
4858 kqunlock(parentkq);
4859
4860 kn->kn_filtid = EVFILTID_KQREAD;
4861 kqlock(kq);
4862 KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn);
4863 /* indicate nesting in child, if needed */
4864 if (kq->kq_level == 0) {
4865 kq->kq_level = 1;
4866 }
4867
4868 int count = kq->kq_count;
4869 kqunlock(kq);
4870 return count > 0;
4871 }
4872
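/*
 * Worked example of the ceiling protocol above (illustrative numbers):
 * inserting a child whose kq_level is 3 into a fresh parent (level 0)
 * raises the parent to level 4, while inserting the same child into a
 * parent already at level 2 fails with EINVAL, because a parent nested at
 * a lower level than its prospective child could indicate a cycle.
 */
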
4873 __attribute__((noinline))
4874 static void
4875 kqfile_wakeup(struct kqfile *kqf, long hint, wait_result_t wr)
4876 {
4877 /* wakeup a thread waiting on this queue */
4878 selwakeup(&kqf->kqf_sel);
4879
4880 /* wake up threads in kqueue_scan() */
4881 if (kqf->kqf_state & KQ_SLEEP) {
4882 kqf->kqf_state &= ~KQ_SLEEP;
4883 thread_wakeup_with_result(&kqf->kqf_count, wr);
4884 }
4885
4886 if (hint == NOTE_REVOKE) {
4887 /* wakeup threads waiting their turn to process */
4888 if (kqf->kqf_state & KQ_PROCWAIT) {
4889 assert(kqf->kqf_state & KQ_PROCESSING);
4890 kqf->kqf_state &= ~KQ_PROCWAIT;
4891 thread_wakeup(&kqf->kqf_suppressed);
4892 }
4893
4894 /* no need to KNOTE: knote_fdclose() takes care of it */
4895 } else {
4896 /* wakeup other kqueues/select sets we're inside */
4897 KNOTE(&kqf->kqf_sel.si_note, hint);
4898 }
4899 }
4900
4901 /*
4902 * kqueue_drain - called when kq is closed
4903 */
4904 static int
4905 kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx)
4906 {
4907 struct kqfile *kqf = (struct kqfile *)fp_get_data(fp);
4908
4909 assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4910
4911 kqlock(kqf);
4912 kqf->kqf_state |= KQ_DRAIN;
4913 kqfile_wakeup(kqf, NOTE_REVOKE, THREAD_RESTART);
4914 kqunlock(kqf);
4915 return 0;
4916 }
4917
4918 int
4919 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
4920 {
4921 assert((kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0);
4922
4923 kqlock(kq);
4924 if (isstat64 != 0) {
4925 struct stat64 *sb64 = (struct stat64 *)ub;
4926
4927 bzero((void *)sb64, sizeof(*sb64));
4928 sb64->st_size = kq->kq_count;
4929 if (kq->kq_state & KQ_KEV_QOS) {
4930 sb64->st_blksize = sizeof(struct kevent_qos_s);
4931 } else if (kq->kq_state & KQ_KEV64) {
4932 sb64->st_blksize = sizeof(struct kevent64_s);
4933 } else if (IS_64BIT_PROCESS(p)) {
4934 sb64->st_blksize = sizeof(struct user64_kevent);
4935 } else {
4936 sb64->st_blksize = sizeof(struct user32_kevent);
4937 }
4938 sb64->st_mode = S_IFIFO;
4939 } else {
4940 struct stat *sb = (struct stat *)ub;
4941
4942 bzero((void *)sb, sizeof(*sb));
4943 sb->st_size = kq->kq_count;
4944 if (kq->kq_state & KQ_KEV_QOS) {
4945 sb->st_blksize = sizeof(struct kevent_qos_s);
4946 } else if (kq->kq_state & KQ_KEV64) {
4947 sb->st_blksize = sizeof(struct kevent64_s);
4948 } else if (IS_64BIT_PROCESS(p)) {
4949 sb->st_blksize = sizeof(struct user64_kevent);
4950 } else {
4951 sb->st_blksize = sizeof(struct user32_kevent);
4952 }
4953 sb->st_mode = S_IFIFO;
4954 }
4955 kqunlock(kq);
4956 return 0;
4957 }
4958
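/*
 * Userspace-side sketch (not compiled here): fstat(2) on a kqueue
 * descriptor lands in kqueue_stat() above.  st_size is the number of
 * pending events, st_blksize the size of the kevent structure flavor in
 * use, and st_mode reads back as S_IFIFO.  `kq` is assumed to be an
 * existing kqueue descriptor.
 */
#if 0
#include <sys/stat.h>
#include <stdio.h>

struct stat st;
if (fstat(kq, &st) == 0) {
	printf("%lld pending event(s), %d bytes per kevent\n",
	    (long long)st.st_size, (int)st.st_blksize);
}
#endif
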
4959 static inline bool
4960 kqueue_threadreq_can_use_ast(struct kqueue *kq)
4961 {
4962 if (current_proc() == kq->kq_p) {
4963 /*
4964 * Setting an AST from a non-BSD syscall is unsafe: mach_msg_trap() can
4965 * do combined send/receive and in the case of self-IPC, the AST may get
4966 * set on a thread that will not return to userspace and needs the
4967 * thread the AST would create to unblock itself.
4968 *
4969 * At this time, we really want to target:
4970 *
4971 * - kevent variants that can cause thread creations, and dispatch
4972 * really only uses kevent_qos and kevent_id,
4973 *
4974 * - workq_kernreturn (directly about thread creations)
4975 *
4976 * - bsdthread_ctl which is used for qos changes and has direct impact
4977 * on the creator thread scheduling decisions.
4978 */
4979 switch (current_uthread()->syscall_code) {
4980 case SYS_kevent_qos:
4981 case SYS_kevent_id:
4982 case SYS_workq_kernreturn:
4983 case SYS_bsdthread_ctl:
4984 return true;
4985 }
4986 }
4987 return false;
4988 }
4989
4990 /*
4991 * Interact with the pthread kext to request a servicing there at a specific QoS
4992 * level.
4993 *
4994 * - Caller holds the kqlock
4995 *
4996 * - May be called with the kqueue's wait queue set locked,
4997 * so cannot do anything that could recurse on that.
4998 */
4999 static void
5000 kqueue_threadreq_initiate(kqueue_t kqu, workq_threadreq_t kqr,
5001 kq_index_t qos, int flags)
5002 {
5003 assert(kqr_thread(kqr) == THREAD_NULL);
5004 assert(!kqr_thread_requested(kqr));
5005 struct turnstile *ts = TURNSTILE_NULL;
5006
5007 if (workq_is_exiting(kqu.kq->kq_p)) {
5008 return;
5009 }
5010
5011 kqlock_held(kqu);
5012
5013 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5014 struct kqworkloop *kqwl = kqu.kqwl;
5015
5016 assert(kqwl->kqwl_owner == THREAD_NULL);
5017 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST),
5018 kqwl->kqwl_dynamicid, 0, qos, kqwl->kqwl_wakeup_qos);
5019 ts = kqwl->kqwl_turnstile;
5020 /* Add a thread request reference on the kqueue. */
5021 kqworkloop_retain(kqwl);
5022
5023 #if CONFIG_PREADOPT_TG
5024 /* This thread is the one which is ack-ing the thread group on the kqwl
5025 * under the kqlock and will take action accordingly, pairs with the
5026 * release barrier in kqueue_set_preadopted_thread_group */
5027 uint16_t tg_acknowledged;
5028 if (os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg_needs_redrive,
5029 KQWL_PREADOPT_TG_NEEDS_REDRIVE, KQWL_PREADOPT_TG_CLEAR_REDRIVE,
5030 &tg_acknowledged, acquire)) {
5031 flags |= WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG;
5032 }
5033 #endif
5034 } else {
5035 assert(kqu.kq->kq_state & KQ_WORKQ);
5036 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST), -1, 0, qos,
5037 !TAILQ_EMPTY(&kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
5038 }
5039
5040 /*
5041 * New-style thread request supported.
5042 * Provide the pthread kext a pointer to a workq_threadreq_s structure for
5043 * its use until a corresponding kqueue_threadreq_bind callback.
5044 */
5045 if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5046 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5047 }
5048 if (qos == KQWQ_QOS_MANAGER) {
5049 qos = WORKQ_THREAD_QOS_MANAGER;
5050 }
5051
5052 if (!workq_kern_threadreq_initiate(kqu.kq->kq_p, kqr, ts, qos, flags)) {
5053 /*
5054 * Process is shutting down or exec'ing.
5055 * All the kqueues are going to be cleaned up
5056 * soon. Forget we even asked for a thread -
5057 * and make sure we don't ask for more.
5058 */
5059 kqu.kq->kq_state &= ~KQ_R2K_ARMED;
5060 kqueue_release_live(kqu);
5061 }
5062 }
5063
5064 /*
5065 * kqueue_threadreq_bind_prepost - prepost the bind to kevent
5066 *
5067 * This is used when kqueue_threadreq_bind may cause a lock inversion.
5068 */
5069 __attribute__((always_inline))
5070 void
5071 kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t kqr,
5072 struct uthread *ut)
5073 {
5074 ut->uu_kqr_bound = kqr;
5075 kqr->tr_thread = get_machthread(ut);
5076 kqr->tr_state = WORKQ_TR_STATE_BINDING;
5077 }
5078
5079 /*
5080 * kqueue_threadreq_bind_commit - commit a bind prepost
5081 *
5082 * The workq code has to commit any binding prepost before the thread has
5083 * a chance to come back to userspace (and do kevent syscalls) or be aborted.
5084 */
5085 void
5086 kqueue_threadreq_bind_commit(struct proc *p, thread_t thread)
5087 {
5088 struct uthread *ut = get_bsdthread_info(thread);
5089 workq_threadreq_t kqr = ut->uu_kqr_bound;
5090 kqueue_t kqu = kqr_kqueue(p, kqr);
5091
5092 kqlock(kqu);
5093 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5094 kqueue_threadreq_bind(p, kqr, thread, 0);
5095 }
5096 kqunlock(kqu);
5097 }
5098
5099 static void
5100 kqueue_threadreq_modify(kqueue_t kqu, workq_threadreq_t kqr, kq_index_t qos,
5101 workq_kern_threadreq_flags_t flags)
5102 {
5103 assert(kqr_thread_requested_pending(kqr));
5104
5105 kqlock_held(kqu);
5106
5107 if (kqueue_threadreq_can_use_ast(kqu.kq)) {
5108 flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE;
5109 }
5110
5111 #if CONFIG_PREADOPT_TG
5112 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5113 uint16_t tg_ack_status;
5114 struct kqworkloop *kqwl = kqu.kqwl;
5115
5116 /* This thread is the one which is ack-ing the thread group on the kqwl
5117 * under the kqlock and will take action accordingly, needs acquire
5118 * barrier */
5119 if (os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE,
5120 KQWL_PREADOPT_TG_CLEAR_REDRIVE, &tg_ack_status, acquire)) {
5121 flags |= WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG;
5122 }
5123 }
5124 #endif
5125
5126 workq_kern_threadreq_modify(kqu.kq->kq_p, kqr, qos, flags);
5127 }
5128
5129 /*
5130 * kqueue_threadreq_bind - bind thread to processing kqrequest
5131 *
5132 * The provided thread will be responsible for delivering events
5133 * associated with the given kqrequest. Bind it and get ready for
5134 * the thread to eventually arrive.
5135 */
5136 void
5137 kqueue_threadreq_bind(struct proc *p, workq_threadreq_t kqr, thread_t thread,
5138 unsigned int flags)
5139 {
5140 kqueue_t kqu = kqr_kqueue(p, kqr);
5141 struct uthread *ut = get_bsdthread_info(thread);
5142
5143 kqlock_held(kqu);
5144
5145 assert(ut->uu_kqueue_override == 0);
5146
5147 if (kqr->tr_state == WORKQ_TR_STATE_BINDING) {
5148 assert(ut->uu_kqr_bound == kqr);
5149 assert(kqr->tr_thread == thread);
5150 } else {
5151 assert(kqr_thread_requested_pending(kqr));
5152 assert(kqr->tr_thread == THREAD_NULL);
5153 assert(ut->uu_kqr_bound == NULL);
5154 ut->uu_kqr_bound = kqr;
5155 kqr->tr_thread = thread;
5156 }
5157
5158 kqr->tr_state = WORKQ_TR_STATE_BOUND;
5159
5160 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5161 struct turnstile *ts = kqu.kqwl->kqwl_turnstile;
5162
5163 if (__improbable(thread == kqu.kqwl->kqwl_owner)) {
5164 /*
5165 * <rdar://problem/38626999> shows that asserting here is not ok.
5166 *
5167 * This is not supposed to happen for correct use of the interface,
5168 * but it is sadly possible for userspace (with the help of memory
5169 * corruption, such as over-release of a dispatch queue) to make
5170 * the creator thread the "owner" of a workloop.
5171 *
5172 * Once that happens, and that creator thread picks up the same
5173 * workloop as a servicer, we trip this codepath. We need to fixup
5174 * the state to forget about this thread being the owner, as the
5175 * entire workloop state machine expects servicers to never be
5176 * owners and everything would basically go downhill from here.
5177 */
5178 kqu.kqwl->kqwl_owner = THREAD_NULL;
5179 if (kqworkloop_override(kqu.kqwl)) {
5180 thread_drop_kevent_override(thread);
5181 }
5182 }
5183
5184 if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) {
5185 /*
5186 * Past this point, the interlock is the kq req lock again,
5187 * so we can fix the inheritor for good.
5188 */
5189 filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5190 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
5191 }
5192
5193 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid,
5194 thread_tid(thread), kqr->tr_kq_qos_index,
5195 (kqr->tr_kq_override_index << 16) | kqu.kqwl->kqwl_wakeup_qos);
5196
5197 ut->uu_kqueue_override = kqr->tr_kq_override_index;
5198 if (kqr->tr_kq_override_index) {
5199 thread_add_servicer_override(thread, kqr->tr_kq_override_index);
5200 }
5201
5202 #if CONFIG_PREADOPT_TG
5203 /* Remove reference from kqwl and mark it as bound with the SENTINEL */
5204 thread_group_qos_t old_tg;
5205 thread_group_qos_t new_tg;
5206 int ret = os_atomic_rmw_loop(kqr_preadopt_thread_group_addr(kqr), old_tg, new_tg, relaxed, {
5207 if (old_tg == KQWL_PREADOPTED_TG_NEVER) {
5208 os_atomic_rmw_loop_give_up(break); // It's an app, nothing to do
5209 }
5210 assert(old_tg != KQWL_PREADOPTED_TG_PROCESSED);
5211 new_tg = KQWL_PREADOPTED_TG_SENTINEL;
5212 });
5213
5214 if (ret) {
5215 KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqu.kqwl, KQWL_PREADOPT_OP_SERVICER_BIND, old_tg, new_tg);
5216
5217 if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
5218 struct thread_group *tg = KQWL_GET_PREADOPTED_TG(old_tg);
5219 assert(tg != NULL);
5220
5221 thread_set_preadopt_thread_group(thread, tg);
5222 thread_group_release_live(tg); // The thread has a reference
5223 } else {
5224 /*
5225 * The thread may already have a preadopt thread group on it -
5226 * we need to make sure to clear that.
5227 */
5228 thread_set_preadopt_thread_group(thread, NULL);
5229 }
5230
5231 /* We have taken action on the preadopted thread group set on the
5232 * kqwl; clear any redrive requests */
5233 os_atomic_store(&kqu.kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_CLEAR_REDRIVE, relaxed);
5234 }
5235 #endif
5236 kqueue_update_iotier_override(kqu);
5237 } else {
5238 assert(kqr->tr_kq_override_index == 0);
5239
5240 #if CONFIG_PREADOPT_TG
5241 /*
5242 * The thread may have a preadopt thread group on it already because it
5243 * got tagged with it as a creator thread. So we need to make sure to
5244 * clear that since we don't have preadopt thread groups for non-kqwl
5245 * cases
5246 */
5247 thread_set_preadopt_thread_group(thread, NULL);
5248 #endif
5249 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1,
5250 thread_tid(thread), kqr->tr_kq_qos_index,
5251 (kqr->tr_kq_override_index << 16) |
5252 !TAILQ_EMPTY(&kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]));
5253 }
5254 }
5255
5256 /*
5257 * kqueue_threadreq_cancel - abort a pending thread request
5258 *
5259 * Called when exiting/exec'ing. Forget our pending request.
5260 */
5261 void
5262 kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t kqr)
5263 {
5264 kqueue_release(kqr_kqueue(p, kqr));
5265 }
5266
5267 workq_threadreq_param_t
5268 kqueue_threadreq_workloop_param(workq_threadreq_t kqr)
5269 {
5270 struct kqworkloop *kqwl;
5271 workq_threadreq_param_t trp;
5272
5273 assert(kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
5274 kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
5275 trp.trp_value = kqwl->kqwl_params;
5276 return trp;
5277 }
5278
5279 /*
5280 * kqueue_threadreq_unbind - unbind thread from processing kqueue
5281 *
5282 * End processing the per-QoS bucket of events and allow other threads
5283 * to be requested for future servicing.
5284 *
5285 * caller holds a reference on the kqueue.
5286 */
5287 void
5288 kqueue_threadreq_unbind(struct proc *p, workq_threadreq_t kqr)
5289 {
5290 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
5291 kqworkloop_unbind(kqr_kqworkloop(kqr));
5292 } else {
5293 kqworkq_unbind(p, kqr);
5294 }
5295 }
5296
5297 /*
5298 * If we aren't already busy processing events [for this QoS],
5299 * request workq thread support as appropriate.
5300 *
5301 * TBD - for now, we don't segregate out processing by QoS.
5302 *
5303 * - May be called with the kqueue's wait queue set locked,
5304 * so cannot do anything that could recurse on that.
5305 */
5306 static void
5307 kqworkq_wakeup(struct kqworkq *kqwq, kq_index_t qos_index)
5308 {
5309 workq_threadreq_t kqr = kqworkq_get_request(kqwq, qos_index);
5310
5311 /* convert to thread qos value */
5312 assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
5313
5314 if (!kqr_thread_requested(kqr)) {
5315 kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0);
5316 }
5317 }
5318
5319 /*
5320 * This represents the asynchronous QoS a given workloop contributes,
5321 * hence is the max of the current active knotes (override index)
5322 * and the workloop max qos (userspace async qos).
5323 */
5324 static kq_index_t
5325 kqworkloop_override(struct kqworkloop *kqwl)
5326 {
5327 workq_threadreq_t kqr = &kqwl->kqwl_request;
5328 return MAX(kqr->tr_kq_qos_index, kqr->tr_kq_override_index);
5329 }
5330
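/*
 * Worked example (illustrative QoS values): with tr_kq_qos_index at
 * THREAD_QOS_UTILITY and a suppressed knote pushing tr_kq_override_index
 * up to THREAD_QOS_USER_INITIATED, kqworkloop_override() above reports
 * USER_INITIATED; once that override is reset, the contribution falls
 * back to UTILITY.
 */
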
5331 static inline void
5332 kqworkloop_request_fire_r2k_notification(struct kqworkloop *kqwl)
5333 {
5334 workq_threadreq_t kqr = &kqwl->kqwl_request;
5335
5336 kqlock_held(kqwl);
5337
5338 if (kqwl->kqwl_state & KQ_R2K_ARMED) {
5339 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5340 act_set_astkevent(kqr_thread_fast(kqr), AST_KEVENT_RETURN_TO_KERNEL);
5341 }
5342 }
5343
5344 static void
5345 kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos)
5346 {
5347 workq_threadreq_t kqr = &kqwl->kqwl_request;
5348 struct kqueue *kq = &kqwl->kqwl_kqueue;
5349 kq_index_t old_override = kqworkloop_override(kqwl);
5350
5351 kqlock_held(kqwl);
5352
5353 switch (op) {
5354 case KQWL_UTQ_UPDATE_WAKEUP_QOS:
5355 kqwl->kqwl_wakeup_qos = qos;
5356 kqworkloop_request_fire_r2k_notification(kqwl);
5357 goto recompute;
5358
5359 case KQWL_UTQ_RESET_WAKEUP_OVERRIDE:
5360 kqr->tr_kq_override_index = qos;
5361 goto recompute;
5362
5363 case KQWL_UTQ_PARKING:
5364 case KQWL_UTQ_UNBINDING:
5365 kqr->tr_kq_override_index = qos;
5366 OS_FALLTHROUGH;
5367
5368 case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS:
5369 if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) {
5370 assert(qos == THREAD_QOS_UNSPECIFIED);
5371 }
5372 if (TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5373 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5374 }
5375 kqwl->kqwl_wakeup_qos = 0;
5376 for (kq_index_t i = KQWL_NBUCKETS; i > 0; i--) {
5377 if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i - 1])) {
5378 kqwl->kqwl_wakeup_qos = i;
5379 kqworkloop_request_fire_r2k_notification(kqwl);
5380 break;
5381 }
5382 }
5383 OS_FALLTHROUGH;
5384
5385 case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE:
5386 recompute:
5387 /*
5388 * When modifying the wakeup QoS or the override QoS, we always need to
5389 * maintain our invariant that kqr_override_index is at least as large
5390 * as the highest QoS for which an event is fired.
5391 *
5392 * However this override index can be larger when there is an overriden
5393 * suppressed knote pushing on the kqueue.
5394 */
5395 if (qos < kqwl->kqwl_wakeup_qos) {
5396 qos = kqwl->kqwl_wakeup_qos;
5397 }
5398 if (kqr->tr_kq_override_index < qos) {
5399 kqr->tr_kq_override_index = qos;
5400 }
5401 break;
5402
5403 case KQWL_UTQ_REDRIVE_EVENTS:
5404 break;
5405
5406 case KQWL_UTQ_SET_QOS_INDEX:
5407 kqr->tr_kq_qos_index = qos;
5408 break;
5409
5410 default:
5411 panic("unknown kqwl thread qos update operation: %d", op);
5412 }
5413
5414 thread_t kqwl_owner = kqwl->kqwl_owner;
5415 thread_t servicer = kqr_thread(kqr);
5416 boolean_t qos_changed = FALSE;
5417 kq_index_t new_override = kqworkloop_override(kqwl);
5418
5419 /*
5420 * Apply the diffs to the owner if applicable
5421 */
5422 if (kqwl_owner) {
5423 #if 0
5424 /* JMM - need new trace hooks for owner overrides */
5425 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST),
5426 kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->tr_kq_qos_index,
5427 (kqr->tr_kq_override_index << 16) | kqwl->kqwl_wakeup_qos);
5428 #endif
5429 if (new_override == old_override) {
5430 // nothing to do
5431 } else if (old_override == THREAD_QOS_UNSPECIFIED) {
5432 thread_add_kevent_override(kqwl_owner, new_override);
5433 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5434 thread_drop_kevent_override(kqwl_owner);
5435 } else { /* old_override != new_override */
5436 thread_update_kevent_override(kqwl_owner, new_override);
5437 }
5438 }
5439
5440 /*
5441 * apply the diffs to the servicer
5442 */
5443
5444 if (!kqr_thread_requested(kqr)) {
5445 /*
5446 * No servicer, nor thread-request
5447 *
5448 * Make a new thread request, unless there is an owner (or the workloop
5449 * is suspended in userland) or if there is no asynchronous work in the
5450 * first place.
5451 */
5452
5453 if (kqwl_owner == NULL && kqwl->kqwl_wakeup_qos) {
5454 int initiate_flags = 0;
5455 if (op == KQWL_UTQ_UNBINDING) {
5456 initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND;
5457 }
5458
5459 /* kqueue_threadreq_initiate handles the acknowledgement of the TG
5460 * if needed */
5461 kqueue_threadreq_initiate(kq, kqr, new_override, initiate_flags);
5462 }
5463 } else if (servicer) {
5464 /*
5465 * Servicer in flight
5466 *
5467 * Just apply the diff to the servicer
5468 */
5469
5470 #if CONFIG_PREADOPT_TG
5471 /* When there's a servicer for the kqwl already, then the servicer will
5472 * adopt the thread group in the kqr, we don't need to poke the
5473 * workqueue subsystem to make different decisions due to the thread
5474 * group. Consider the current request ack-ed.
5475 */
5476 os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_CLEAR_REDRIVE, relaxed);
5477 #endif
5478
5479 struct uthread *ut = get_bsdthread_info(servicer);
5480 if (ut->uu_kqueue_override != new_override) {
5481 if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) {
5482 thread_add_servicer_override(servicer, new_override);
5483 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5484 thread_drop_servicer_override(servicer);
5485 } else { /* ut->uu_kqueue_override != new_override */
5486 thread_update_servicer_override(servicer, new_override);
5487 }
5488 ut->uu_kqueue_override = new_override;
5489 qos_changed = TRUE;
5490 }
5491 } else if (new_override == THREAD_QOS_UNSPECIFIED) {
5492 /*
5493 * No events to deliver anymore.
5494 *
5495 * However canceling with turnstiles is challenging, so the fact that
5496 * the request isn't useful will be discovered by the servicer himself
5497 * later on.
5498 */
5499 } else if (old_override != new_override) {
5500 /*
5501 * Request is in flight
5502 *
5503 * Apply the diff to the thread request.
5504 */
5505 kqueue_threadreq_modify(kq, kqr, new_override, WORKQ_THREADREQ_NONE);
5506 qos_changed = TRUE;
5507 }
5508
5509 if (qos_changed) {
5510 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid,
5511 thread_tid(servicer), kqr->tr_kq_qos_index,
5512 (kqr->tr_kq_override_index << 16) | kqwl->kqwl_wakeup_qos);
5513 }
5514 }
5515
5516 static void
kqworkloop_update_iotier_override(struct kqworkloop * kqwl)5517 kqworkloop_update_iotier_override(struct kqworkloop *kqwl)
5518 {
5519 workq_threadreq_t kqr = &kqwl->kqwl_request;
5520 thread_t servicer = kqr_thread(kqr);
5521 uint8_t iotier = os_atomic_load(&kqwl->kqwl_iotier_override, relaxed);
5522
5523 kqlock_held(kqwl);
5524
5525 if (servicer) {
5526 thread_update_servicer_iotier_override(servicer, iotier);
5527 }
5528 }
5529
5530 static void
kqworkloop_wakeup(struct kqworkloop * kqwl,kq_index_t qos)5531 kqworkloop_wakeup(struct kqworkloop *kqwl, kq_index_t qos)
5532 {
5533 if (qos <= kqwl->kqwl_wakeup_qos) {
5534 /*
5535 * Shortcut wakeups that really do nothing useful
5536 */
5537 return;
5538 }
5539
5540 if ((kqwl->kqwl_state & KQ_PROCESSING) &&
5541 kqr_thread(&kqwl->kqwl_request) == current_thread()) {
5542 /*
5543 * kqworkloop_end_processing() will perform the required QoS
5544 * computations when it unsets the processing mode.
5545 */
5546 return;
5547 }
5548
5549 kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, qos);
5550 }
5551
5552 static struct kqtailq *
kqueue_get_suppressed_queue(kqueue_t kq,struct knote * kn)5553 kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn)
5554 {
5555 if (kq.kq->kq_state & KQ_WORKLOOP) {
5556 return &kq.kqwl->kqwl_suppressed;
5557 } else if (kq.kq->kq_state & KQ_WORKQ) {
5558 return &kq.kqwq->kqwq_suppressed[kn->kn_qos_index - 1];
5559 } else {
5560 return &kq.kqf->kqf_suppressed;
5561 }
5562 }
5563
5564 struct turnstile *
kqueue_alloc_turnstile(kqueue_t kqu)5565 kqueue_alloc_turnstile(kqueue_t kqu)
5566 {
5567 struct kqworkloop *kqwl = kqu.kqwl;
5568 kq_state_t kq_state;
5569
5570 kq_state = os_atomic_load(&kqu.kq->kq_state, dependency);
5571 if (kq_state & KQ_HAS_TURNSTILE) {
5572 /* force a dependency to pair with the atomic or with release below */
5573 return os_atomic_load_with_dependency_on(&kqwl->kqwl_turnstile,
5574 (uintptr_t)kq_state);
5575 }
5576
5577 if (!(kq_state & KQ_WORKLOOP)) {
5578 return TURNSTILE_NULL;
5579 }
5580
5581 struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL;
5582 bool workq_locked = false;
5583
5584 kqlock(kqu);
5585
5586 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5587 workq_locked = true;
5588 workq_kern_threadreq_lock(kqwl->kqwl_p);
5589 }
5590
5591 if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) {
5592 free_ts = ts;
5593 ts = kqwl->kqwl_turnstile;
5594 } else {
5595 ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile,
5596 ts, TURNSTILE_WORKLOOPS);
5597
5598 /* release-barrier to pair with the unlocked load of kqwl_turnstile above */
5599 os_atomic_or(&kqwl->kqwl_state, KQ_HAS_TURNSTILE, release);
5600
5601 if (filt_wlturnstile_interlock_is_workq(kqwl)) {
5602 workq_kern_threadreq_update_inheritor(kqwl->kqwl_p,
5603 &kqwl->kqwl_request, kqwl->kqwl_owner,
5604 ts, TURNSTILE_IMMEDIATE_UPDATE);
5605 /*
5606 * The workq may no longer be the interlock after this.
5607 * In which case the inheritor wasn't updated.
5608 */
5609 }
5610 if (!filt_wlturnstile_interlock_is_workq(kqwl)) {
5611 filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE);
5612 }
5613 }
5614
5615 if (workq_locked) {
5616 workq_kern_threadreq_unlock(kqwl->kqwl_p);
5617 }
5618
5619 kqunlock(kqu);
5620
5621 if (free_ts) {
5622 turnstile_deallocate(free_ts);
5623 } else {
5624 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
5625 }
5626 return ts;
5627 }
5628
5629 __attribute__((always_inline))
5630 struct turnstile *
kqueue_turnstile(kqueue_t kqu)5631 kqueue_turnstile(kqueue_t kqu)
5632 {
5633 kq_state_t kq_state = os_atomic_load(&kqu.kq->kq_state, relaxed);
5634 if (kq_state & KQ_WORKLOOP) {
5635 return os_atomic_load(&kqu.kqwl->kqwl_turnstile, relaxed);
5636 }
5637 return TURNSTILE_NULL;
5638 }
5639
5640 __attribute__((always_inline))
5641 struct turnstile *
kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)5642 kqueue_threadreq_get_turnstile(workq_threadreq_t kqr)
5643 {
5644 struct kqworkloop *kqwl = kqr_kqworkloop(kqr);
5645 if (kqwl) {
5646 return os_atomic_load(&kqwl->kqwl_turnstile, relaxed);
5647 }
5648 return TURNSTILE_NULL;
5649 }
5650
5651 static void
kqworkloop_set_overcommit(struct kqworkloop * kqwl)5652 kqworkloop_set_overcommit(struct kqworkloop *kqwl)
5653 {
5654 workq_threadreq_t kqr = &kqwl->kqwl_request;
5655
5656 /*
5657 * This test is racy, but since we never remove this bit,
5658 * it allows us to avoid taking a lock.
5659 */
5660 if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
5661 return;
5662 }
5663
5664 kqlock_held(kqwl);
5665
5666 if (kqr_thread_requested_pending(kqr)) {
5667 kqueue_threadreq_modify(kqwl, kqr, kqr->tr_qos,
5668 WORKQ_THREADREQ_MAKE_OVERCOMMIT);
5669 } else {
5670 kqr->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
5671 }
5672 }
5673
5674 static void
kqworkq_update_override(struct kqworkq * kqwq,struct knote * kn,kq_index_t override_index)5675 kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn,
5676 kq_index_t override_index)
5677 {
5678 workq_threadreq_t kqr;
5679 kq_index_t old_override_index;
5680 kq_index_t queue_index = kn->kn_qos_index;
5681
5682 if (override_index <= queue_index) {
5683 return;
5684 }
5685
5686 kqr = kqworkq_get_request(kqwq, queue_index);
5687
5688 kqlock_held(kqwq);
5689
5690 old_override_index = kqr->tr_kq_override_index;
5691 if (override_index > MAX(kqr->tr_kq_qos_index, old_override_index)) {
5692 thread_t servicer = kqr_thread(kqr);
5693 kqr->tr_kq_override_index = override_index;
5694
5695 /* apply the override to [incoming?] servicing thread */
5696 if (servicer) {
5697 if (old_override_index) {
5698 thread_update_kevent_override(servicer, override_index);
5699 } else {
5700 thread_add_kevent_override(servicer, override_index);
5701 }
5702 }
5703 }
5704 }
5705
5706 static void
kqueue_update_iotier_override(kqueue_t kqu)5707 kqueue_update_iotier_override(kqueue_t kqu)
5708 {
5709 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5710 kqworkloop_update_iotier_override(kqu.kqwl);
5711 }
5712 }
5713
5714 static void
kqueue_update_override(kqueue_t kqu,struct knote * kn,thread_qos_t qos)5715 kqueue_update_override(kqueue_t kqu, struct knote *kn, thread_qos_t qos)
5716 {
5717 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5718 kqworkloop_update_threads_qos(kqu.kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE,
5719 qos);
5720 } else {
5721 kqworkq_update_override(kqu.kqwq, kn, qos);
5722 }
5723 }
5724
5725 static void
kqworkloop_unbind_locked(struct kqworkloop * kqwl,thread_t thread,enum kqwl_unbind_locked_mode how)5726 kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread,
5727 enum kqwl_unbind_locked_mode how)
5728 {
5729 struct uthread *ut = get_bsdthread_info(thread);
5730 workq_threadreq_t kqr = &kqwl->kqwl_request;
5731
5732 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid,
5733 thread_tid(thread), 0, 0);
5734
5735 kqlock_held(kqwl);
5736
5737 assert(ut->uu_kqr_bound == kqr);
5738 ut->uu_kqr_bound = NULL;
5739 if (how == KQWL_OVERRIDE_DROP_IMMEDIATELY &&
5740 ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5741 thread_drop_servicer_override(thread);
5742 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5743 }
5744
5745 if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) {
5746 turnstile_update_inheritor(kqwl->kqwl_turnstile,
5747 TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
5748 turnstile_update_inheritor_complete(kqwl->kqwl_turnstile,
5749 TURNSTILE_INTERLOCK_HELD);
5750 }
5751
5752 #if CONFIG_PREADOPT_TG
5753 /* The kqueue is able to adopt a thread group again */
5754
5755 thread_group_qos_t old_tg, new_tg = NULL;
5756 int ret = os_atomic_rmw_loop(kqr_preadopt_thread_group_addr(kqr), old_tg, new_tg, relaxed, {
5757 new_tg = old_tg;
5758 if (old_tg == KQWL_PREADOPTED_TG_SENTINEL || old_tg == KQWL_PREADOPTED_TG_PROCESSED) {
5759 new_tg = KQWL_PREADOPTED_TG_NULL;
5760 }
5761 });
5762 KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_SERVICER_UNBIND, old_tg, KQWL_PREADOPTED_TG_NULL);
5763
5764 if (ret) {
5765 // Servicer can drop any preadopt thread group it has since it has
5766 // unbound.
5767 thread_set_preadopt_thread_group(thread, NULL);
5768 }
5769 #endif
5770 thread_update_servicer_iotier_override(thread, THROTTLE_LEVEL_END);
5771
5772 kqr->tr_thread = THREAD_NULL;
5773 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5774 kqwl->kqwl_state &= ~KQ_R2K_ARMED;
5775 }
5776
5777 static void
kqworkloop_unbind_delayed_override_drop(thread_t thread)5778 kqworkloop_unbind_delayed_override_drop(thread_t thread)
5779 {
5780 struct uthread *ut = get_bsdthread_info(thread);
5781 assert(ut->uu_kqr_bound == NULL);
5782 if (ut->uu_kqueue_override != THREAD_QOS_UNSPECIFIED) {
5783 thread_drop_servicer_override(thread);
5784 ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED;
5785 }
5786 }
5787
5788 /*
5789 * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue
5790 *
5791 * It will acknowledge events, and possibly request a new thread if:
5792 * - there were active events left
5793 * - we pended waitq hook callouts during processing
5794 * - we pended wakeups while processing (or unsuppressing)
5795 *
5796 * Called with kqueue lock held.
5797 */
5798 static void
kqworkloop_unbind(struct kqworkloop * kqwl)5799 kqworkloop_unbind(struct kqworkloop *kqwl)
5800 {
5801 struct kqueue *kq = &kqwl->kqwl_kqueue;
5802 workq_threadreq_t kqr = &kqwl->kqwl_request;
5803 thread_t thread = kqr_thread_fast(kqr);
5804 int op = KQWL_UTQ_PARKING;
5805 kq_index_t qos_override = THREAD_QOS_UNSPECIFIED;
5806
5807 assert(thread == current_thread());
5808
5809 kqlock(kqwl);
5810
5811 /*
5812 * Forcing the KQ_PROCESSING flag allows for QoS updates because of
5813 * unsuppressing knotes not to be applied until the eventual call to
5814 * kqworkloop_update_threads_qos() below.
5815 */
5816 assert((kq->kq_state & KQ_PROCESSING) == 0);
5817 if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) {
5818 kq->kq_state |= KQ_PROCESSING;
5819 qos_override = kqworkloop_acknowledge_events(kqwl);
5820 kq->kq_state &= ~KQ_PROCESSING;
5821 }
5822
5823 kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED);
5824 kqworkloop_update_threads_qos(kqwl, op, qos_override);
5825
5826 kqunlock(kqwl);
5827
5828 /*
5829 * Drop the override on the current thread last, after the call to
5830 * kqworkloop_update_threads_qos above.
5831 */
5832 kqworkloop_unbind_delayed_override_drop(thread);
5833
5834 /* If last reference, dealloc the workloop kq */
5835 kqworkloop_release(kqwl);
5836 }
5837
5838 static thread_qos_t
kqworkq_unbind_locked(struct kqworkq * kqwq,workq_threadreq_t kqr,thread_t thread)5839 kqworkq_unbind_locked(struct kqworkq *kqwq,
5840 workq_threadreq_t kqr, thread_t thread)
5841 {
5842 struct uthread *ut = get_bsdthread_info(thread);
5843 kq_index_t old_override = kqr->tr_kq_override_index;
5844
5845 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1,
5846 thread_tid(kqr_thread(kqr)), kqr->tr_kq_qos_index, 0);
5847
5848 kqlock_held(kqwq);
5849
5850 assert(ut->uu_kqr_bound == kqr);
5851 ut->uu_kqr_bound = NULL;
5852 kqr->tr_thread = THREAD_NULL;
5853 kqr->tr_state = WORKQ_TR_STATE_IDLE;
5854 kqr->tr_kq_override_index = THREAD_QOS_UNSPECIFIED;
5855 kqwq->kqwq_state &= ~KQ_R2K_ARMED;
5856
5857 return old_override;
5858 }
5859
5860 /*
5861 * kqworkq_unbind - unbind of a workq kqueue from a thread
5862 *
5863 * We may have to request new threads.
5864 * This can happen there are no waiting processing threads and:
5865 * - there were active events we never got to (count > 0)
5866 * - we pended waitq hook callouts during processing
5867 * - we pended wakeups while processing (or unsuppressing)
5868 */
5869 static void
kqworkq_unbind(proc_t p,workq_threadreq_t kqr)5870 kqworkq_unbind(proc_t p, workq_threadreq_t kqr)
5871 {
5872 struct kqworkq *kqwq = (struct kqworkq *)p->p_fd.fd_wqkqueue;
5873 __assert_only int rc;
5874
5875 kqlock(kqwq);
5876 rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND);
5877 assert(rc == -1);
5878 kqunlock(kqwq);
5879 }
5880
5881 workq_threadreq_t
kqworkq_get_request(struct kqworkq * kqwq,kq_index_t qos_index)5882 kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index)
5883 {
5884 assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
5885 return &kqwq->kqwq_request[qos_index - 1];
5886 }
5887
5888 static void
knote_reset_priority(kqueue_t kqu,struct knote * kn,pthread_priority_t pp)5889 knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp)
5890 {
5891 kq_index_t qos = _pthread_priority_thread_qos(pp);
5892
5893 if (kqu.kq->kq_state & KQ_WORKLOOP) {
5894 assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0);
5895 pp = _pthread_priority_normalize(pp);
5896 } else if (kqu.kq->kq_state & KQ_WORKQ) {
5897 if (qos == THREAD_QOS_UNSPECIFIED) {
5898 /* On workqueues, outside of QoS means MANAGER */
5899 qos = KQWQ_QOS_MANAGER;
5900 pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
5901 } else {
5902 pp = _pthread_priority_normalize(pp);
5903 }
5904 } else {
5905 pp = _pthread_unspecified_priority();
5906 qos = THREAD_QOS_UNSPECIFIED;
5907 }
5908
5909 kn->kn_qos = (int32_t)pp;
5910
5911 if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) {
5912 /* Never lower QoS when in "Merge" mode */
5913 kn->kn_qos_override = qos;
5914 }
5915
5916 /* only adjust in-use qos index when not suppressed */
5917 if (kn->kn_status & KN_SUPPRESSED) {
5918 kqueue_update_override(kqu, kn, qos);
5919 } else if (kn->kn_qos_index != qos) {
5920 knote_dequeue(kqu, kn);
5921 kn->kn_qos_index = qos;
5922 }
5923 }
5924
5925 static void
knote_adjust_qos(struct kqueue * kq,struct knote * kn,int result)5926 knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result)
5927 {
5928 thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7;
5929
5930 kqlock_held(kq);
5931
5932 assert(result & FILTER_ADJUST_EVENT_QOS_BIT);
5933 assert(qos_index < THREAD_QOS_LAST);
5934
5935 /*
5936 * Early exit for knotes that should not change QoS
5937 */
5938 if (__improbable(!knote_fops(kn)->f_adjusts_qos)) {
5939 panic("filter %d cannot change QoS", kn->kn_filtid);
5940 } else if (__improbable(!knote_has_qos(kn))) {
5941 return;
5942 }
5943
5944 /*
5945 * knotes with the FALLBACK flag will only use their registration QoS if the
5946 * incoming event has no QoS, else, the registration QoS acts as a floor.
5947 */
5948 thread_qos_t req_qos = _pthread_priority_thread_qos_fast(kn->kn_qos);
5949 if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) {
5950 if (qos_index == THREAD_QOS_UNSPECIFIED) {
5951 qos_index = req_qos;
5952 }
5953 } else {
5954 if (qos_index < req_qos) {
5955 qos_index = req_qos;
5956 }
5957 }
5958 if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) {
5959 /* Never lower QoS when in "Merge" mode */
5960 return;
5961 }
5962
5963 if ((kn->kn_status & KN_LOCKED) && (kn->kn_status & KN_POSTING)) {
5964 /*
5965 * When we're trying to update the QoS override and that both an
5966 * f_event() and other f_* calls are running concurrently, any of these
5967 * in flight calls may want to perform overrides that aren't properly
5968 * serialized with each other.
5969 *
5970 * The first update that observes this racy situation enters a "Merge"
5971 * mode which causes subsequent override requests to saturate the
5972 * override instead of replacing its value.
5973 *
5974 * This mode is left when knote_unlock() or knote_post()
5975 * observe that no other f_* routine is in flight.
5976 */
5977 kn->kn_status |= KN_MERGE_QOS;
5978 }
5979
5980 /*
5981 * Now apply the override if it changed.
5982 */
5983
5984 if (kn->kn_qos_override == qos_index) {
5985 return;
5986 }
5987
5988 kn->kn_qos_override = qos_index;
5989
5990 if (kn->kn_status & KN_SUPPRESSED) {
5991 /*
5992 * For suppressed events, the kn_qos_index field cannot be touched as it
5993 * allows us to know on which supress queue the knote is for a kqworkq.
5994 *
5995 * Also, there's no natural push applied on the kqueues when this field
5996 * changes anyway. We hence need to apply manual overrides in this case,
5997 * which will be cleared when the events are later acknowledged.
5998 */
5999 kqueue_update_override(kq, kn, qos_index);
6000 } else if (kn->kn_qos_index != qos_index) {
6001 knote_dequeue(kq, kn);
6002 kn->kn_qos_index = qos_index;
6003 }
6004 }
6005
6006 void
klist_init(struct klist * list)6007 klist_init(struct klist *list)
6008 {
6009 SLIST_INIT(list);
6010 }
6011
6012
6013 /*
6014 * Query/Post each knote in the object's list
6015 *
6016 * The object lock protects the list. It is assumed
6017 * that the filter/event routine for the object can
6018 * determine that the object is already locked (via
6019 * the hint) and not deadlock itself.
6020 *
6021 * The object lock should also hold off pending
6022 * detach/drop operations.
6023 */
6024 void
knote(struct klist * list,long hint)6025 knote(struct klist *list, long hint)
6026 {
6027 struct knote *kn;
6028
6029 SLIST_FOREACH(kn, list, kn_selnext) {
6030 knote_post(kn, hint);
6031 }
6032 }
6033
6034 /*
6035 * attach a knote to the specified list. Return true if this is the first entry.
6036 * The list is protected by whatever lock the object it is associated with uses.
6037 */
6038 int
knote_attach(struct klist * list,struct knote * kn)6039 knote_attach(struct klist *list, struct knote *kn)
6040 {
6041 int ret = SLIST_EMPTY(list);
6042 SLIST_INSERT_HEAD(list, kn, kn_selnext);
6043 return ret;
6044 }
6045
6046 /*
6047 * detach a knote from the specified list. Return true if that was the last entry.
6048 * The list is protected by whatever lock the object it is associated with uses.
6049 */
6050 int
knote_detach(struct klist * list,struct knote * kn)6051 knote_detach(struct klist *list, struct knote *kn)
6052 {
6053 SLIST_REMOVE(list, kn, knote, kn_selnext);
6054 return SLIST_EMPTY(list);
6055 }
6056
6057 /*
6058 * knote_vanish - Indicate that the source has vanished
6059 *
6060 * Used only for vanishing ports - vanishing fds go
6061 * through knote_fdclose()
6062 *
6063 * If the knote has requested EV_VANISHED delivery,
6064 * arrange for that. Otherwise, deliver a NOTE_REVOKE
6065 * event for backward compatibility.
6066 *
6067 * The knote is marked as having vanished. The source's
6068 * reference to the knote is dropped by caller, but the knote's
6069 * source reference is only cleaned up later when the knote is dropped.
6070 *
6071 * Our caller already has the object lock held. Calling
6072 * the detach routine would try to take that lock
6073 * recursively - which likely is not supported.
6074 */
6075 void
knote_vanish(struct klist * list,bool make_active)6076 knote_vanish(struct klist *list, bool make_active)
6077 {
6078 struct knote *kn;
6079 struct knote *kn_next;
6080
6081 SLIST_FOREACH_SAFE(kn, list, kn_selnext, kn_next) {
6082 struct kqueue *kq = knote_get_kq(kn);
6083
6084 kqlock(kq);
6085 if (__probable(kn->kn_status & KN_REQVANISH)) {
6086 /*
6087 * If EV_VANISH supported - prepare to deliver one
6088 */
6089 kn->kn_status |= KN_VANISHED;
6090 } else {
6091 /*
6092 * Handle the legacy way to indicate that the port/portset was
6093 * deallocated or left the current Mach portspace (modern technique
6094 * is with an EV_VANISHED protocol).
6095 *
6096 * Deliver an EV_EOF event for these changes (hopefully it will get
6097 * delivered before the port name recycles to the same generation
6098 * count and someone tries to re-register a kevent for it or the
6099 * events are udata-specific - avoiding a conflict).
6100 */
6101 kn->kn_flags |= EV_EOF | EV_ONESHOT;
6102 }
6103 if (make_active) {
6104 knote_activate(kq, kn, FILTER_ACTIVE);
6105 }
6106 kqunlock(kq);
6107 }
6108 }
6109
6110 /*
6111 * remove all knotes referencing a specified fd
6112 *
6113 * Entered with the proc_fd lock already held.
6114 * It returns the same way, but may drop it temporarily.
6115 */
6116 void
knote_fdclose(struct proc * p,int fd)6117 knote_fdclose(struct proc *p, int fd)
6118 {
6119 struct filedesc *fdt = &p->p_fd;
6120 struct klist *list;
6121 struct knote *kn;
6122 KNOTE_LOCK_CTX(knlc);
6123
6124 restart:
6125 list = &fdt->fd_knlist[fd];
6126 SLIST_FOREACH(kn, list, kn_link) {
6127 struct kqueue *kq = knote_get_kq(kn);
6128
6129 kqlock(kq);
6130
6131 if (kq->kq_p != p) {
6132 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)",
6133 __func__, kq->kq_p, p);
6134 }
6135
6136 /*
6137 * If the knote supports EV_VANISHED delivery,
6138 * transition it to vanished mode (or skip over
6139 * it if already vanished).
6140 */
6141 if (kn->kn_status & KN_VANISHED) {
6142 kqunlock(kq);
6143 continue;
6144 }
6145
6146 proc_fdunlock(p);
6147 if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
6148 /* the knote was dropped by someone, nothing to do */
6149 } else if (kn->kn_status & KN_REQVANISH) {
6150 /*
6151 * Since we have REQVANISH for this knote, we need to notify clients about
6152 * the EV_VANISHED.
6153 *
6154 * But unlike mach ports, we want to do the detach here as well and not
6155 * defer it so that we can release the iocount that is on the knote and
6156 * close the fp.
6157 */
6158 kn->kn_status |= KN_VANISHED;
6159
6160 /*
6161 * There may be a concurrent post happening, make sure to wait for it
6162 * before we detach. knote_wait_for_post() unlocks on kq on exit
6163 */
6164 knote_wait_for_post(kq, kn);
6165
6166 knote_fops(kn)->f_detach(kn);
6167 if (kn->kn_is_fd) {
6168 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6169 }
6170 kn->kn_filtid = EVFILTID_DETACHED;
6171 kqlock(kq);
6172
6173 knote_activate(kq, kn, FILTER_ACTIVE);
6174 knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
6175 } else {
6176 knote_drop(kq, kn, &knlc);
6177 }
6178
6179 proc_fdlock(p);
6180 goto restart;
6181 }
6182 }
6183
6184 /*
6185 * knote_fdfind - lookup a knote in the fd table for process
6186 *
6187 * If the filter is file-based, lookup based on fd index.
6188 * Otherwise use a hash based on the ident.
6189 *
6190 * Matching is based on kq, filter, and ident. Optionally,
6191 * it may also be based on the udata field in the kevent -
6192 * allowing multiple event registration for the file object
6193 * per kqueue.
6194 *
6195 * fd_knhashlock or fdlock held on entry (and exit)
6196 */
6197 static struct knote *
knote_fdfind(struct kqueue * kq,const struct kevent_internal_s * kev,bool is_fd,struct proc * p)6198 knote_fdfind(struct kqueue *kq,
6199 const struct kevent_internal_s *kev,
6200 bool is_fd,
6201 struct proc *p)
6202 {
6203 struct filedesc *fdp = &p->p_fd;
6204 struct klist *list = NULL;
6205 struct knote *kn = NULL;
6206
6207 /*
6208 * determine where to look for the knote
6209 */
6210 if (is_fd) {
6211 /* fd-based knotes are linked off the fd table */
6212 if (kev->kei_ident < (u_int)fdp->fd_knlistsize) {
6213 list = &fdp->fd_knlist[kev->kei_ident];
6214 }
6215 } else if (fdp->fd_knhashmask != 0) {
6216 /* hash non-fd knotes here too */
6217 list = &fdp->fd_knhash[KN_HASH((u_long)kev->kei_ident, fdp->fd_knhashmask)];
6218 }
6219
6220 /*
6221 * scan the selected list looking for a match
6222 */
6223 if (list != NULL) {
6224 SLIST_FOREACH(kn, list, kn_link) {
6225 if (kq == knote_get_kq(kn) &&
6226 kev->kei_ident == kn->kn_id &&
6227 kev->kei_filter == kn->kn_filter) {
6228 if (kev->kei_flags & EV_UDATA_SPECIFIC) {
6229 if ((kn->kn_flags & EV_UDATA_SPECIFIC) &&
6230 kev->kei_udata == kn->kn_udata) {
6231 break; /* matching udata-specific knote */
6232 }
6233 } else if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0) {
6234 break; /* matching non-udata-specific knote */
6235 }
6236 }
6237 }
6238 }
6239 return kn;
6240 }
6241
6242 /*
6243 * kq_add_knote- Add knote to the fd table for process
6244 * while checking for duplicates.
6245 *
6246 * All file-based filters associate a list of knotes by file
6247 * descriptor index. All other filters hash the knote by ident.
6248 *
6249 * May have to grow the table of knote lists to cover the
6250 * file descriptor index presented.
6251 *
6252 * fd_knhashlock and fdlock unheld on entry (and exit).
6253 *
6254 * Takes a rwlock boost if inserting the knote is successful.
6255 */
6256 static int
kq_add_knote(struct kqueue * kq,struct knote * kn,struct knote_lock_ctx * knlc,struct proc * p)6257 kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc,
6258 struct proc *p)
6259 {
6260 struct filedesc *fdp = &p->p_fd;
6261 struct klist *list = NULL;
6262 int ret = 0;
6263 bool is_fd = kn->kn_is_fd;
6264
6265 if (is_fd) {
6266 proc_fdlock(p);
6267 } else {
6268 knhash_lock(fdp);
6269 }
6270
6271 if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) {
6272 /* found an existing knote: we can't add this one */
6273 ret = ERESTART;
6274 goto out_locked;
6275 }
6276
6277 /* knote was not found: add it now */
6278 if (!is_fd) {
6279 if (fdp->fd_knhashmask == 0) {
6280 u_long size = 0;
6281
6282 list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size);
6283 if (list == NULL) {
6284 ret = ENOMEM;
6285 goto out_locked;
6286 }
6287
6288 fdp->fd_knhash = list;
6289 fdp->fd_knhashmask = size;
6290 }
6291
6292 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6293 SLIST_INSERT_HEAD(list, kn, kn_link);
6294 ret = 0;
6295 goto out_locked;
6296 } else {
6297 /* knote is fd based */
6298
6299 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) {
6300 u_int size = 0;
6301
6302 /* Make sure that fd stays below current process's soft limit AND system allowed per-process limits */
6303 if (kn->kn_id >= (uint64_t)proc_limitgetcur_nofile(p)) {
6304 ret = EINVAL;
6305 goto out_locked;
6306 }
6307 /* have to grow the fd_knlist */
6308 size = fdp->fd_knlistsize;
6309 while (size <= kn->kn_id) {
6310 size += KQEXTENT;
6311 }
6312
6313 if (size >= (UINT_MAX / sizeof(struct klist))) {
6314 ret = EINVAL;
6315 goto out_locked;
6316 }
6317
6318 list = kalloc_type(struct klist, size, Z_WAITOK | Z_ZERO);
6319 if (list == NULL) {
6320 ret = ENOMEM;
6321 goto out_locked;
6322 }
6323
6324 bcopy(fdp->fd_knlist, list,
6325 fdp->fd_knlistsize * sizeof(struct klist));
6326 kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist);
6327 fdp->fd_knlist = list;
6328 fdp->fd_knlistsize = size;
6329 }
6330
6331 list = &fdp->fd_knlist[kn->kn_id];
6332 SLIST_INSERT_HEAD(list, kn, kn_link);
6333 ret = 0;
6334 goto out_locked;
6335 }
6336
6337 out_locked:
6338 if (ret == 0) {
6339 kqlock(kq);
6340 assert((kn->kn_status & KN_LOCKED) == 0);
6341 (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK);
6342 kqueue_retain(kq); /* retain a kq ref */
6343 }
6344 if (is_fd) {
6345 proc_fdunlock(p);
6346 } else {
6347 knhash_unlock(fdp);
6348 }
6349
6350 return ret;
6351 }
6352
6353 /*
6354 * kq_remove_knote - remove a knote from the fd table for process
6355 *
6356 * If the filter is file-based, remove based on fd index.
6357 * Otherwise remove from the hash based on the ident.
6358 *
6359 * fd_knhashlock and fdlock unheld on entry (and exit).
6360 */
6361 static void
kq_remove_knote(struct kqueue * kq,struct knote * kn,struct proc * p,struct knote_lock_ctx * knlc)6362 kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p,
6363 struct knote_lock_ctx *knlc)
6364 {
6365 struct filedesc *fdp = &p->p_fd;
6366 struct klist *list = NULL;
6367 uint16_t kq_state;
6368 bool is_fd = kn->kn_is_fd;
6369
6370 if (is_fd) {
6371 proc_fdlock(p);
6372 } else {
6373 knhash_lock(fdp);
6374 }
6375
6376 if (is_fd) {
6377 assert((u_int)fdp->fd_knlistsize > kn->kn_id);
6378 list = &fdp->fd_knlist[kn->kn_id];
6379 } else {
6380 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
6381 }
6382 SLIST_REMOVE(list, kn, knote, kn_link);
6383
6384 kqlock(kq);
6385
6386 /* Update the servicer iotier override */
6387 kqueue_update_iotier_override(kq);
6388
6389 kq_state = kq->kq_state;
6390 if (knlc) {
6391 knote_unlock_cancel(kq, kn, knlc);
6392 } else {
6393 kqunlock(kq);
6394 }
6395 if (is_fd) {
6396 proc_fdunlock(p);
6397 } else {
6398 knhash_unlock(fdp);
6399 }
6400
6401 if (kq_state & KQ_DYNAMIC) {
6402 kqworkloop_release((struct kqworkloop *)kq);
6403 }
6404 }
6405
6406 /*
6407 * kq_find_knote_and_kq_lock - lookup a knote in the fd table for process
6408 * and, if the knote is found, acquires the kqlock while holding the fd table lock/spinlock.
6409 *
6410 * fd_knhashlock or fdlock unheld on entry (and exit)
6411 */
6412
6413 static struct knote *
kq_find_knote_and_kq_lock(struct kqueue * kq,struct kevent_qos_s * kev,bool is_fd,struct proc * p)6414 kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_qos_s *kev,
6415 bool is_fd, struct proc *p)
6416 {
6417 struct filedesc *fdp = &p->p_fd;
6418 struct knote *kn;
6419
6420 if (is_fd) {
6421 proc_fdlock(p);
6422 } else {
6423 knhash_lock(fdp);
6424 }
6425
6426 /*
6427 * Temporary horrible hack:
6428 * this cast is gross and will go away in a future change.
6429 * It is OK to do because we don't look at xflags/s_fflags,
6430 * and that when we cast down the kev this way,
6431 * the truncated filter field works.
6432 */
6433 kn = knote_fdfind(kq, (struct kevent_internal_s *)kev, is_fd, p);
6434
6435 if (kn) {
6436 kqlock(kq);
6437 assert(knote_get_kq(kn) == kq);
6438 }
6439
6440 if (is_fd) {
6441 proc_fdunlock(p);
6442 } else {
6443 knhash_unlock(fdp);
6444 }
6445
6446 return kn;
6447 }
6448
6449 static struct kqtailq *
knote_get_tailq(kqueue_t kqu,struct knote * kn)6450 knote_get_tailq(kqueue_t kqu, struct knote *kn)
6451 {
6452 kq_index_t qos_index = kn->kn_qos_index;
6453
6454 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6455 assert(qos_index > 0 && qos_index <= KQWL_NBUCKETS);
6456 return &kqu.kqwl->kqwl_queue[qos_index - 1];
6457 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6458 assert(qos_index > 0 && qos_index <= KQWQ_NBUCKETS);
6459 return &kqu.kqwq->kqwq_queue[qos_index - 1];
6460 } else {
6461 assert(qos_index == QOS_INDEX_KQFILE);
6462 return &kqu.kqf->kqf_queue;
6463 }
6464 }
6465
6466 static void
knote_enqueue(kqueue_t kqu,struct knote * kn)6467 knote_enqueue(kqueue_t kqu, struct knote *kn)
6468 {
6469 kqlock_held(kqu);
6470
6471 if ((kn->kn_status & KN_ACTIVE) == 0) {
6472 return;
6473 }
6474
6475 if (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING | KN_QUEUED)) {
6476 return;
6477 }
6478
6479 struct kqtailq *queue = knote_get_tailq(kqu, kn);
6480 bool wakeup = TAILQ_EMPTY(queue);
6481
6482 TAILQ_INSERT_TAIL(queue, kn, kn_tqe);
6483 kn->kn_status |= KN_QUEUED;
6484 kqu.kq->kq_count++;
6485
6486 if (wakeup) {
6487 if (kqu.kq->kq_state & KQ_WORKLOOP) {
6488 kqworkloop_wakeup(kqu.kqwl, kn->kn_qos_index);
6489 } else if (kqu.kq->kq_state & KQ_WORKQ) {
6490 kqworkq_wakeup(kqu.kqwq, kn->kn_qos_index);
6491 } else {
6492 kqfile_wakeup(kqu.kqf, 0, THREAD_AWAKENED);
6493 }
6494 }
6495 }
6496
6497 __attribute__((always_inline))
6498 static inline void
knote_dequeue(kqueue_t kqu,struct knote * kn)6499 knote_dequeue(kqueue_t kqu, struct knote *kn)
6500 {
6501 if (kn->kn_status & KN_QUEUED) {
6502 struct kqtailq *queue = knote_get_tailq(kqu, kn);
6503
6504 // attaching the knote calls knote_reset_priority() without
6505 // the kqlock which is fine, so we can't call kqlock_held()
6506 // if we're not queued.
6507 kqlock_held(kqu);
6508
6509 TAILQ_REMOVE(queue, kn, kn_tqe);
6510 kn->kn_status &= ~KN_QUEUED;
6511 kqu.kq->kq_count--;
6512 if ((kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) {
6513 assert((kqu.kq->kq_count == 0) ==
6514 (bool)TAILQ_EMPTY(queue));
6515 }
6516 }
6517 }
6518
6519 /* called with kqueue lock held */
6520 static void
knote_suppress(kqueue_t kqu,struct knote * kn)6521 knote_suppress(kqueue_t kqu, struct knote *kn)
6522 {
6523 struct kqtailq *suppressq;
6524
6525 kqlock_held(kqu);
6526
6527 assert((kn->kn_status & KN_SUPPRESSED) == 0);
6528 assert(kn->kn_status & KN_QUEUED);
6529
6530 knote_dequeue(kqu, kn);
6531 /* deactivate - so new activations indicate a wakeup */
6532 kn->kn_status &= ~KN_ACTIVE;
6533 kn->kn_status |= KN_SUPPRESSED;
6534 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6535 TAILQ_INSERT_TAIL(suppressq, kn, kn_tqe);
6536 }
6537
6538 __attribute__((always_inline))
6539 static inline void
knote_unsuppress_noqueue(kqueue_t kqu,struct knote * kn)6540 knote_unsuppress_noqueue(kqueue_t kqu, struct knote *kn)
6541 {
6542 struct kqtailq *suppressq;
6543
6544 kqlock_held(kqu);
6545
6546 assert(kn->kn_status & KN_SUPPRESSED);
6547
6548 kn->kn_status &= ~KN_SUPPRESSED;
6549 suppressq = kqueue_get_suppressed_queue(kqu, kn);
6550 TAILQ_REMOVE(suppressq, kn, kn_tqe);
6551
6552 /*
6553 * If the knote is no longer active, reset its push,
6554 * and resynchronize kn_qos_index with kn_qos_override
6555 * for knotes with a real qos.
6556 */
6557 if ((kn->kn_status & KN_ACTIVE) == 0 && knote_has_qos(kn)) {
6558 kn->kn_qos_override = _pthread_priority_thread_qos_fast(kn->kn_qos);
6559 }
6560 kn->kn_qos_index = kn->kn_qos_override;
6561 }
6562
6563 /* called with kqueue lock held */
6564 static void
knote_unsuppress(kqueue_t kqu,struct knote * kn)6565 knote_unsuppress(kqueue_t kqu, struct knote *kn)
6566 {
6567 knote_unsuppress_noqueue(kqu, kn);
6568 knote_enqueue(kqu, kn);
6569 }
6570
6571 __attribute__((always_inline))
6572 static inline void
knote_mark_active(struct knote * kn)6573 knote_mark_active(struct knote *kn)
6574 {
6575 if ((kn->kn_status & KN_ACTIVE) == 0) {
6576 KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE),
6577 kn->kn_udata, kn->kn_status | (kn->kn_id << 32),
6578 kn->kn_filtid);
6579 }
6580
6581 kn->kn_status |= KN_ACTIVE;
6582 }
6583
6584 /* called with kqueue lock held */
6585 static void
knote_activate(kqueue_t kqu,struct knote * kn,int result)6586 knote_activate(kqueue_t kqu, struct knote *kn, int result)
6587 {
6588 assert(result & FILTER_ACTIVE);
6589 if (result & FILTER_ADJUST_EVENT_QOS_BIT) {
6590 // may dequeue the knote
6591 knote_adjust_qos(kqu.kq, kn, result);
6592 }
6593 knote_mark_active(kn);
6594 knote_enqueue(kqu, kn);
6595 }
6596
6597 /*
6598 * This function applies changes requested by f_attach or f_touch for
6599 * a given filter. It proceeds in a carefully chosen order to help
6600 * every single transition do the minimal amount of work possible.
6601 */
6602 static void
knote_apply_touch(kqueue_t kqu,struct knote * kn,struct kevent_qos_s * kev,int result)6603 knote_apply_touch(kqueue_t kqu, struct knote *kn, struct kevent_qos_s *kev,
6604 int result)
6605 {
6606 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
6607 kn->kn_status &= ~KN_DISABLED;
6608
6609 /*
6610 * it is possible for userland to have knotes registered for a given
6611 * workloop `wl_orig` but really handled on another workloop `wl_new`.
6612 *
6613 * In that case, rearming will happen from the servicer thread of
6614 * `wl_new` which if `wl_orig` is no longer being serviced, would cause
6615 * this knote to stay suppressed forever if we only relied on
6616 * kqworkloop_acknowledge_events to be called by `wl_orig`.
6617 *
6618 * However if we see the KQ_PROCESSING bit on `wl_orig` set, we can't
6619 * unsuppress because that would mess with the processing phase of
6620 * `wl_orig`, however it also means kqworkloop_acknowledge_events()
6621 * will be called.
6622 */
6623 if (__improbable(kn->kn_status & KN_SUPPRESSED)) {
6624 if ((kqu.kq->kq_state & KQ_PROCESSING) == 0) {
6625 knote_unsuppress_noqueue(kqu, kn);
6626 }
6627 }
6628 }
6629
6630 if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) {
6631 kqueue_update_iotier_override(kqu);
6632 }
6633
6634 if ((result & FILTER_UPDATE_REQ_QOS) && kev->qos && kev->qos != kn->kn_qos) {
6635 // may dequeue the knote
6636 knote_reset_priority(kqu, kn, kev->qos);
6637 }
6638
6639 /*
6640 * When we unsuppress above, or because of knote_reset_priority(),
6641 * the knote may have been dequeued, we need to restore the invariant
6642 * that if the knote is active it needs to be queued now that
6643 * we're done applying changes.
6644 */
6645 if (result & FILTER_ACTIVE) {
6646 knote_activate(kqu, kn, result);
6647 } else {
6648 knote_enqueue(kqu, kn);
6649 }
6650
6651 if ((result & FILTER_THREADREQ_NODEFEER) &&
6652 act_clear_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ)) {
6653 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
6654 }
6655 }
6656
6657 /*
6658 * knote_drop - disconnect and drop the knote
6659 *
6660 * Called with the kqueue locked, returns with the kqueue unlocked.
6661 *
6662 * If a knote locking context is passed, it is canceled.
6663 *
6664 * The knote may have already been detached from
6665 * (or not yet attached to) its source object.
6666 */
6667 static void
knote_drop(struct kqueue * kq,struct knote * kn,struct knote_lock_ctx * knlc)6668 knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc)
6669 {
6670 struct proc *p = kq->kq_p;
6671
6672 kqlock_held(kq);
6673
6674 assert((kn->kn_status & KN_DROPPING) == 0);
6675 if (knlc == NULL) {
6676 assert((kn->kn_status & KN_LOCKED) == 0);
6677 }
6678 kn->kn_status |= KN_DROPPING;
6679
6680 if (kn->kn_status & KN_SUPPRESSED) {
6681 knote_unsuppress_noqueue(kq, kn);
6682 } else {
6683 knote_dequeue(kq, kn);
6684 }
6685 knote_wait_for_post(kq, kn);
6686
6687 knote_fops(kn)->f_detach(kn);
6688
6689 /* kq may be freed when kq_remove_knote() returns */
6690 kq_remove_knote(kq, kn, p, knlc);
6691 if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) {
6692 fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0);
6693 }
6694
6695 knote_free(kn);
6696 }
6697
6698 void
knote_init(void)6699 knote_init(void)
6700 {
6701 #if CONFIG_MEMORYSTATUS
6702 /* Initialize the memorystatus list lock */
6703 memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL);
6704 #endif
6705 }
6706 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
6707
6708 const struct filterops *
knote_fops(struct knote * kn)6709 knote_fops(struct knote *kn)
6710 {
6711 return sysfilt_ops[kn->kn_filtid];
6712 }
6713
6714 static struct knote *
knote_alloc(void)6715 knote_alloc(void)
6716 {
6717 return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
6718 }
6719
6720 static void
knote_free(struct knote * kn)6721 knote_free(struct knote *kn)
6722 {
6723 assert((kn->kn_status & (KN_LOCKED | KN_POSTING)) == 0);
6724 zfree(knote_zone, kn);
6725 }
6726
6727 #pragma mark - syscalls: kevent, kevent64, kevent_qos, kevent_id
6728
6729 kevent_ctx_t
kevent_get_context(thread_t thread)6730 kevent_get_context(thread_t thread)
6731 {
6732 uthread_t ut = get_bsdthread_info(thread);
6733 return &ut->uu_save.uus_kevent;
6734 }
6735
6736 static inline bool
kevent_args_requesting_events(unsigned int flags,int nevents)6737 kevent_args_requesting_events(unsigned int flags, int nevents)
6738 {
6739 return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0;
6740 }
6741
6742 static inline int
kevent_adjust_flags_for_proc(proc_t p,int flags)6743 kevent_adjust_flags_for_proc(proc_t p, int flags)
6744 {
6745 __builtin_assume(p);
6746 return flags | (IS_64BIT_PROCESS(p) ? KEVENT_FLAG_PROC64 : 0);
6747 }
6748
6749 /*!
6750 * @function kevent_get_kqfile
6751 *
6752 * @brief
6753 * Lookup a kqfile by fd.
6754 *
6755 * @discussion
6756 * Callers: kevent, kevent64, kevent_qos
6757 *
6758 * This is not assumed to be a fastpath (kqfile interfaces are legacy)
6759 */
6760 OS_NOINLINE
6761 static int
kevent_get_kqfile(struct proc * p,int fd,int flags,struct fileproc ** fpp,struct kqueue ** kqp)6762 kevent_get_kqfile(struct proc *p, int fd, int flags,
6763 struct fileproc **fpp, struct kqueue **kqp)
6764 {
6765 int error = 0;
6766 struct kqueue *kq;
6767
6768 error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp);
6769 if (__improbable(error)) {
6770 return error;
6771 }
6772 kq = (struct kqueue *)fp_get_data((*fpp));
6773
6774 uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed);
6775 if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) {
6776 kqlock(kq);
6777 kq_state = kq->kq_state;
6778 if (!(kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS))) {
6779 if (flags & KEVENT_FLAG_LEGACY32) {
6780 kq_state |= KQ_KEV32;
6781 } else if (flags & KEVENT_FLAG_LEGACY64) {
6782 kq_state |= KQ_KEV64;
6783 } else {
6784 kq_state |= KQ_KEV_QOS;
6785 }
6786 kq->kq_state = kq_state;
6787 }
6788 kqunlock(kq);
6789 }
6790
6791 /*
6792 * kqfiles can't be used through the legacy kevent()
6793 * and other interfaces at the same time.
6794 */
6795 if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) !=
6796 (bool)(kq_state & KQ_KEV32))) {
6797 fp_drop(p, fd, *fpp, 0);
6798 return EINVAL;
6799 }
6800
6801 *kqp = kq;
6802 return 0;
6803 }
6804
6805 /*!
6806 * @function kevent_get_kqwq
6807 *
6808 * @brief
6809 * Lookup or create the process kqwq (faspath).
6810 *
6811 * @discussion
6812 * Callers: kevent64, kevent_qos
6813 */
6814 OS_ALWAYS_INLINE
6815 static int
kevent_get_kqwq(proc_t p,int flags,int nevents,struct kqueue ** kqp)6816 kevent_get_kqwq(proc_t p, int flags, int nevents, struct kqueue **kqp)
6817 {
6818 struct kqworkq *kqwq = p->p_fd.fd_wqkqueue;
6819
6820 if (__improbable(kevent_args_requesting_events(flags, nevents))) {
6821 return EINVAL;
6822 }
6823 if (__improbable(kqwq == NULL)) {
6824 kqwq = kqworkq_alloc(p, flags);
6825 if (__improbable(kqwq == NULL)) {
6826 return ENOMEM;
6827 }
6828 }
6829
6830 *kqp = &kqwq->kqwq_kqueue;
6831 return 0;
6832 }
6833
6834 #pragma mark kevent copyio
6835
6836 /*!
6837 * @function kevent_get_data_size
6838 *
6839 * @brief
6840 * Copies in the extra data size from user-space.
6841 */
6842 static int
kevent_get_data_size(int flags,user_addr_t data_avail,user_addr_t data_out,kevent_ctx_t kectx)6843 kevent_get_data_size(int flags, user_addr_t data_avail, user_addr_t data_out,
6844 kevent_ctx_t kectx)
6845 {
6846 if (!data_avail || !data_out) {
6847 kectx->kec_data_size = 0;
6848 kectx->kec_data_resid = 0;
6849 } else if (flags & KEVENT_FLAG_PROC64) {
6850 user64_size_t usize = 0;
6851 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6852 if (__improbable(error)) {
6853 return error;
6854 }
6855 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6856 } else {
6857 user32_size_t usize = 0;
6858 int error = copyin((user_addr_t)data_avail, &usize, sizeof(usize));
6859 if (__improbable(error)) {
6860 return error;
6861 }
6862 kectx->kec_data_avail = data_avail;
6863 kectx->kec_data_resid = kectx->kec_data_size = (user_size_t)usize;
6864 }
6865 kectx->kec_data_out = data_out;
6866 kectx->kec_data_avail = data_avail;
6867 return 0;
6868 }
6869
6870 /*!
6871 * @function kevent_put_data_size
6872 *
6873 * @brief
6874 * Copies out the residual data size to user-space if any has been used.
6875 */
6876 static int
kevent_put_data_size(unsigned int flags,kevent_ctx_t kectx)6877 kevent_put_data_size(unsigned int flags, kevent_ctx_t kectx)
6878 {
6879 if (kectx->kec_data_resid == kectx->kec_data_size) {
6880 return 0;
6881 }
6882 if (flags & KEVENT_FLAG_KERNEL) {
6883 *(user_size_t *)(uintptr_t)kectx->kec_data_avail = kectx->kec_data_resid;
6884 return 0;
6885 }
6886 if (flags & KEVENT_FLAG_PROC64) {
6887 user64_size_t usize = (user64_size_t)kectx->kec_data_resid;
6888 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6889 } else {
6890 user32_size_t usize = (user32_size_t)kectx->kec_data_resid;
6891 return copyout(&usize, (user_addr_t)kectx->kec_data_avail, sizeof(usize));
6892 }
6893 }
6894
6895 /*!
6896 * @function kevent_legacy_copyin
6897 *
6898 * @brief
6899 * Handles the copyin of a kevent/kevent64 event.
6900 */
6901 static int
kevent_legacy_copyin(user_addr_t * addrp,struct kevent_qos_s * kevp,unsigned int flags)6902 kevent_legacy_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp, unsigned int flags)
6903 {
6904 int error;
6905
6906 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
6907
6908 if (flags & KEVENT_FLAG_LEGACY64) {
6909 struct kevent64_s kev64;
6910
6911 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6912 if (__improbable(error)) {
6913 return error;
6914 }
6915 *addrp += sizeof(kev64);
6916 *kevp = (struct kevent_qos_s){
6917 .ident = kev64.ident,
6918 .filter = kev64.filter,
6919 /* Make sure user doesn't pass in any system flags */
6920 .flags = kev64.flags & ~EV_SYSFLAGS,
6921 .udata = kev64.udata,
6922 .fflags = kev64.fflags,
6923 .data = kev64.data,
6924 .ext[0] = kev64.ext[0],
6925 .ext[1] = kev64.ext[1],
6926 };
6927 } else if (flags & KEVENT_FLAG_PROC64) {
6928 struct user64_kevent kev64;
6929
6930 error = copyin(*addrp, (caddr_t)&kev64, sizeof(kev64));
6931 if (__improbable(error)) {
6932 return error;
6933 }
6934 *addrp += sizeof(kev64);
6935 *kevp = (struct kevent_qos_s){
6936 .ident = kev64.ident,
6937 .filter = kev64.filter,
6938 /* Make sure user doesn't pass in any system flags */
6939 .flags = kev64.flags & ~EV_SYSFLAGS,
6940 .udata = kev64.udata,
6941 .fflags = kev64.fflags,
6942 .data = kev64.data,
6943 };
6944 } else {
6945 struct user32_kevent kev32;
6946
6947 error = copyin(*addrp, (caddr_t)&kev32, sizeof(kev32));
6948 if (__improbable(error)) {
6949 return error;
6950 }
6951 *addrp += sizeof(kev32);
6952 *kevp = (struct kevent_qos_s){
6953 .ident = (uintptr_t)kev32.ident,
6954 .filter = kev32.filter,
6955 /* Make sure user doesn't pass in any system flags */
6956 .flags = kev32.flags & ~EV_SYSFLAGS,
6957 .udata = CAST_USER_ADDR_T(kev32.udata),
6958 .fflags = kev32.fflags,
6959 .data = (intptr_t)kev32.data,
6960 };
6961 }
6962
6963 return 0;
6964 }
6965
6966 /*!
6967 * @function kevent_modern_copyin
6968 *
6969 * @brief
6970 * Handles the copyin of a kevent_qos/kevent_id event.
6971 */
6972 static int
kevent_modern_copyin(user_addr_t * addrp,struct kevent_qos_s * kevp)6973 kevent_modern_copyin(user_addr_t *addrp, struct kevent_qos_s *kevp)
6974 {
6975 int error = copyin(*addrp, (caddr_t)kevp, sizeof(struct kevent_qos_s));
6976 if (__probable(!error)) {
6977 /* Make sure user doesn't pass in any system flags */
6978 *addrp += sizeof(struct kevent_qos_s);
6979 kevp->flags &= ~EV_SYSFLAGS;
6980 }
6981 return error;
6982 }
6983
6984 /*!
6985 * @function kevent_legacy_copyout
6986 *
6987 * @brief
6988 * Handles the copyout of a kevent/kevent64 event.
6989 */
6990 static int
kevent_legacy_copyout(struct kevent_qos_s * kevp,user_addr_t * addrp,unsigned int flags)6991 kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned int flags)
6992 {
6993 int advance;
6994 int error;
6995
6996 assert((flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64)) != 0);
6997
6998 /*
6999 * fully initialize the differnt output event structure
7000 * types from the internal kevent (and some universal
7001 * defaults for fields not represented in the internal
7002 * form).
7003 *
7004 * Note: these structures have no padding hence the C99
7005 * initializers below do not leak kernel info.
7006 */
7007 if (flags & KEVENT_FLAG_LEGACY64) {
7008 struct kevent64_s kev64 = {
7009 .ident = kevp->ident,
7010 .filter = kevp->filter,
7011 .flags = kevp->flags,
7012 .fflags = kevp->fflags,
7013 .data = (int64_t)kevp->data,
7014 .udata = kevp->udata,
7015 .ext[0] = kevp->ext[0],
7016 .ext[1] = kevp->ext[1],
7017 };
7018 advance = sizeof(struct kevent64_s);
7019 error = copyout((caddr_t)&kev64, *addrp, advance);
7020 } else if (flags & KEVENT_FLAG_PROC64) {
7021 /*
7022 * deal with the special case of a user-supplied
7023 * value of (uintptr_t)-1.
7024 */
7025 uint64_t ident = (kevp->ident == (uintptr_t)-1) ?
7026 (uint64_t)-1LL : (uint64_t)kevp->ident;
7027 struct user64_kevent kev64 = {
7028 .ident = ident,
7029 .filter = kevp->filter,
7030 .flags = kevp->flags,
7031 .fflags = kevp->fflags,
7032 .data = (int64_t) kevp->data,
7033 .udata = (user_addr_t) kevp->udata,
7034 };
7035 advance = sizeof(kev64);
7036 error = copyout((caddr_t)&kev64, *addrp, advance);
7037 } else {
7038 struct user32_kevent kev32 = {
7039 .ident = (uint32_t)kevp->ident,
7040 .filter = kevp->filter,
7041 .flags = kevp->flags,
7042 .fflags = kevp->fflags,
7043 .data = (int32_t)kevp->data,
7044 .udata = (uint32_t)kevp->udata,
7045 };
7046 advance = sizeof(kev32);
7047 error = copyout((caddr_t)&kev32, *addrp, advance);
7048 }
7049 if (__probable(!error)) {
7050 *addrp += advance;
7051 }
7052 return error;
7053 }
7054
7055 /*!
7056 * @function kevent_modern_copyout
7057 *
7058 * @brief
7059 * Handles the copyout of a kevent_qos/kevent_id event.
7060 */
7061 OS_ALWAYS_INLINE
7062 static inline int
kevent_modern_copyout(struct kevent_qos_s * kevp,user_addr_t * addrp)7063 kevent_modern_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp)
7064 {
7065 int error = copyout((caddr_t)kevp, *addrp, sizeof(struct kevent_qos_s));
7066 if (__probable(!error)) {
7067 *addrp += sizeof(struct kevent_qos_s);
7068 }
7069 return error;
7070 }
7071
7072 #pragma mark kevent core implementation
7073
7074 /*!
7075 * @function kevent_callback_inline
7076 *
7077 * @brief
7078 * Callback for each individual event
7079 *
7080 * @discussion
7081 * This is meant to be inlined in kevent_modern_callback and
7082 * kevent_legacy_callback.
7083 */
7084 OS_ALWAYS_INLINE
7085 static inline int
kevent_callback_inline(struct kevent_qos_s * kevp,kevent_ctx_t kectx,bool legacy)7086 kevent_callback_inline(struct kevent_qos_s *kevp, kevent_ctx_t kectx, bool legacy)
7087 {
7088 int error;
7089
7090 assert(kectx->kec_process_noutputs < kectx->kec_process_nevents);
7091
7092 /*
7093 * Copy out the appropriate amount of event data for this user.
7094 */
7095 if (legacy) {
7096 error = kevent_legacy_copyout(kevp, &kectx->kec_process_eventlist,
7097 kectx->kec_process_flags);
7098 } else {
7099 error = kevent_modern_copyout(kevp, &kectx->kec_process_eventlist);
7100 }
7101
7102 /*
7103 * If there isn't space for additional events, return
7104 * a harmless error to stop the processing here
7105 */
7106 if (error == 0 && ++kectx->kec_process_noutputs == kectx->kec_process_nevents) {
7107 error = EWOULDBLOCK;
7108 }
7109 return error;
7110 }
7111
7112 /*!
7113 * @function kevent_modern_callback
7114 *
7115 * @brief
7116 * Callback for each individual modern event.
7117 *
7118 * @discussion
7119 * This callback handles kevent_qos/kevent_id events.
7120 */
7121 static int
kevent_modern_callback(struct kevent_qos_s * kevp,kevent_ctx_t kectx)7122 kevent_modern_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7123 {
7124 return kevent_callback_inline(kevp, kectx, /*legacy*/ false);
7125 }
7126
7127 /*!
7128 * @function kevent_legacy_callback
7129 *
7130 * @brief
7131 * Callback for each individual legacy event.
7132 *
7133 * @discussion
7134 * This callback handles kevent/kevent64 events.
7135 */
7136 static int
kevent_legacy_callback(struct kevent_qos_s * kevp,kevent_ctx_t kectx)7137 kevent_legacy_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
7138 {
7139 return kevent_callback_inline(kevp, kectx, /*legacy*/ true);
7140 }
7141
7142 /*!
7143 * @function kevent_cleanup
7144 *
7145 * @brief
7146 * Handles the cleanup returning from a kevent call.
7147 *
7148 * @discussion
7149 * kevent entry points will take a reference on workloops,
7150 * and a usecount on the fileglob of kqfiles.
7151 *
7152 * This function undoes this on the exit paths of kevents.
7153 *
7154 * @returns
7155 * The error to return to userspace.
7156 */
7157 static int
kevent_cleanup(kqueue_t kqu,int flags,int error,kevent_ctx_t kectx)7158 kevent_cleanup(kqueue_t kqu, int flags, int error, kevent_ctx_t kectx)
7159 {
7160 // poll should not call any codepath leading to this
7161 assert((flags & KEVENT_FLAG_POLL) == 0);
7162
7163 if (flags & KEVENT_FLAG_WORKLOOP) {
7164 kqworkloop_release(kqu.kqwl);
7165 } else if (flags & KEVENT_FLAG_WORKQ) {
7166 /* nothing held */
7167 } else {
7168 fp_drop(kqu.kqf->kqf_p, kectx->kec_fd, kectx->kec_fp, 0);
7169 }
7170
7171 /* don't restart after signals... */
7172 if (error == ERESTART) {
7173 error = EINTR;
7174 } else if (error == 0) {
7175 /* don't abandon other output just because of residual copyout failures */
7176 (void)kevent_put_data_size(flags, kectx);
7177 }
7178
7179 if (flags & KEVENT_FLAG_PARKING) {
7180 thread_t th = current_thread();
7181 struct uthread *uth = get_bsdthread_info(th);
7182 if (uth->uu_kqr_bound) {
7183 thread_unfreeze_base_pri(th);
7184 }
7185 }
7186 return error;
7187 }
7188
7189 /*!
7190 * @function kqueue_process
7191 *
7192 * @brief
7193 * Process the triggered events in a kqueue.
7194 *
7195 * @discussion
7196 * Walk the queued knotes and validate that they are really still triggered
7197 * events by calling the filter routines (if necessary).
7198 *
7199 * For each event that is still considered triggered, invoke the callback
7200 * routine provided.
7201 *
7202 * caller holds a reference on the kqueue.
7203 * kqueue locked on entry and exit - but may be dropped
7204 * kqueue list locked (held for duration of call)
7205 *
7206 * This is only called by kqueue_scan() so that the compiler can inline it.
7207 *
7208 * @returns
7209 * - 0: no event was returned, no other error occurred
7210 * - EBADF: the kqueue is being destroyed (KQ_DRAIN is set)
7211 * - EWOULDBLOCK: (not an error) events have been found and we should return
7212 * - EFAULT: copyout failed
7213 * - filter specific errors
7214 */
7215 static int
7216 kqueue_process(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7217 kevent_callback_t callback)
7218 {
7219 workq_threadreq_t kqr = current_uthread()->uu_kqr_bound;
7220 struct knote *kn;
7221 int error = 0, rc = 0;
7222 struct kqtailq *base_queue, *queue;
7223 uint16_t kq_type = (kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP));
7224
7225 if (kq_type & KQ_WORKQ) {
7226 rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags);
7227 } else if (kq_type & KQ_WORKLOOP) {
7228 rc = kqworkloop_begin_processing(kqu.kqwl, flags);
7229 } else {
7230 kqfile_retry:
7231 rc = kqfile_begin_processing(kqu.kqf);
7232 if (rc == EBADF) {
7233 return EBADF;
7234 }
7235 }
7236
7237 if (rc == -1) {
7238 /* Nothing to process */
7239 return 0;
7240 }
7241
7242 /*
7243 * loop through the enqueued knotes associated with this request,
7244 * processing each one. Each request may have several queues
7245 * of knotes to process (depending on the type of kqueue) so we
7246 * have to loop through all the queues as long as we have additional
7247 * space.
7248 */
7249
7250 process_again:
7251 if (kq_type & KQ_WORKQ) {
7252 base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1];
7253 } else if (kq_type & KQ_WORKLOOP) {
7254 base_queue = &kqu.kqwl->kqwl_queue[0];
7255 queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1];
7256 } else {
7257 base_queue = queue = &kqu.kqf->kqf_queue;
7258 }
7259
7260 do {
7261 while ((kn = TAILQ_FIRST(queue)) != NULL) {
7262 error = knote_process(kn, kectx, callback);
7263 if (error == EJUSTRETURN) {
7264 error = 0;
7265 } else if (__improbable(error)) {
7266 /* error is EWOULDBLOCK when the out event array is full */
7267 goto stop_processing;
7268 }
7269 }
7270 } while (queue-- > base_queue);
7271
7272 if (kectx->kec_process_noutputs) {
7273 /* callers will transform this into no error */
7274 error = EWOULDBLOCK;
7275 }
7276
7277 stop_processing:
7278 /*
7279 * If KEVENT_FLAG_PARKING is set, and no kevents have been returned,
7280 * we want to unbind the kqrequest from the thread.
7281 *
7282 * However, because the kq locks are dropped several times during processing,
7283 * new knotes may have fired again, in which case we want to fail the end
7284 * processing and process again, until it converges.
7285 *
7286 * If we have an error or returned events, end processing never fails.
7287 */
7288 if (error) {
7289 flags &= ~KEVENT_FLAG_PARKING;
7290 }
7291 if (kq_type & KQ_WORKQ) {
7292 rc = kqworkq_end_processing(kqu.kqwq, kqr, flags);
7293 } else if (kq_type & KQ_WORKLOOP) {
7294 rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags);
7295 } else {
7296 rc = kqfile_end_processing(kqu.kqf);
7297 }
7298
7299 if (__probable(error)) {
7300 return error;
7301 }
7302
7303 if (__probable(rc >= 0)) {
7304 assert(rc == 0 || rc == EBADF);
7305 return rc;
7306 }
7307
7308 if (kq_type & (KQ_WORKQ | KQ_WORKLOOP)) {
7309 assert(flags & KEVENT_FLAG_PARKING);
7310 goto process_again;
7311 } else {
7312 goto kqfile_retry;
7313 }
7314 }
7315
7316 /*!
7317 * @function kqueue_scan_continue
7318 *
7319 * @brief
7320 * The continuation used by kqueue_scan for kevent entry points.
7321 *
7322 * @discussion
7323 * Assumes we inherit a use/ref count on the kq or its fileglob.
7324 *
7325 * This is called by kqueue_scan if neither KEVENT_FLAG_POLL nor
7326 * KEVENT_FLAG_KERNEL was set, and the caller had to wait.
7327 */
7328 OS_NORETURN OS_NOINLINE
7329 static void
7330 kqueue_scan_continue(void *data, wait_result_t wait_result)
7331 {
7332 uthread_t ut = current_uthread();
7333 kevent_ctx_t kectx = &ut->uu_save.uus_kevent;
7334 int error = 0, flags = kectx->kec_process_flags;
7335 struct kqueue *kq = data;
7336
7337 /*
7338 * only kevent variants call in here, so we know the callback is
7339 * kevent_legacy_callback or kevent_modern_callback.
7340 */
7341 assert((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0);
7342
7343 switch (wait_result) {
7344 case THREAD_AWAKENED:
7345 if (__improbable(flags & (KEVENT_FLAG_LEGACY32 | KEVENT_FLAG_LEGACY64))) {
7346 error = kqueue_scan(kq, flags, kectx, kevent_legacy_callback);
7347 } else {
7348 error = kqueue_scan(kq, flags, kectx, kevent_modern_callback);
7349 }
7350 break;
7351 case THREAD_TIMED_OUT:
7352 error = 0;
7353 break;
7354 case THREAD_INTERRUPTED:
7355 error = EINTR;
7356 break;
7357 case THREAD_RESTART:
7358 error = EBADF;
7359 break;
7360 default:
7361 panic("%s: - invalid wait_result (%d)", __func__, wait_result);
7362 }
7363
7364
7365 error = kevent_cleanup(kq, flags, error, kectx);
7366 *(int32_t *)&ut->uu_rval = kectx->kec_process_noutputs;
7367 unix_syscall_return(error);
7368 }
7369
7370 /*!
7371 * @function kqueue_scan
7372 *
7373 * @brief
7374 * Scan and wait for events in a kqueue (used by poll & kevent).
7375 *
7376 * @discussion
7377 * Process the triggered events in a kqueue.
7378 *
7379 * If there are no events triggered arrange to wait for them:
7380 * - unless KEVENT_FLAG_IMMEDIATE is set in kectx->kec_process_flags
7381 * - possibly until kectx->kec_deadline expires
7382 *
7383 * When it waits, and neither KEVENT_FLAG_POLL nor KEVENT_FLAG_KERNEL
7384 * is set, it will wait in the kqueue_scan_continue continuation.
7385 *
7386 * poll() will block in place, and KEVENT_FLAG_KERNEL calls
7387 * all pass KEVENT_FLAG_IMMEDIATE and will not wait.
7388 *
7389 * @param kqu
7390 * The kqueue being scanned.
7391 *
7392 * @param flags
7393 * The KEVENT_FLAG_* flags for this call.
7394 *
7395 * @param kectx
7396 * The context used for this scan.
7397 * The uthread_t::uu_save.uus_kevent storage is used for this purpose.
7398 *
7399 * @param callback
7400 * The callback to be called on events successfully processed.
7401 * (Either kevent_legacy_callback, kevent_modern_callback or poll_callback)
7402 */
7403 int
7404 kqueue_scan(kqueue_t kqu, int flags, kevent_ctx_t kectx,
7405 kevent_callback_t callback)
7406 {
7407 int error;
7408
7409 for (;;) {
7410 kqlock(kqu);
7411 error = kqueue_process(kqu, flags, kectx, callback);
7412
7413 /*
7414 * If we got an error, events returned (EWOULDBLOCK)
7415 * or blocking was disallowed (KEVENT_FLAG_IMMEDIATE),
7416 * just return.
7417 */
7418 if (__probable(error || (flags & KEVENT_FLAG_IMMEDIATE))) {
7419 kqunlock(kqu);
7420 return error == EWOULDBLOCK ? 0 : error;
7421 }
7422
7423 assert((kqu.kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0);
7424
7425 kqu.kqf->kqf_state |= KQ_SLEEP;
7426 assert_wait_deadline(&kqu.kqf->kqf_count, THREAD_ABORTSAFE,
7427 kectx->kec_deadline);
7428 kqunlock(kqu);
7429
7430 if (__probable((flags & (KEVENT_FLAG_POLL | KEVENT_FLAG_KERNEL)) == 0)) {
7431 thread_block_parameter(kqueue_scan_continue, kqu.kqf);
7432 __builtin_unreachable();
7433 }
7434
7435 wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
7436 switch (wr) {
7437 case THREAD_AWAKENED:
7438 break;
7439 case THREAD_TIMED_OUT:
7440 return 0;
7441 case THREAD_INTERRUPTED:
7442 return EINTR;
7443 case THREAD_RESTART:
7444 return EBADF;
7445 default:
7446 panic("%s: - bad wait_result (%d)", __func__, wr);
7447 }
7448 }
7449 }
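/*
 * Illustrative sketch (not part of the build): how the blocking behavior of
 * kqueue_scan() surfaces through the public kevent() interface. A caller that
 * passes a NULL timeout parks until a knote fires (in kqueue_scan_continue),
 * while a zeroed timespec effectively polls without blocking.
 *
 *	#include <sys/event.h>
 *	#include <unistd.h>
 *
 *	int kq = kqueue();
 *	struct kevent change, event;
 *	EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	(void)kevent(kq, &change, 1, NULL, 0, NULL);   // register only
 *	int n = kevent(kq, NULL, 0, &event, 1, NULL);  // blocks until readable
 */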
7450
7451 /*!
7452 * @function kevent_internal
7453 *
7454 * @brief
7455 * Common kevent code.
7456 *
7457 * @discussion
7458 * Needs to be inlined to specialize for legacy or modern and
7459 * eliminate dead code.
7460 *
7461 * This is the core logic of kevent entry points, that will:
7462 * - register kevents
7463 * - optionally scan the kqueue for events
7464 *
7465 * The caller is giving kevent_internal a reference on the kqueue
7466 * or its fileproc that needs to be cleaned up by kevent_cleanup().
7467 */
7468 OS_ALWAYS_INLINE
7469 static inline int
7470 kevent_internal(kqueue_t kqu,
7471 user_addr_t changelist, int nchanges,
7472 user_addr_t ueventlist, int nevents,
7473 int flags, kevent_ctx_t kectx, int32_t *retval,
7474 bool legacy)
7475 {
7476 int error = 0, noutputs = 0, register_rc;
7477
7478 /* only bound threads can receive events on workloops */
7479 if (!legacy && (flags & KEVENT_FLAG_WORKLOOP)) {
7480 #if CONFIG_WORKLOOP_DEBUG
7481 UU_KEVENT_HISTORY_WRITE_ENTRY(current_uthread(), {
7482 .uu_kqid = kqu.kqwl->kqwl_dynamicid,
7483 .uu_kq = error ? NULL : kqu.kq,
7484 .uu_error = error,
7485 .uu_nchanges = nchanges,
7486 .uu_nevents = nevents,
7487 .uu_flags = flags,
7488 });
7489 #endif // CONFIG_WORKLOOP_DEBUG
7490
7491 if (flags & KEVENT_FLAG_KERNEL) {
7492 /* see kevent_workq_internal */
7493 error = copyout(&kqu.kqwl->kqwl_dynamicid,
7494 ueventlist - sizeof(kqueue_id_t), sizeof(kqueue_id_t));
7495 kectx->kec_data_resid -= sizeof(kqueue_id_t);
7496 if (__improbable(error)) {
7497 goto out;
7498 }
7499 }
7500
7501 if (kevent_args_requesting_events(flags, nevents)) {
7502 /*
7503 * Disable the R2K notification while doing a register: if the
7504 * caller wants events too, we don't want the AST to be set since we
7505 * will process these events soon.
7506 */
7507 kqlock(kqu);
7508 kqu.kq->kq_state &= ~KQ_R2K_ARMED;
7509 kqunlock(kqu);
7510 flags |= KEVENT_FLAG_NEEDS_END_PROCESSING;
7511 }
7512 }
7513
7514 /* register all the change requests the user provided... */
7515 while (nchanges > 0 && error == 0) {
7516 struct kevent_qos_s kev;
7517 struct knote *kn = NULL;
7518
7519 if (legacy) {
7520 error = kevent_legacy_copyin(&changelist, &kev, flags);
7521 } else {
7522 error = kevent_modern_copyin(&changelist, &kev);
7523 }
7524 if (error) {
7525 break;
7526 }
7527
7528 register_rc = kevent_register(kqu.kq, &kev, &kn);
7529 if (__improbable(!legacy && (register_rc & FILTER_REGISTER_WAIT))) {
7530 thread_t thread = current_thread();
7531
7532 kqlock_held(kqu);
7533
7534 if (act_clear_astkevent(thread, AST_KEVENT_REDRIVE_THREADREQ)) {
7535 workq_kern_threadreq_redrive(kqu.kq->kq_p, WORKQ_THREADREQ_NONE);
7536 }
7537
7538 // f_post_register_wait is meant to call a continuation and not to
7539 // return, which is why we don't support FILTER_REGISTER_WAIT if
7540 // KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that
7541 // waits isn't the last.
7542 //
7543 // It is implementable, but not used by any userspace code at the
7544 // moment, so for now return ENOTSUP if someone tries to do it.
7545 if (nchanges == 1 && noutputs < nevents &&
7546 (flags & KEVENT_FLAG_KERNEL) == 0 &&
7547 (flags & KEVENT_FLAG_PARKING) == 0 &&
7548 (flags & KEVENT_FLAG_ERROR_EVENTS) &&
7549 (flags & KEVENT_FLAG_WORKLOOP)) {
7550 uthread_t ut = get_bsdthread_info(thread);
7551
7552 /*
7553 * store the continuation/completion data in the uthread
7554 *
7555 * Note: the kectx aliases with this,
7556 * and is destroyed in the process.
7557 */
7558 ut->uu_save.uus_kevent_register = (struct _kevent_register){
7559 .kev = kev,
7560 .kqwl = kqu.kqwl,
7561 .eventout = noutputs,
7562 .ueventlist = ueventlist,
7563 };
7564 knote_fops(kn)->f_post_register_wait(ut, kn,
7565 &ut->uu_save.uus_kevent_register);
7566 __builtin_unreachable();
7567 }
7568 kqunlock(kqu);
7569
7570 kev.flags |= EV_ERROR;
7571 kev.data = ENOTSUP;
7572 } else {
7573 assert((register_rc & FILTER_REGISTER_WAIT) == 0);
7574 }
7575
7576 // keep in sync with kevent_register_wait_return()
7577 if (noutputs < nevents && (kev.flags & (EV_ERROR | EV_RECEIPT))) {
7578 if ((kev.flags & EV_ERROR) == 0) {
7579 kev.flags |= EV_ERROR;
7580 kev.data = 0;
7581 }
7582 if (legacy) {
7583 error = kevent_legacy_copyout(&kev, &ueventlist, flags);
7584 } else {
7585 error = kevent_modern_copyout(&kev, &ueventlist);
7586 }
7587 if (error == 0) {
7588 noutputs++;
7589 }
7590 } else if (kev.flags & EV_ERROR) {
7591 error = (int)kev.data;
7592 }
7593 nchanges--;
7594 }
7595
7596 if ((flags & KEVENT_FLAG_ERROR_EVENTS) == 0 &&
7597 nevents > 0 && noutputs == 0 && error == 0) {
7598 kectx->kec_process_flags = flags;
7599 kectx->kec_process_nevents = nevents;
7600 kectx->kec_process_noutputs = 0;
7601 kectx->kec_process_eventlist = ueventlist;
7602
7603 if (legacy) {
7604 error = kqueue_scan(kqu.kq, flags, kectx, kevent_legacy_callback);
7605 } else {
7606 error = kqueue_scan(kqu.kq, flags, kectx, kevent_modern_callback);
7607 }
7608
7609 noutputs = kectx->kec_process_noutputs;
7610 } else if (!legacy && (flags & KEVENT_FLAG_NEEDS_END_PROCESSING)) {
7611 /*
7612 * If we didn't go through kqworkloop_end_processing(),
7613 * we need to do it here.
7614 *
7615 * kqueue_scan will call kqworkloop_end_processing(),
7616 * so we only need to do it if we didn't scan.
7617 */
7618 kqlock(kqu);
7619 kqworkloop_end_processing(kqu.kqwl, 0, 0);
7620 kqunlock(kqu);
7621 }
7622
7623 *retval = noutputs;
7624 out:
7625 return kevent_cleanup(kqu.kq, flags, error, kectx);
7626 }
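/*
 * Illustrative sketch (not part of the build): the EV_RECEIPT handling in
 * kevent_internal() is what lets userspace batch registrations and still learn
 * which individual change failed, instead of a single errno for the whole call.
 *
 *	struct kevent changes[2], out[2];
 *	EV_SET(&changes[0], fd0, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *	EV_SET(&changes[1], fd1, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *	int n = kevent(kq, changes, 2, out, 2, NULL);
 *	for (int i = 0; i < n; i++) {
 *		if ((out[i].flags & EV_ERROR) && out[i].data != 0) {
 *			// out[i].data carries the errno for that change
 *		}
 *	}
 */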
7627
7628 #pragma mark modern syscalls: kevent_qos, kevent_id, kevent_workq_internal
7629
7630 /*!
7631 * @function kevent_modern_internal
7632 *
7633 * @brief
7634 * The backend of the kevent_id and kevent_workq_internal entry points.
7635 *
7636 * @discussion
7637 * Needs to be inline due to the number of arguments.
7638 */
7639 OS_NOINLINE
7640 static int
7641 kevent_modern_internal(kqueue_t kqu,
7642 user_addr_t changelist, int nchanges,
7643 user_addr_t ueventlist, int nevents,
7644 int flags, kevent_ctx_t kectx, int32_t *retval)
7645 {
7646 return kevent_internal(kqu.kq, changelist, nchanges,
7647 ueventlist, nevents, flags, kectx, retval, /*legacy*/ false);
7648 }
7649
7650 /*!
7651 * @function kevent_id
7652 *
7653 * @brief
7654 * The kevent_id() syscall.
7655 */
7656 int
7657 kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval)
7658 {
7659 int error, flags = uap->flags & KEVENT_FLAG_USER;
7660 uthread_t uth = current_uthread();
7661 workq_threadreq_t kqr = uth->uu_kqr_bound;
7662 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7663 kqueue_t kqu;
7664
7665 flags = kevent_adjust_flags_for_proc(p, flags);
7666 flags |= KEVENT_FLAG_DYNAMIC_KQUEUE;
7667
7668 if (__improbable((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)) !=
7669 KEVENT_FLAG_WORKLOOP)) {
7670 return EINVAL;
7671 }
7672
7673 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7674 if (__improbable(error)) {
7675 return error;
7676 }
7677
7678 kectx->kec_deadline = 0;
7679 kectx->kec_fp = NULL;
7680 kectx->kec_fd = -1;
7681 /* the kec_process_* fields are only filled if kqueue_scan is called */
7682
7683 /*
7684 * Get the kq we are going to be working on
7685 * As a fastpath, look at the currently bound workloop.
7686 */
7687 kqu.kqwl = kqr ? kqr_kqworkloop(kqr) : NULL;
7688 if (kqu.kqwl && kqu.kqwl->kqwl_dynamicid == uap->id) {
7689 if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) {
7690 return EEXIST;
7691 }
7692 kqworkloop_retain(kqu.kqwl);
7693 } else if (__improbable(kevent_args_requesting_events(flags, uap->nevents))) {
7694 return EXDEV;
7695 } else {
7696 error = kqworkloop_get_or_create(p, uap->id, NULL, flags, &kqu.kqwl);
7697 if (__improbable(error)) {
7698 return error;
7699 }
7700 }
7701
7702 return kevent_modern_internal(kqu, uap->changelist, uap->nchanges,
7703 uap->eventlist, uap->nevents, flags, kectx, retval);
7704 }
7705
7706 /*!
7707 * @function kevent_workq_internal
7708 *
7709 * @discussion
7710 * This function is exported for the sake of the workqueue subsystem.
7711 *
7712 * It is called in two ways:
7713 * - when a thread is about to go to userspace to ask for pending events
7714 * - when a thread is returning from userspace with events back
7715 *
7716 * the workqueue subsystem will only use the following flags:
7717 * - KEVENT_FLAG_STACK_DATA (always)
7718 * - KEVENT_FLAG_IMMEDIATE (always)
7719 * - KEVENT_FLAG_PARKING (depending on whether it is going to or returning from
7720 * userspace).
7721 *
7722 * It implicitly acts on the bound kqueue, and for the case of workloops
7723 * will copyout the kqueue ID before anything else.
7724 *
7725 *
7726 * Pthread will have set up the various arguments to fit this stack layout:
7727 *
7728 * +-------....----+--------------+-----------+--------------------+
7729 * | user stack | data avail | nevents | pthread_self() |
7730 * +-------....----+--------------+-----------+--------------------+
7731 * ^ ^
7732 * data_out eventlist
7733 *
7734 * When a workloop is used, the workloop ID is copied out right before
7735 * the eventlist and is taken from the data buffer.
7736 *
7737 * @warning
7738 * This function is carefully tailored to not make any call except the final tail
7739 * call into kevent_modern_internal. (LTO inlines current_uthread()).
7740 *
7741 * This function is performance sensitive due to the workq subsystem.
7742 */
7743 int
7744 kevent_workq_internal(struct proc *p,
7745 user_addr_t changelist, int nchanges,
7746 user_addr_t eventlist, int nevents,
7747 user_addr_t data_out, user_size_t *data_available,
7748 unsigned int flags, int32_t *retval)
7749 {
7750 uthread_t uth = current_uthread();
7751 workq_threadreq_t kqr = uth->uu_kqr_bound;
7752 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7753 kqueue_t kqu;
7754
7755 assert(flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE) ||
7756 flags == (KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_PARKING));
7757
7758 kectx->kec_data_out = data_out;
7759 kectx->kec_data_avail = (uint64_t)data_available;
7760 kectx->kec_data_size = *data_available;
7761 kectx->kec_data_resid = *data_available;
7762 kectx->kec_deadline = 0;
7763 kectx->kec_fp = NULL;
7764 kectx->kec_fd = -1;
7765 /* the kec_process_* fields are only filled if kqueue_scan is called */
7766
7767 flags = kevent_adjust_flags_for_proc(p, flags);
7768
7769 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
7770 kqu.kqwl = __container_of(kqr, struct kqworkloop, kqwl_request);
7771 kqworkloop_retain(kqu.kqwl);
7772
7773 flags |= KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_DYNAMIC_KQUEUE |
7774 KEVENT_FLAG_KERNEL;
7775 } else {
7776 kqu.kqwq = p->p_fd.fd_wqkqueue;
7777
7778 flags |= KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL;
7779 }
7780
7781 return kevent_modern_internal(kqu, changelist, nchanges,
7782 eventlist, nevents, flags, kectx, retval);
7783 }
7784
7785 /*!
7786 * @function kevent_qos
7787 *
7788 * @brief
7789 * The kevent_qos() syscall.
7790 */
7791 int
7792 kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval)
7793 {
7794 uthread_t uth = current_uthread();
7795 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7796 int error, flags = uap->flags & KEVENT_FLAG_USER;
7797 struct kqueue *kq;
7798
7799 if (__improbable(flags & KEVENT_ID_FLAG_USER)) {
7800 return EINVAL;
7801 }
7802
7803 flags = kevent_adjust_flags_for_proc(p, flags);
7804
7805 error = kevent_get_data_size(flags, uap->data_available, uap->data_out, kectx);
7806 if (__improbable(error)) {
7807 return error;
7808 }
7809
7810 kectx->kec_deadline = 0;
7811 kectx->kec_fp = NULL;
7812 kectx->kec_fd = uap->fd;
7813 /* the kec_process_* fields are only filled if kqueue_scan is called */
7814
7815 /* get the kq we are going to be working on */
7816 if (__probable(flags & KEVENT_FLAG_WORKQ)) {
7817 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7818 } else {
7819 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7820 }
7821 if (__improbable(error)) {
7822 return error;
7823 }
7824
7825 return kevent_modern_internal(kq, uap->changelist, uap->nchanges,
7826 uap->eventlist, uap->nevents, flags, kectx, retval);
7827 }
7828
7829 #pragma mark legacy syscalls: kevent, kevent64
7830
7831 /*!
7832 * @function kevent_legacy_get_deadline
7833 *
7834 * @brief
7835 * Compute the deadline for the legacy kevent syscalls.
7836 *
7837 * @discussion
7838 * This is not necessary if KEVENT_FLAG_IMMEDIATE is specified,
7839 * as this takes precedence over the deadline.
7840 *
7841 * This function will fail if utimeout is USER_ADDR_NULL
7842 * (the caller should check).
7843 */
7844 static int
7845 kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline)
7846 {
7847 struct timespec ts;
7848
7849 if (flags & KEVENT_FLAG_PROC64) {
7850 struct user64_timespec ts64;
7851 int error = copyin(utimeout, &ts64, sizeof(ts64));
7852 if (__improbable(error)) {
7853 return error;
7854 }
7855 ts.tv_sec = (unsigned long)ts64.tv_sec;
7856 ts.tv_nsec = (long)ts64.tv_nsec;
7857 } else {
7858 struct user32_timespec ts32;
7859 int error = copyin(utimeout, &ts32, sizeof(ts32));
7860 if (__improbable(error)) {
7861 return error;
7862 }
7863 ts.tv_sec = ts32.tv_sec;
7864 ts.tv_nsec = ts32.tv_nsec;
7865 }
7866 if (!timespec_is_valid(&ts)) {
7867 return EINVAL;
7868 }
7869
7870 clock_absolutetime_interval_to_deadline(tstoabstime(&ts), deadline);
7871 return 0;
7872 }
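/*
 * Illustrative sketch (not part of the build): callers of the legacy syscalls
 * pass a relative timespec, which kevent_legacy_get_deadline() converts into
 * an absolute deadline. For example, waiting at most 500ms for events:
 *
 *	struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * NSEC_PER_MSEC };
 *	int n = kevent(kq, NULL, 0, events, nevents, &ts);
 *	// n == 0 means the deadline expired with no events (THREAD_TIMED_OUT)
 */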
7873
7874 /*!
7875 * @function kevent_legacy_internal
7876 *
7877 * @brief
7878 * The core implementation for kevent and kevent64
7879 */
7880 OS_NOINLINE
7881 static int
7882 kevent_legacy_internal(struct proc *p, struct kevent64_args *uap,
7883 int32_t *retval, int flags)
7884 {
7885 uthread_t uth = current_uthread();
7886 kevent_ctx_t kectx = &uth->uu_save.uus_kevent;
7887 struct kqueue *kq;
7888 int error;
7889
7890 if (__improbable(uap->flags & KEVENT_ID_FLAG_USER)) {
7891 return EINVAL;
7892 }
7893
7894 flags = kevent_adjust_flags_for_proc(p, flags);
7895
7896 kectx->kec_data_out = 0;
7897 kectx->kec_data_avail = 0;
7898 kectx->kec_data_size = 0;
7899 kectx->kec_data_resid = 0;
7900 kectx->kec_deadline = 0;
7901 kectx->kec_fp = NULL;
7902 kectx->kec_fd = uap->fd;
7903 /* the kec_process_* fields are only filled if kqueue_scan is called */
7904
7905 /* convert timeout to absolute - if we have one (and not immediate) */
7906 if (__improbable(uap->timeout && !(flags & KEVENT_FLAG_IMMEDIATE))) {
7907 error = kevent_legacy_get_deadline(flags, uap->timeout,
7908 &kectx->kec_deadline);
7909 if (__improbable(error)) {
7910 return error;
7911 }
7912 }
7913
7914 /* get the kq we are going to be working on */
7915 if (flags & KEVENT_FLAG_WORKQ) {
7916 error = kevent_get_kqwq(p, flags, uap->nevents, &kq);
7917 } else {
7918 error = kevent_get_kqfile(p, uap->fd, flags, &kectx->kec_fp, &kq);
7919 }
7920 if (__improbable(error)) {
7921 return error;
7922 }
7923
7924 return kevent_internal(kq, uap->changelist, uap->nchanges,
7925 uap->eventlist, uap->nevents, flags, kectx, retval,
7926 /*legacy*/ true);
7927 }
7928
7929 /*!
7930 * @function kevent
7931 *
7932 * @brief
7933 * The legacy kevent() syscall.
7934 */
7935 int
7936 kevent(struct proc *p, struct kevent_args *uap, int32_t *retval)
7937 {
7938 struct kevent64_args args = {
7939 .fd = uap->fd,
7940 .changelist = uap->changelist,
7941 .nchanges = uap->nchanges,
7942 .eventlist = uap->eventlist,
7943 .nevents = uap->nevents,
7944 .timeout = uap->timeout,
7945 };
7946
7947 return kevent_legacy_internal(p, &args, retval, KEVENT_FLAG_LEGACY32);
7948 }
7949
7950 /*!
7951 * @function kevent64
7952 *
7953 * @brief
7954 * The legacy kevent64() syscall.
7955 */
7956 int
7957 kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval)
7958 {
7959 int flags = (uap->flags & KEVENT_FLAG_USER) | KEVENT_FLAG_LEGACY64;
7960 return kevent_legacy_internal(p, uap, retval, flags);
7961 }
7962
7963 #pragma mark - socket interface
7964
7965 #if SOCKETS
7966 #include <sys/param.h>
7967 #include <sys/socket.h>
7968 #include <sys/protosw.h>
7969 #include <sys/domain.h>
7970 #include <sys/mbuf.h>
7971 #include <sys/kern_event.h>
7972 #include <sys/malloc.h>
7973 #include <sys/sys_domain.h>
7974 #include <sys/syslog.h>
7975
7976 #ifndef ROUNDUP64
7977 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
7978 #endif
7979
7980 #ifndef ADVANCE64
7981 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
7982 #endif
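/*
 * Example: ROUNDUP64 rounds a size up to the next multiple of 8 bytes so the
 * records emitted by kevt_pcblist() stay 64-bit aligned when packed back to
 * back, e.g. ROUNDUP64(12) == 16 and ADVANCE64(p, 12) yields p + 16.
 */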
7983
7984 static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol");
7985 static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp);
7986
7987 static int kev_attach(struct socket *so, int proto, struct proc *p);
7988 static int kev_detach(struct socket *so);
7989 static int kev_control(struct socket *so, u_long cmd, caddr_t data,
7990 struct ifnet *ifp, struct proc *p);
7991 static lck_mtx_t * event_getlock(struct socket *, int);
7992 static int event_lock(struct socket *, int, void *);
7993 static int event_unlock(struct socket *, int, void *);
7994
7995 static int event_sofreelastref(struct socket *);
7996 static void kev_delete(struct kern_event_pcb *);
7997
7998 static struct pr_usrreqs event_usrreqs = {
7999 .pru_attach = kev_attach,
8000 .pru_control = kev_control,
8001 .pru_detach = kev_detach,
8002 .pru_soreceive = soreceive,
8003 };
8004
8005 static struct protosw eventsw[] = {
8006 {
8007 .pr_type = SOCK_RAW,
8008 .pr_protocol = SYSPROTO_EVENT,
8009 .pr_flags = PR_ATOMIC,
8010 .pr_usrreqs = &event_usrreqs,
8011 .pr_lock = event_lock,
8012 .pr_unlock = event_unlock,
8013 .pr_getlock = event_getlock,
8014 }
8015 };
8016
8017 __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
8018 __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
8019
8020 SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
8021 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family");
8022
8023 struct kevtstat kevtstat;
8024 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
8025 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8026 kevt_getstat, "S,kevtstat", "");
8027
8028 SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
8029 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8030 kevt_pcblist, "S,xkevtpcb", "");
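/*
 * Illustrative sketch (not part of the build; MIB name assumed from the node
 * declarations above): reading the kernel event statistics from userspace.
 *
 *	struct kevtstat st;
 *	size_t len = sizeof(st);
 *	if (sysctlbyname("net.systm.kevt.stats", &st, &len, NULL, 0) == 0) {
 *		// st.kes_pcbcount, st.kes_posted, st.kes_fullsock, ...
 *	}
 */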
8031
8032 static lck_mtx_t *
8033 event_getlock(struct socket *so, int flags)
8034 {
8035 #pragma unused(flags)
8036 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8037
8038 if (so->so_pcb != NULL) {
8039 if (so->so_usecount < 0) {
8040 panic("%s: so=%p usecount=%d lrh= %s", __func__,
8041 so, so->so_usecount, solockhistory_nr(so));
8042 }
8043 /* NOTREACHED */
8044 } else {
8045 panic("%s: so=%p NULL NO so_pcb %s", __func__,
8046 so, solockhistory_nr(so));
8047 /* NOTREACHED */
8048 }
8049 return &ev_pcb->evp_mtx;
8050 }
8051
8052 static int
8053 event_lock(struct socket *so, int refcount, void *lr)
8054 {
8055 void *lr_saved;
8056
8057 if (lr == NULL) {
8058 lr_saved = __builtin_return_address(0);
8059 } else {
8060 lr_saved = lr;
8061 }
8062
8063 if (so->so_pcb != NULL) {
8064 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8065 } else {
8066 panic("%s: so=%p NO PCB! lr=%p lrh= %s", __func__,
8067 so, lr_saved, solockhistory_nr(so));
8068 /* NOTREACHED */
8069 }
8070
8071 if (so->so_usecount < 0) {
8072 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s", __func__,
8073 so, so->so_pcb, lr_saved, so->so_usecount,
8074 solockhistory_nr(so));
8075 /* NOTREACHED */
8076 }
8077
8078 if (refcount) {
8079 so->so_usecount++;
8080 }
8081
8082 so->lock_lr[so->next_lock_lr] = lr_saved;
8083 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
8084 return 0;
8085 }
8086
8087 static int
8088 event_unlock(struct socket *so, int refcount, void *lr)
8089 {
8090 void *lr_saved;
8091 lck_mtx_t *mutex_held;
8092
8093 if (lr == NULL) {
8094 lr_saved = __builtin_return_address(0);
8095 } else {
8096 lr_saved = lr;
8097 }
8098
8099 if (refcount) {
8100 so->so_usecount--;
8101 }
8102 if (so->so_usecount < 0) {
8103 panic("%s: so=%p usecount=%d lrh= %s", __func__,
8104 so, so->so_usecount, solockhistory_nr(so));
8105 /* NOTREACHED */
8106 }
8107 if (so->so_pcb == NULL) {
8108 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s", __func__,
8109 so, so->so_usecount, (void *)lr_saved,
8110 solockhistory_nr(so));
8111 /* NOTREACHED */
8112 }
8113 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
8114
8115 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
8116 so->unlock_lr[so->next_unlock_lr] = lr_saved;
8117 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
8118
8119 if (so->so_usecount == 0) {
8120 VERIFY(so->so_flags & SOF_PCBCLEARING);
8121 event_sofreelastref(so);
8122 } else {
8123 lck_mtx_unlock(mutex_held);
8124 }
8125
8126 return 0;
8127 }
8128
8129 static int
8130 event_sofreelastref(struct socket *so)
8131 {
8132 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
8133
8134 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
8135
8136 so->so_pcb = NULL;
8137
8138 /*
8139 * Disable upcall in the event another thread is in kev_post_msg()
8140 * appending a record to the receive socket buffer, since sbwakeup()
8141 * may release the socket lock otherwise.
8142 */
8143 so->so_rcv.sb_flags &= ~SB_UPCALL;
8144 so->so_snd.sb_flags &= ~SB_UPCALL;
8145 so->so_event = sonullevent;
8146 lck_mtx_unlock(&(ev_pcb->evp_mtx));
8147
8148 LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
8149 lck_rw_lock_exclusive(&kev_rwlock);
8150 LIST_REMOVE(ev_pcb, evp_link);
8151 kevtstat.kes_pcbcount--;
8152 kevtstat.kes_gencnt++;
8153 lck_rw_done(&kev_rwlock);
8154 kev_delete(ev_pcb);
8155
8156 sofreelastref(so, 1);
8157 return 0;
8158 }
8159
8160 static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw));
8161
8162 static
8163 struct kern_event_head kern_event_head;
8164
8165 static u_int32_t static_event_id = 0;
8166
8167 static KALLOC_TYPE_DEFINE(ev_pcb_zone, struct kern_event_pcb, NET_KT_DEFAULT);
8168
8169 /*
8170 * Install the protosw's for the NKE manager. Invoked at extension load time
8171 */
8172 void
8173 kern_event_init(struct domain *dp)
8174 {
8175 struct protosw *pr;
8176 int i;
8177
8178 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
8179 VERIFY(dp == systemdomain);
8180
8181 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) {
8182 net_add_proto(pr, dp, 1);
8183 }
8184 }
8185
8186 static int
8187 kev_attach(struct socket *so, __unused int proto, __unused struct proc *p)
8188 {
8189 int error = 0;
8190 struct kern_event_pcb *ev_pcb;
8191
8192 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
8193 if (error != 0) {
8194 return error;
8195 }
8196
8197 ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO);
8198 lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL);
8199
8200 ev_pcb->evp_socket = so;
8201 ev_pcb->evp_vendor_code_filter = 0xffffffff;
8202
8203 so->so_pcb = (caddr_t) ev_pcb;
8204 lck_rw_lock_exclusive(&kev_rwlock);
8205 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link);
8206 kevtstat.kes_pcbcount++;
8207 kevtstat.kes_gencnt++;
8208 lck_rw_done(&kev_rwlock);
8209
8210 return error;
8211 }
8212
8213 static void
8214 kev_delete(struct kern_event_pcb *ev_pcb)
8215 {
8216 VERIFY(ev_pcb != NULL);
8217 lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp);
8218 zfree(ev_pcb_zone, ev_pcb);
8219 }
8220
8221 static int
8222 kev_detach(struct socket *so)
8223 {
8224 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8225
8226 if (ev_pcb != NULL) {
8227 soisdisconnected(so);
8228 so->so_flags |= SOF_PCBCLEARING;
8229 }
8230
8231 return 0;
8232 }
8233
8234 /*
8235 * For now, kev_vendor_code and mbuf_tags use the same
8236 * mechanism.
8237 */
8238 errno_t
8239 kev_vendor_code_find(
8240 const char *string,
8241 u_int32_t *out_vendor_code)
8242 {
8243 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) {
8244 return EINVAL;
8245 }
8246 return net_str_id_find_internal(string, out_vendor_code,
8247 NSI_VENDOR_CODE, 1);
8248 }
8249
8250 errno_t
8251 kev_msg_post(struct kev_msg *event_msg)
8252 {
8253 mbuf_tag_id_t min_vendor, max_vendor;
8254
8255 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE);
8256
8257 if (event_msg == NULL) {
8258 return EINVAL;
8259 }
8260
8261 /*
8262 * Limit third parties to posting events for registered vendor codes
8263 * only
8264 */
8265 if (event_msg->vendor_code < min_vendor ||
8266 event_msg->vendor_code > max_vendor) {
8267 os_atomic_inc(&kevtstat.kes_badvendor, relaxed);
8268 return EINVAL;
8269 }
8270 return kev_post_msg(event_msg);
8271 }
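/*
 * Illustrative sketch (not part of the build): how a kernel subsystem posts an
 * event through this interface. The class/subclass/event codes below are
 * hypothetical placeholders; real callers use values registered for their
 * vendor code (see kev_vendor_code_find()).
 *
 *	struct kev_msg ev = { 0 };
 *	uint32_t payload = 42;
 *	ev.vendor_code  = my_vendor;        // from kev_vendor_code_find()
 *	ev.kev_class    = MY_CLASS;         // hypothetical
 *	ev.kev_subclass = MY_SUBCLASS;      // hypothetical
 *	ev.event_code   = MY_EVENT_CODE;    // hypothetical
 *	ev.dv[0].data_length = sizeof(payload);
 *	ev.dv[0].data_ptr    = &payload;
 *	(void)kev_msg_post(&ev);            // copied into one mbuf, fanned out to listeners
 */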
8272
8273 static int
8274 kev_post_msg_internal(struct kev_msg *event_msg, int wait)
8275 {
8276 struct mbuf *m, *m2;
8277 struct kern_event_pcb *ev_pcb;
8278 struct kern_event_msg *ev;
8279 char *tmp;
8280 u_int32_t total_size;
8281 int i;
8282
8283 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
8284 /*
8285 * Special hook for ALF state updates
8286 */
8287 if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
8288 event_msg->kev_class == KEV_NKE_CLASS &&
8289 event_msg->kev_subclass == KEV_NKE_ALF_SUBCLASS &&
8290 event_msg->event_code == KEV_NKE_ALF_STATE_CHANGED) {
8291 #if (DEBUG || DEVELOPMENT)
8292 os_log_info(OS_LOG_DEFAULT, "KEV_NKE_ALF_STATE_CHANGED posted");
8293 #endif /* DEBUG || DEVELOPMENT */
8294 net_filter_event_mark(NET_FILTER_EVENT_ALF,
8295 net_check_compatible_alf());
8296 }
8297 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
8298
8299 /* Verify the message is small enough to fit in one mbuf w/o cluster */
8300 total_size = KEV_MSG_HEADER_SIZE;
8301
8302 for (i = 0; i < 5; i++) {
8303 if (event_msg->dv[i].data_length == 0) {
8304 break;
8305 }
8306 total_size += event_msg->dv[i].data_length;
8307 }
8308
8309 if (total_size > MLEN) {
8310 os_atomic_inc(&kevtstat.kes_toobig, relaxed);
8311 return EMSGSIZE;
8312 }
8313
8314 m = m_get(wait, MT_DATA);
8315 if (m == 0) {
8316 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8317 return ENOMEM;
8318 }
8319 ev = mtod(m, struct kern_event_msg *);
8320 total_size = KEV_MSG_HEADER_SIZE;
8321
8322 tmp = (char *) &ev->event_data[0];
8323 for (i = 0; i < 5; i++) {
8324 if (event_msg->dv[i].data_length == 0) {
8325 break;
8326 }
8327
8328 total_size += event_msg->dv[i].data_length;
8329 bcopy(event_msg->dv[i].data_ptr, tmp,
8330 event_msg->dv[i].data_length);
8331 tmp += event_msg->dv[i].data_length;
8332 }
8333
8334 ev->id = ++static_event_id;
8335 ev->total_size = total_size;
8336 ev->vendor_code = event_msg->vendor_code;
8337 ev->kev_class = event_msg->kev_class;
8338 ev->kev_subclass = event_msg->kev_subclass;
8339 ev->event_code = event_msg->event_code;
8340
8341 m->m_len = total_size;
8342 lck_rw_lock_shared(&kev_rwlock);
8343 for (ev_pcb = LIST_FIRST(&kern_event_head);
8344 ev_pcb;
8345 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8346 lck_mtx_lock(&ev_pcb->evp_mtx);
8347 if (ev_pcb->evp_socket->so_pcb == NULL) {
8348 lck_mtx_unlock(&ev_pcb->evp_mtx);
8349 continue;
8350 }
8351 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) {
8352 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) {
8353 lck_mtx_unlock(&ev_pcb->evp_mtx);
8354 continue;
8355 }
8356
8357 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) {
8358 if (ev_pcb->evp_class_filter != ev->kev_class) {
8359 lck_mtx_unlock(&ev_pcb->evp_mtx);
8360 continue;
8361 }
8362
8363 if ((ev_pcb->evp_subclass_filter !=
8364 KEV_ANY_SUBCLASS) &&
8365 (ev_pcb->evp_subclass_filter !=
8366 ev->kev_subclass)) {
8367 lck_mtx_unlock(&ev_pcb->evp_mtx);
8368 continue;
8369 }
8370 }
8371 }
8372
8373 m2 = m_copym(m, 0, m->m_len, wait);
8374 if (m2 == 0) {
8375 os_atomic_inc(&kevtstat.kes_nomem, relaxed);
8376 m_free(m);
8377 lck_mtx_unlock(&ev_pcb->evp_mtx);
8378 lck_rw_done(&kev_rwlock);
8379 return ENOMEM;
8380 }
8381 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) {
8382 /*
8383 * We use "m" for the socket stats as it would be
8384 * unsafe to use "m2"
8385 */
8386 so_inc_recv_data_stat(ev_pcb->evp_socket,
8387 1, m->m_len, MBUF_TC_BE);
8388
8389 sorwakeup(ev_pcb->evp_socket);
8390 os_atomic_inc(&kevtstat.kes_posted, relaxed);
8391 } else {
8392 os_atomic_inc(&kevtstat.kes_fullsock, relaxed);
8393 }
8394 lck_mtx_unlock(&ev_pcb->evp_mtx);
8395 }
8396 m_free(m);
8397 lck_rw_done(&kev_rwlock);
8398
8399 return 0;
8400 }
8401
8402 int
8403 kev_post_msg(struct kev_msg *event_msg)
8404 {
8405 return kev_post_msg_internal(event_msg, M_WAIT);
8406 }
8407
8408 int
8409 kev_post_msg_nowait(struct kev_msg *event_msg)
8410 {
8411 return kev_post_msg_internal(event_msg, M_NOWAIT);
8412 }
8413
8414 static int
8415 kev_control(struct socket *so,
8416 u_long cmd,
8417 caddr_t data,
8418 __unused struct ifnet *ifp,
8419 __unused struct proc *p)
8420 {
8421 struct kev_request *kev_req = (struct kev_request *) data;
8422 struct kern_event_pcb *ev_pcb;
8423 struct kev_vendor_code *kev_vendor;
8424 u_int32_t *id_value = (u_int32_t *) data;
8425
8426 switch (cmd) {
8427 case SIOCGKEVID:
8428 *id_value = static_event_id;
8429 break;
8430 case SIOCSKEVFILT:
8431 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8432 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code;
8433 ev_pcb->evp_class_filter = kev_req->kev_class;
8434 ev_pcb->evp_subclass_filter = kev_req->kev_subclass;
8435 break;
8436 case SIOCGKEVFILT:
8437 ev_pcb = (struct kern_event_pcb *) so->so_pcb;
8438 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter;
8439 kev_req->kev_class = ev_pcb->evp_class_filter;
8440 kev_req->kev_subclass = ev_pcb->evp_subclass_filter;
8441 break;
8442 case SIOCGKEVVENDOR:
8443 kev_vendor = (struct kev_vendor_code *)data;
8444 /* Make sure string is NULL terminated */
8445 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0;
8446 return net_str_id_find_internal(kev_vendor->vendor_string,
8447 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0);
8448 default:
8449 return ENOTSUP;
8450 }
8451
8452 return 0;
8453 }
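/*
 * Illustrative sketch (not part of the build): the userspace side of the
 * ioctls handled above. A client opens a PF_SYSTEM/SYSPROTO_EVENT socket,
 * installs a filter with SIOCSKEVFILT, then reads kern_event_msg records.
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/kern_event.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code  = KEV_VENDOR_APPLE,
 *		.kev_class    = KEV_ANY_CLASS,
 *		.kev_subclass = KEV_ANY_SUBCLASS,
 *	};
 *	(void)ioctl(fd, SIOCSKEVFILT, &req);
 */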
8454
8455 int
8456 kevt_getstat SYSCTL_HANDLER_ARGS
8457 {
8458 #pragma unused(oidp, arg1, arg2)
8459 int error = 0;
8460
8461 lck_rw_lock_shared(&kev_rwlock);
8462
8463 if (req->newptr != USER_ADDR_NULL) {
8464 error = EPERM;
8465 goto done;
8466 }
8467 if (req->oldptr == USER_ADDR_NULL) {
8468 req->oldidx = sizeof(struct kevtstat);
8469 goto done;
8470 }
8471
8472 error = SYSCTL_OUT(req, &kevtstat,
8473 MIN(sizeof(struct kevtstat), req->oldlen));
8474 done:
8475 lck_rw_done(&kev_rwlock);
8476
8477 return error;
8478 }
8479
8480 __private_extern__ int
8481 kevt_pcblist SYSCTL_HANDLER_ARGS
8482 {
8483 #pragma unused(oidp, arg1, arg2)
8484 int error = 0;
8485 uint64_t n, i;
8486 struct xsystmgen xsg;
8487 void *buf = NULL;
8488 size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) +
8489 ROUNDUP64(sizeof(struct xsocket_n)) +
8490 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
8491 ROUNDUP64(sizeof(struct xsockstat_n));
8492 struct kern_event_pcb *ev_pcb;
8493
8494 buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO);
8495 if (buf == NULL) {
8496 return ENOMEM;
8497 }
8498
8499 lck_rw_lock_shared(&kev_rwlock);
8500
8501 n = kevtstat.kes_pcbcount;
8502
8503 if (req->oldptr == USER_ADDR_NULL) {
8504 req->oldidx = (size_t) ((n + n / 8) * item_size);
8505 goto done;
8506 }
8507 if (req->newptr != USER_ADDR_NULL) {
8508 error = EPERM;
8509 goto done;
8510 }
8511 bzero(&xsg, sizeof(xsg));
8512 xsg.xg_len = sizeof(xsg);
8513 xsg.xg_count = n;
8514 xsg.xg_gen = kevtstat.kes_gencnt;
8515 xsg.xg_sogen = so_gencnt;
8516 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8517 if (error) {
8518 goto done;
8519 }
8520 /*
8521 * We are done if there is no pcb
8522 */
8523 if (n == 0) {
8524 goto done;
8525 }
8526
8527 i = 0;
8528 for (i = 0, ev_pcb = LIST_FIRST(&kern_event_head);
8529 i < n && ev_pcb != NULL;
8530 i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) {
8531 struct xkevtpcb *xk = (struct xkevtpcb *)buf;
8532 struct xsocket_n *xso = (struct xsocket_n *)
8533 ADVANCE64(xk, sizeof(*xk));
8534 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
8535 ADVANCE64(xso, sizeof(*xso));
8536 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
8537 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
8538 struct xsockstat_n *xsostats = (struct xsockstat_n *)
8539 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
8540
8541 bzero(buf, item_size);
8542
8543 lck_mtx_lock(&ev_pcb->evp_mtx);
8544
8545 xk->kep_len = sizeof(struct xkevtpcb);
8546 xk->kep_kind = XSO_EVT;
8547 xk->kep_evtpcb = (uint64_t)VM_KERNEL_ADDRPERM(ev_pcb);
8548 xk->kep_vendor_code_filter = ev_pcb->evp_vendor_code_filter;
8549 xk->kep_class_filter = ev_pcb->evp_class_filter;
8550 xk->kep_subclass_filter = ev_pcb->evp_subclass_filter;
8551
8552 sotoxsocket_n(ev_pcb->evp_socket, xso);
8553 sbtoxsockbuf_n(ev_pcb->evp_socket ?
8554 &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv);
8555 sbtoxsockbuf_n(ev_pcb->evp_socket ?
8556 &ev_pcb->evp_socket->so_snd : NULL, xsbsnd);
8557 sbtoxsockstat_n(ev_pcb->evp_socket, xsostats);
8558
8559 lck_mtx_unlock(&ev_pcb->evp_mtx);
8560
8561 error = SYSCTL_OUT(req, buf, item_size);
8562 }
8563
8564 if (error == 0) {
8565 /*
8566 * Give the user an updated idea of our state.
8567 * If the generation differs from what we told
8568 * her before, she knows that something happened
8569 * while we were processing this request, and it
8570 * might be necessary to retry.
8571 */
8572 bzero(&xsg, sizeof(xsg));
8573 xsg.xg_len = sizeof(xsg);
8574 xsg.xg_count = n;
8575 xsg.xg_gen = kevtstat.kes_gencnt;
8576 xsg.xg_sogen = so_gencnt;
8577 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
8578 if (error) {
8579 goto done;
8580 }
8581 }
8582
8583 done:
8584 lck_rw_done(&kev_rwlock);
8585
8586 kfree_data(buf, item_size);
8587 return error;
8588 }
8589
8590 #endif /* SOCKETS */
8591
8592
8593 int
8594 fill_kqueueinfo(kqueue_t kqu, struct kqueue_info * kinfo)
8595 {
8596 struct vinfo_stat * st;
8597
8598 st = &kinfo->kq_stat;
8599
8600 st->vst_size = kqu.kq->kq_count;
8601 if (kqu.kq->kq_state & KQ_KEV_QOS) {
8602 st->vst_blksize = sizeof(struct kevent_qos_s);
8603 } else if (kqu.kq->kq_state & KQ_KEV64) {
8604 st->vst_blksize = sizeof(struct kevent64_s);
8605 } else {
8606 st->vst_blksize = sizeof(struct kevent);
8607 }
8608 st->vst_mode = S_IFIFO;
8609 st->vst_ino = (kqu.kq->kq_state & KQ_DYNAMIC) ?
8610 kqu.kqwl->kqwl_dynamicid : 0;
8611
8612 /* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */
8613 #define PROC_KQUEUE_MASK (KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP)
8614 static_assert(PROC_KQUEUE_SLEEP == KQ_SLEEP);
8615 static_assert(PROC_KQUEUE_32 == KQ_KEV32);
8616 static_assert(PROC_KQUEUE_64 == KQ_KEV64);
8617 static_assert(PROC_KQUEUE_QOS == KQ_KEV_QOS);
8618 static_assert(PROC_KQUEUE_WORKQ == KQ_WORKQ);
8619 static_assert(PROC_KQUEUE_WORKLOOP == KQ_WORKLOOP);
8620 kinfo->kq_state = kqu.kq->kq_state & PROC_KQUEUE_MASK;
8621 if ((kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0) {
8622 if (kqu.kqf->kqf_sel.si_flags & SI_RECORDED) {
8623 kinfo->kq_state |= PROC_KQUEUE_SELECT;
8624 }
8625 }
8626
8627 return 0;
8628 }
8629
8630 static int
8631 fill_kqueue_dyninfo(struct kqworkloop *kqwl, struct kqueue_dyninfo *kqdi)
8632 {
8633 workq_threadreq_t kqr = &kqwl->kqwl_request;
8634 workq_threadreq_param_t trp = {};
8635 int err;
8636
8637 if ((kqwl->kqwl_state & KQ_WORKLOOP) == 0) {
8638 return EINVAL;
8639 }
8640
8641 if ((err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi->kqdi_info))) {
8642 return err;
8643 }
8644
8645 kqlock(kqwl);
8646
8647 kqdi->kqdi_servicer = thread_tid(kqr_thread(kqr));
8648 kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner);
8649 kqdi->kqdi_request_state = kqr->tr_state;
8650 kqdi->kqdi_async_qos = kqr->tr_kq_qos_index;
8651 kqdi->kqdi_events_qos = kqr->tr_kq_override_index;
8652 kqdi->kqdi_sync_waiters = 0;
8653 kqdi->kqdi_sync_waiter_qos = 0;
8654
8655 trp.trp_value = kqwl->kqwl_params;
8656 if (trp.trp_flags & TRP_PRIORITY) {
8657 kqdi->kqdi_pri = trp.trp_pri;
8658 } else {
8659 kqdi->kqdi_pri = 0;
8660 }
8661
8662 if (trp.trp_flags & TRP_POLICY) {
8663 kqdi->kqdi_pol = trp.trp_pol;
8664 } else {
8665 kqdi->kqdi_pol = 0;
8666 }
8667
8668 if (trp.trp_flags & TRP_CPUPERCENT) {
8669 kqdi->kqdi_cpupercent = trp.trp_cpupercent;
8670 } else {
8671 kqdi->kqdi_cpupercent = 0;
8672 }
8673
8674 kqunlock(kqwl);
8675
8676 return 0;
8677 }
8678
8679
8680 static unsigned long
8681 kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf,
8682 unsigned long buflen, unsigned long nknotes)
8683 {
8684 for (; kn; kn = SLIST_NEXT(kn, kn_link)) {
8685 if (kq == knote_get_kq(kn)) {
8686 if (nknotes < buflen) {
8687 struct kevent_extinfo *info = &buf[nknotes];
8688
8689 kqlock(kq);
8690
8691 if (knote_fops(kn)->f_sanitized_copyout) {
8692 knote_fops(kn)->f_sanitized_copyout(kn, &info->kqext_kev);
8693 } else {
8694 info->kqext_kev = *(struct kevent_qos_s *)&kn->kn_kevent;
8695 }
8696
8697 if (knote_has_qos(kn)) {
8698 info->kqext_kev.qos =
8699 _pthread_priority_thread_qos_fast(kn->kn_qos);
8700 } else {
8701 info->kqext_kev.qos = kn->kn_qos_override;
8702 }
8703 info->kqext_kev.filter |= 0xff00; /* sign extend filter */
8704 info->kqext_kev.xflags = 0; /* this is where sfflags lives */
8705 info->kqext_kev.data = 0; /* this is where sdata lives */
8706 info->kqext_sdata = kn->kn_sdata;
8707 info->kqext_status = kn->kn_status;
8708 info->kqext_sfflags = kn->kn_sfflags;
8709
8710 kqunlock(kq);
8711 }
8712
8713 /* we return total number of knotes, which may be more than requested */
8714 nknotes++;
8715 }
8716 }
8717
8718 return nknotes;
8719 }
8720
8721 int
8722 kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
8723 int32_t *nkqueues_out)
8724 {
8725 proc_t p = (proc_t)proc;
8726 struct filedesc *fdp = &p->p_fd;
8727 unsigned int nkqueues = 0;
8728 unsigned long ubuflen = ubufsize / sizeof(kqueue_id_t);
8729 size_t buflen, bufsize;
8730 kqueue_id_t *kq_ids = NULL;
8731 int err = 0;
8732
8733 assert(p != NULL);
8734
8735 if (ubuf == USER_ADDR_NULL && ubufsize != 0) {
8736 err = EINVAL;
8737 goto out;
8738 }
8739
8740 buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX);
8741
8742 if (ubuflen != 0) {
8743 if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) {
8744 err = ERANGE;
8745 goto out;
8746 }
8747 kq_ids = (kqueue_id_t *)kalloc_data(bufsize, Z_WAITOK | Z_ZERO);
8748 if (!kq_ids) {
8749 err = ENOMEM;
8750 goto out;
8751 }
8752 }
8753
8754 kqhash_lock(fdp);
8755
8756 if (fdp->fd_kqhashmask > 0) {
8757 for (uint32_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
8758 struct kqworkloop *kqwl;
8759
8760 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
8761 /* report the number of kqueues, even if they don't all fit */
8762 if (nkqueues < buflen) {
8763 kq_ids[nkqueues] = kqwl->kqwl_dynamicid;
8764 }
8765 nkqueues++;
8766 }
8767 }
8768 }
8769
8770 kqhash_unlock(fdp);
8771
8772 if (kq_ids) {
8773 size_t copysize;
8774 if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), &copysize)) {
8775 err = ERANGE;
8776 goto out;
8777 }
8778
8779 assert(ubufsize >= copysize);
8780 err = copyout(kq_ids, ubuf, copysize);
8781 }
8782
8783 out:
8784 if (kq_ids) {
8785 kfree_data(kq_ids, bufsize);
8786 }
8787
8788 if (!err) {
8789 *nkqueues_out = (int)min(nkqueues, PROC_PIDDYNKQUEUES_MAX);
8790 }
8791 return err;
8792 }
8793
8794 int
8795 kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8796 uint32_t ubufsize, int32_t *size_out)
8797 {
8798 proc_t p = (proc_t)proc;
8799 struct kqworkloop *kqwl;
8800 int err = 0;
8801 struct kqueue_dyninfo kqdi = { };
8802
8803 assert(p != NULL);
8804
8805 if (ubufsize < sizeof(struct kqueue_info)) {
8806 return ENOBUFS;
8807 }
8808
8809 kqwl = kqworkloop_hash_lookup_and_retain(&p->p_fd, kq_id);
8810 if (!kqwl) {
8811 return ESRCH;
8812 }
8813
8814 /*
8815 * backward compatibility: allow the argument to this call to only be
8816 * a struct kqueue_info
8817 */
8818 if (ubufsize >= sizeof(struct kqueue_dyninfo)) {
8819 ubufsize = sizeof(struct kqueue_dyninfo);
8820 err = fill_kqueue_dyninfo(kqwl, &kqdi);
8821 } else {
8822 ubufsize = sizeof(struct kqueue_info);
8823 err = fill_kqueueinfo(&kqwl->kqwl_kqueue, &kqdi.kqdi_info);
8824 }
8825 if (err == 0 && (err = copyout(&kqdi, ubuf, ubufsize)) == 0) {
8826 *size_out = ubufsize;
8827 }
8828 kqworkloop_release(kqwl);
8829 return err;
8830 }
8831
8832 int
8833 kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf,
8834 uint32_t ubufsize, int32_t *nknotes_out)
8835 {
8836 proc_t p = (proc_t)proc;
8837 struct kqworkloop *kqwl;
8838 int err;
8839
8840 kqwl = kqworkloop_hash_lookup_and_retain(&p->p_fd, kq_id);
8841 if (!kqwl) {
8842 return ESRCH;
8843 }
8844
8845 err = pid_kqueue_extinfo(p, &kqwl->kqwl_kqueue, ubuf, ubufsize, nknotes_out);
8846 kqworkloop_release(kqwl);
8847 return err;
8848 }
8849
8850 int
8851 pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf,
8852 uint32_t bufsize, int32_t *retval)
8853 {
8854 struct knote *kn;
8855 int i;
8856 int err = 0;
8857 struct filedesc *fdp = &p->p_fd;
8858 unsigned long nknotes = 0;
8859 unsigned long buflen = bufsize / sizeof(struct kevent_extinfo);
8860 struct kevent_extinfo *kqext = NULL;
8861
8862 /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. */
8863 buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX);
8864
8865 kqext = (struct kevent_extinfo *)kalloc_data(buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO);
8866 if (kqext == NULL) {
8867 err = ENOMEM;
8868 goto out;
8869 }
8870
8871 proc_fdlock(p);
8872 for (i = 0; i < fdp->fd_knlistsize; i++) {
8873 kn = SLIST_FIRST(&fdp->fd_knlist[i]);
8874 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8875 }
8876 proc_fdunlock(p);
8877
8878 if (fdp->fd_knhashmask != 0) {
8879 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) {
8880 knhash_lock(fdp);
8881 kn = SLIST_FIRST(&fdp->fd_knhash[i]);
8882 nknotes = kevent_extinfo_emit(kq, kn, kqext, buflen, nknotes);
8883 knhash_unlock(fdp);
8884 }
8885 }
8886
8887 assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8888 err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes));
8889
8890 out:
8891 kfree_data(kqext, buflen * sizeof(struct kevent_extinfo));
8892
8893 if (!err) {
8894 *retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX);
8895 }
8896 return err;
8897 }
8898
8899 static unsigned int
8900 klist_copy_udata(struct klist *list, uint64_t *buf,
8901 unsigned int buflen, unsigned int nknotes)
8902 {
8903 struct knote *kn;
8904 SLIST_FOREACH(kn, list, kn_link) {
8905 if (nknotes < buflen) {
8906 /*
8907 * kevent_register will always set kn_udata atomically
8908 * so that we don't have to take any kqlock here.
8909 */
8910 buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed);
8911 }
8912 /* we return total number of knotes, which may be more than requested */
8913 nknotes++;
8914 }
8915
8916 return nknotes;
8917 }
8918
8919 int
8920 kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize)
8921 {
8922 proc_t p = (proc_t)proc;
8923 struct filedesc *fdp = &p->p_fd;
8924 unsigned int nuptrs = 0;
8925 unsigned int buflen = bufsize / sizeof(uint64_t);
8926 struct kqworkloop *kqwl;
8927
8928 if (buflen > 0) {
8929 assert(buf != NULL);
8930 }
8931
8932 proc_fdlock(p);
8933 for (int i = 0; i < fdp->fd_knlistsize; i++) {
8934 nuptrs = klist_copy_udata(&fdp->fd_knlist[i], buf, buflen, nuptrs);
8935 }
8936 proc_fdunlock(p);
8937
8938 knhash_lock(fdp);
8939 if (fdp->fd_knhashmask != 0) {
8940 for (size_t i = 0; i < fdp->fd_knhashmask + 1; i++) {
8941 nuptrs = klist_copy_udata(&fdp->fd_knhash[i], buf, buflen, nuptrs);
8942 }
8943 }
8944 knhash_unlock(fdp);
8945
8946 kqhash_lock(fdp);
8947 if (fdp->fd_kqhashmask != 0) {
8948 for (size_t i = 0; i < fdp->fd_kqhashmask + 1; i++) {
8949 LIST_FOREACH(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink) {
8950 if (nuptrs < buflen) {
8951 buf[nuptrs] = kqwl->kqwl_dynamicid;
8952 }
8953 nuptrs++;
8954 }
8955 }
8956 }
8957 kqhash_unlock(fdp);
8958
8959 return (int)nuptrs;
8960 }
8961
8962 static void
8963 kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread)
8964 {
8965 uint64_t ast_addr;
8966 bool proc_is_64bit = !!(p->p_flag & P_LP64);
8967 size_t user_addr_size = proc_is_64bit ? 8 : 4;
8968 uint32_t ast_flags32 = 0;
8969 uint64_t ast_flags64 = 0;
8970 struct uthread *ut = get_bsdthread_info(thread);
8971
8972 if (ut->uu_kqr_bound != NULL) {
8973 ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS;
8974 }
8975
8976 if (ast_flags64 == 0) {
8977 return;
8978 }
8979
8980 if (!(p->p_flag & P_LP64)) {
8981 ast_flags32 = (uint32_t)ast_flags64;
8982 assert(ast_flags64 < 0x100000000ull);
8983 }
8984
8985 ast_addr = thread_rettokern_addr(thread);
8986 if (ast_addr == 0) {
8987 return;
8988 }
8989
8990 if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32),
8991 (user_addr_t)ast_addr,
8992 user_addr_size) != 0) {
8993 printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with "
8994 "ast_addr = %llu\n", proc_getpid(p), thread_tid(current_thread()), ast_addr);
8995 }
8996 }
8997
8998 /*
8999 * Semantics of writing to TSD value:
9000 *
9001 * 1. It is written to by the kernel and cleared by userspace.
9002 * 2. When the userspace code clears the TSD field, it takes responsibility for
9003 * taking action on the quantum expiry action conveyed by kernel.
9004 * 3. The TSD value is always cleared upon entry into userspace and upon exit of
9005 * userspace back to kernel to make sure that it is never leaked across thread
9006 * requests.
9007 */
9008 void
9009 kevent_set_workq_quantum_expiry_user_tsd(proc_t p, thread_t thread,
9010 uint64_t flags)
9011 {
9012 uint64_t ast_addr;
9013 bool proc_is_64bit = !!(p->p_flag & P_LP64);
9014 uint32_t ast_flags32 = 0;
9015 uint64_t ast_flags64 = flags;
9016
9017 if (ast_flags64 == 0) {
9018 return;
9019 }
9020
9021 if (!(p->p_flag & P_LP64)) {
9022 ast_flags32 = (uint32_t)ast_flags64;
9023 assert(ast_flags64 < 0x100000000ull);
9024 }
9025
9026 ast_addr = thread_wqquantum_addr(thread);
9027 assert(ast_addr != 0);
9028
9029 if (proc_is_64bit) {
9030 if (copyout_atomic64(ast_flags64, (user_addr_t) ast_addr)) {
9031 #if DEBUG || DEVELOPMENT
9032 printf("pid %d (tid:%llu): copyout of workq quantum ast flags failed with "
9033 "ast_addr = %llu\n", proc_getpid(p), thread_tid(thread), ast_addr);
9034 #endif
9035 }
9036 } else {
9037 if (copyout_atomic32(ast_flags32, (user_addr_t) ast_addr)) {
9038 #if DEBUG || DEVELOPMENT
9039 printf("pid %d (tid:%llu): copyout of workq quantum ast flags failed with "
9040 "ast_addr = %llu\n", proc_getpid(p), thread_tid(thread), ast_addr);
9041 #endif
9042 }
9043 }
9044 }
9045
9046 void
9047 kevent_ast(thread_t thread, uint16_t bits)
9048 {
9049 proc_t p = current_proc();
9050
9051
9052 if (bits & AST_KEVENT_REDRIVE_THREADREQ) {
9053 workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS);
9054 }
9055 if (bits & AST_KEVENT_RETURN_TO_KERNEL) {
9056 kevent_set_return_to_kernel_user_tsd(p, thread);
9057 }
9058
9059 if (bits & AST_KEVENT_WORKQ_QUANTUM_EXPIRED) {
9060 workq_kern_quantum_expiry_reevaluate(p, thread);
9061 }
9062 }
9063
9064 #if DEVELOPMENT || DEBUG
9065
9066 #define KEVENT_SYSCTL_BOUND_ID 1
9067
9068 static int
9069 kevent_sysctl SYSCTL_HANDLER_ARGS
9070 {
9071 #pragma unused(oidp, arg2)
9072 uintptr_t type = (uintptr_t)arg1;
9073 uint64_t bound_id = 0;
9074
9075 if (type != KEVENT_SYSCTL_BOUND_ID) {
9076 return EINVAL;
9077 }
9078
9079 if (req->newptr) {
9080 return EINVAL;
9081 }
9082
9083 struct uthread *ut = current_uthread();
9084 if (!ut) {
9085 return EFAULT;
9086 }
9087
9088 workq_threadreq_t kqr = ut->uu_kqr_bound;
9089 if (kqr) {
9090 if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
9091 bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid;
9092 } else {
9093 bound_id = -1;
9094 }
9095 }
9096
9097 return sysctl_io_number(req, bound_id, sizeof(bound_id), NULL, NULL);
9098 }
9099
9100 SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
9101 "kevent information");
9102
9103 SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id,
9104 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
9105 (void *)KEVENT_SYSCTL_BOUND_ID,
9106 sizeof(kqueue_id_t), kevent_sysctl, "Q",
9107 "get the ID of the bound kqueue");
9108
9109 #endif /* DEVELOPMENT || DEBUG */
9110