1 /*
2 * Copyright (c) 2016-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1999 Michael Smith <[email protected]>
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 */
53
54 #include <sys/cdefs.h>
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <kern/queue.h>
58 #include <kern/locks.h>
59 #include <sys/malloc.h>
60 #include <sys/proc.h>
61 #include <sys/systm.h>
62 #include <sys/eventhandler.h>
63 #include <sys/sysctl.h>
64 #include <sys/mcache.h> /* for VERIFY() */
65 #include <os/log.h>
66
/* Non-zero enables verbose eventhandler logging; tunable via sysctl below. */
int evh_debug = 0;

/* sysctl node: kern.eventhandler, and kern.eventhandler.debug -> evh_debug */
SYSCTL_NODE(_kern, OID_AUTO, eventhandler, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Eventhandler");
SYSCTL_INT(_kern_eventhandler, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &evh_debug, 0, "Eventhandler debug mode");

/* Zero-filled argument value for registrations that carry no UUIDs. */
struct eventhandler_entry_arg eventhandler_entry_dummy_arg = { .ee_fm_uuid = { 0 }, .ee_fr_uuid = { 0 } };

/* List of 'slow' lists */
static struct eventhandler_lists_ctxt evthdlr_lists_ctxt_glb;
/* Lock group for each context's eventhandler_mutex (guards the list of lists). */
static LCK_GRP_DECLARE(eventhandler_mutex_grp, "eventhandler");

/* Lock group/attributes used for the per-list (EHL_*) locks. */
LCK_GRP_DECLARE(el_lock_grp, "eventhandler list");
LCK_ATTR_DECLARE(el_lock_attr, 0, 0);

/* A registered handler: generic list entry plus the callback pointer. */
struct eventhandler_entry_generic {
	struct eventhandler_entry ee;
	void *func;
};

/* Internal lookup; caller must hold the context's eventhandler_mutex. */
static struct eventhandler_list *_eventhandler_find_list(
    struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name);
90
91 void
eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt * evthdlr_lists_ctxt)92 eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
93 {
94 VERIFY(evthdlr_lists_ctxt != NULL);
95
96 TAILQ_INIT(&evthdlr_lists_ctxt->eventhandler_lists);
97 evthdlr_lists_ctxt->eventhandler_lists_initted = 1;
98 lck_mtx_init(&evthdlr_lists_ctxt->eventhandler_mutex,
99 &eventhandler_mutex_grp, LCK_ATTR_NULL);
100 }
101
102 /*
103 * Initialize the eventhandler list.
104 */
105 void
eventhandler_init(void)106 eventhandler_init(void)
107 {
108 evhlog(debug, "%s: init", __func__);
109 eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);
110 }
111
112 /*
113 * Insertion is O(n) due to the priority scan, but optimises to O(1)
114 * if all priorities are identical.
115 */
static eventhandler_tag
eventhandler_register_internal(
	struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
	struct eventhandler_list *list,
	const char *name, eventhandler_tag epn)
{
	struct eventhandler_list *__single new_list;
	struct eventhandler_entry *__single ep;

	/* 'name' must fit in el_name including its NUL terminator. */
	VERIFY(strlen(name) <= (sizeof(new_list->el_name) - 1));

	/* NULL context selects the global (default) set of lists. */
	if (evthdlr_lists_ctxt == NULL) {
		evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
	}

	VERIFY(evthdlr_lists_ctxt->eventhandler_lists_initted); /* eventhandler registered too early */
	VERIFY(epn != NULL); /* cannot register NULL event */

	evhlog(debug, "%s: registering event_type=%s\n", __func__, name);

	/* lock the eventhandler lists */
	lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);

	/* Do we need to find/create the (slow) list? */
	if (list == NULL) {
		/* look for a matching, existing list */
		list = _eventhandler_find_list(evthdlr_lists_ctxt, name);

		/* Do we need to create the list? */
		if (list == NULL) {
			/* kalloc_type(Z_WAITOK) may block: convert spin mutex to full first */
			lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
			new_list = kalloc_type(struct eventhandler_list, Z_WAITOK_ZERO);
			evhlog2(debug, "%s: creating list \"%s\"", __func__, name);
			list = new_list;
			list->el_flags = 0;
			list->el_runcount = 0;
			bzero(&list->el_lock, sizeof(list->el_lock));
			(void) snprintf(list->el_name, sizeof(list->el_name), "%s", name);
			TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link);
		}
	}
	/* Lazily finish list setup (entry queue + per-list lock) on first use. */
	if (!(list->el_flags & EHL_INITTED)) {
		TAILQ_INIT(&list->el_entries);
		EHL_LOCK_INIT(list);
		list->el_flags |= EHL_INITTED;
	}
	lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

	/* EHE_DEAD_PRIORITY is reserved to mark entries pending removal. */
	KASSERT(epn->ee_priority != EHE_DEAD_PRIORITY,
	    ("%s: handler for %s registered with dead priority", __func__, name));

	/* sort it into the list */
	evhlog2(debug, "%s: adding item %p (function %p to \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(epn),
	    (void *)VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic *)epn)->func), name);
	EHL_LOCK(list);
	/* Insert before the first live entry with a strictly higher priority. */
	TAILQ_FOREACH(ep, &list->el_entries, ee_link) {
		if (ep->ee_priority != EHE_DEAD_PRIORITY &&
		    epn->ee_priority < ep->ee_priority) {
			TAILQ_INSERT_BEFORE(ep, epn, ee_link);
			break;
		}
	}
	/* No higher-priority entry found: append at the tail. */
	if (ep == NULL) {
		TAILQ_INSERT_TAIL(&list->el_entries, epn, ee_link);
	}
	EHL_UNLOCK(list);
	return epn;
}
184
185 eventhandler_tag
eventhandler_register(struct eventhandler_lists_ctxt * evthdlr_lists_ctxt,struct eventhandler_list * list,const char * name,void * func,struct eventhandler_entry_arg arg,int priority)186 eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
187 struct eventhandler_list *list, const char *name,
188 void *func, struct eventhandler_entry_arg arg, int priority)
189 {
190 struct eventhandler_entry_generic *__single eg;
191
192 /* allocate an entry for this handler, populate it */
193 eg = kalloc_type(struct eventhandler_entry_generic, Z_WAITOK_ZERO);
194 eg->func = func;
195 eg->ee.ee_arg = arg;
196 eg->ee.ee_priority = priority;
197
198 return eventhandler_register_internal(evthdlr_lists_ctxt, list, name, &eg->ee);
199 }
200
void
eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag)
{
	struct eventhandler_entry *__single ep = tag;

	/* Caller must already hold the per-list lock. */
	EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
	if (ep != NULL) {
		/* remove just this entry */
		if (list->el_runcount == 0) {
			evhlog2(debug, "%s: removing item %p from \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(ep),
			    list->el_name);
			/*
			 * We may have purged the list because of certain events.
			 * Make sure that is not the case when a specific entry
			 * is being removed.
			 */
			if (!TAILQ_EMPTY(&list->el_entries)) {
				TAILQ_REMOVE(&list->el_entries, ep, ee_link);
			}
			/* kfree_type may block: convert the list lock from spin first. */
			EHL_LOCK_CONVERT(list);
			kfree_type(struct eventhandler_entry, ep);
		} else {
			/*
			 * Invokers are active: defer the unlink/free by marking
			 * the entry dead; it gets pruned once invocation ends.
			 */
			evhlog2(debug, "%s: marking item %p from \"%s\" as dead", __func__,
			    (void *)VM_KERNEL_ADDRPERM(ep), list->el_name);
			ep->ee_priority = EHE_DEAD_PRIORITY;
		}
	} else {
		/* remove entire list */
		if (list->el_runcount == 0) {
			evhlog2(debug, "%s: removing all items from \"%s\"", __func__,
			    list->el_name);
			EHL_LOCK_CONVERT(list);
			while (!TAILQ_EMPTY(&list->el_entries)) {
				ep = TAILQ_FIRST(&list->el_entries);
				TAILQ_REMOVE(&list->el_entries, ep, ee_link);
				kfree_type(struct eventhandler_entry, ep);
			}
		} else {
			/* Invokers are active: mark every entry dead instead. */
			evhlog2(debug, "%s: marking all items from \"%s\" as dead",
			    __func__, list->el_name);
			TAILQ_FOREACH(ep, &list->el_entries, ee_link)
			ep->ee_priority = EHE_DEAD_PRIORITY;
		}
	}
	/*
	 * Block until all in-flight invocations drain; pruning of dead
	 * entries wakes us via wakeup(list). el_lock is dropped while asleep.
	 */
	while (list->el_runcount > 0) {
		msleep((caddr_t)list, &list->el_lock, PSPIN, "evhrm", 0);
	}
	EHL_UNLOCK(list);
}
250
251 /*
252 * Internal version for use when eventhandler list is already locked.
253 */
254 static struct eventhandler_list *
_eventhandler_find_list(struct eventhandler_lists_ctxt * evthdlr_lists_ctxt,const char * name)255 _eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
256 const char *name)
257 {
258 struct eventhandler_list *__single list;
259
260 VERIFY(evthdlr_lists_ctxt != NULL);
261
262 LCK_MTX_ASSERT(&evthdlr_lists_ctxt->eventhandler_mutex, LCK_MTX_ASSERT_OWNED);
263 TAILQ_FOREACH(list, &evthdlr_lists_ctxt->eventhandler_lists, el_link) {
264 if (!strlcmp(list->el_name, name, EVENTHANDLER_MAX_NAME)) {
265 break;
266 }
267 }
268 return list;
269 }
270
271 /*
272 * Lookup a "slow" list by name. Returns with the list locked.
273 */
274 struct eventhandler_list *
eventhandler_find_list(struct eventhandler_lists_ctxt * evthdlr_lists_ctxt,const char * name)275 eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
276 const char *name)
277 {
278 struct eventhandler_list *__single list;
279
280 if (evthdlr_lists_ctxt == NULL) {
281 evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
282 }
283
284 if (!evthdlr_lists_ctxt->eventhandler_lists_initted) {
285 return NULL;
286 }
287
288 /* scan looking for the requested list */
289 lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
290 list = _eventhandler_find_list(evthdlr_lists_ctxt, name);
291 if (list != NULL) {
292 lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
293 EHL_LOCK_SPIN(list);
294 }
295 lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
296
297 return list;
298 }
299
300 /*
301 * Prune "dead" entries from an eventhandler list.
302 */
303 void
eventhandler_prune_list(struct eventhandler_list * list)304 eventhandler_prune_list(struct eventhandler_list *list)
305 {
306 struct eventhandler_entry *__single ep, *__single en;
307
308 int pruned = 0;
309
310 evhlog2(debug, "%s: pruning list \"%s\"", __func__, list->el_name);
311 EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
312 TAILQ_FOREACH_SAFE(ep, &list->el_entries, ee_link, en) {
313 if (ep->ee_priority == EHE_DEAD_PRIORITY) {
314 TAILQ_REMOVE(&list->el_entries, ep, ee_link);
315 kfree_type(struct eventhandler_entry, ep);
316 pruned++;
317 }
318 }
319 if (pruned > 0) {
320 wakeup(list);
321 }
322 }
323
324 /*
325 * This should be called when last reference to an object
326 * is being released.
327 * The individual event type lists must be purged when the object
328 * becomes defunct.
329 */
330 void
eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt * evthdlr_lists_ctxt)331 eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
332 {
333 struct eventhandler_list *__single list = NULL;
334 struct eventhandler_list *__single list_next = NULL;
335
336 lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex);
337 TAILQ_FOREACH_SAFE(list, &evthdlr_lists_ctxt->eventhandler_lists,
338 el_link, list_next) {
339 VERIFY(TAILQ_EMPTY(&list->el_entries));
340 EHL_LOCK_DESTROY(list);
341 kfree_type(struct eventhandler_list, list);
342 }
343 lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
344 lck_mtx_destroy(&evthdlr_lists_ctxt->eventhandler_mutex,
345 &eventhandler_mutex_grp);
346 return;
347 }
348