/*
 * Copyright (c) 2003-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
#include <skywalk/lib/net_filter_event.h>
#endif /* SKYWALK && XNU_TARGET_OS_OSX */
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <libkern/sysctl.h>
#include <libkern/OSDebug.h>

#include <os/refcnt.h>

#include <stdbool.h>
#include <string.h>

#if SKYWALK
#include <skywalk/core/skywalk_var.h>
#endif /* SKYWALK */

#define SFEF_ATTACHED 0x1 /* SFE is on socket list */
#define SFEF_NODETACH 0x2 /* Detach should not be called */
#define SFEF_NOSOCKET 0x4 /* Socket is gone */

struct socket_filter_entry {
    struct socket_filter_entry *sfe_next_onsocket;
    struct socket_filter_entry *sfe_next_onfilter;
    struct socket_filter_entry *sfe_next_oncleanup;

    struct socket_filter *sfe_filter;
    struct socket *sfe_socket;
    void *sfe_cookie;

    uint32_t sfe_flags;
    int32_t sfe_refcount;
};

struct socket_filter {
    TAILQ_ENTRY(socket_filter) sf_protosw_next;
    TAILQ_ENTRY(socket_filter) sf_global_next;
    struct socket_filter_entry *sf_entry_head;

    struct protosw *sf_proto;
    struct sflt_filter sf_filter;
    struct os_refcnt sf_refcount;
    uint32_t sf_flags;
};

#define SFF_INTERNAL 0x1

TAILQ_HEAD(socket_filter_list, socket_filter);
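
/*
 * Bookkeeping overview: each attachment of a filter to a socket is a
 * socket_filter_entry threaded onto three singly-linked lists -- the
 * socket's list (sfe_next_onsocket), the filter's list
 * (sfe_next_onfilter) and, once its last reference drops, the global
 * cleanup list (sfe_next_oncleanup).  sf_refcount guards the lifetime
 * of a registered filter; sfe_refcount guards each entry and defers
 * its teardown to the cleanup thread below.
 */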

static LCK_GRP_DECLARE(sock_filter_lock_grp, "socket filter lock");
static LCK_RW_DECLARE(sock_filter_lock, &sock_filter_lock_grp);
static LCK_MTX_DECLARE(sock_filter_cleanup_lock, &sock_filter_lock_grp);

static struct socket_filter_list sock_filter_head =
    TAILQ_HEAD_INITIALIZER(sock_filter_head);
static struct socket_filter_entry *sock_filter_cleanup_entries = NULL;
static thread_t sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);

#undef sflt_register
static errno_t sflt_register_common(const struct sflt_filter *filter, int domain,
    int type, int protocol, bool is_internal);
errno_t sflt_register(const struct sflt_filter *filter, int domain,
    int type, int protocol);

#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
static bool net_check_compatible_sfltr(void);
bool net_check_compatible_alf(void);
static bool net_check_compatible_parental_controls(void);
#endif /* SKYWALK && XNU_TARGET_OS_OSX */

#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
    /* Only IPv4 or IPv6 sockets can bypass filters */
    if (!(inp->inp_vflag & INP_IPV4) &&
        !(inp->inp_vflag & INP_IPV6)) {
        return 0;
    }
    /* Sockets that have this entitlement bypass socket filters. */
    if (INP_INTCOPROC_ALLOWED(inp)) {
        return 1;
    }
    /* Sockets bound to an intcoproc interface bypass socket filters. */
    if ((inp->inp_flags & INP_BOUND_IF) &&
        IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
        return 1;
    }
#if NECP
    /*
     * Make sure that the NECP policy is populated.
     * If result is not populated, the policy ID will be
     * NECP_KERNEL_POLICY_ID_NONE. Note that if the result
     * is populated, but there was no match, it will be
     * NECP_KERNEL_POLICY_ID_NO_MATCH.
     * Do not call inp_update_necp_policy() to avoid scoping
     * a socket prior to calls to bind().
     */
    if (inp->inp_policyresult.policy_id == NECP_KERNEL_POLICY_ID_NONE) {
        necp_socket_find_policy_match(inp, NULL, NULL, 0);
    }

    /* If the filter unit is marked to be "no filter", bypass filters */
    if (inp->inp_policyresult.results.filter_control_unit ==
        NECP_FILTER_UNIT_NO_FILTER) {
        return 1;
    }
#endif /* NECP */
    return 0;
}
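
/*
 * The hooks below pair this check with a so->so_filt == NULL test so
 * that sockets with no attached filters, or sockets permitted to
 * bypass filtering, skip the filter-entry walk entirely.
 */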

static void
sflt_retain_locked(struct socket_filter *filter)
{
    os_ref_retain_locked(&filter->sf_refcount);
}

static void
sflt_release_locked(struct socket_filter *filter)
{
    if (os_ref_release_locked(&filter->sf_refcount) == 0) {
        /* Call the unregistered function */
        if (filter->sf_filter.sf_unregistered) {
            lck_rw_unlock_exclusive(&sock_filter_lock);
            filter->sf_filter.sf_unregistered(
                filter->sf_filter.sf_handle);
            lck_rw_lock_exclusive(&sock_filter_lock);
        }

        /* Free the entry */
        kfree_type(struct socket_filter, filter);
    }
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
    if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
        panic("sflt_entry_retain - sfe_refcount <= 0");
        /* NOTREACHED */
    }
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
    SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
    if (old == 1) {
        /* That was the last reference */

        /* Take the cleanup lock */
        lck_mtx_lock(&sock_filter_cleanup_lock);

        /* Put this item on the cleanup list */
        entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
        sock_filter_cleanup_entries = entry;

        /* If the item is the first item in the list */
        if (entry->sfe_next_oncleanup == NULL) {
            if (sock_filter_cleanup_thread == NULL) {
                /* Create a thread */
                kernel_thread_start(sflt_cleanup_thread,
                    NULL, &sock_filter_cleanup_thread);
            } else {
                /* Wakeup the thread */
                wakeup(&sock_filter_cleanup_entries);
            }
        }

        /* Drop the cleanup lock */
        lck_mtx_unlock(&sock_filter_cleanup_lock);
    } else if (old <= 0) {
        panic("sflt_entry_release - sfe_refcount (%d) <= 0",
            (int)old);
        /* NOTREACHED */
    }
}
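
/*
 * Entries are not torn down at the release site: the final reference
 * may be dropped while the socket or filter locks are held, so the
 * sf_detach callout and the unlink/free work are deferred to the
 * dedicated cleanup thread below.
 */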

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
    while (1) {
        lck_mtx_lock(&sock_filter_cleanup_lock);
        while (sock_filter_cleanup_entries == NULL) {
            /* Sleep until we've got something better to do */
            msleep(&sock_filter_cleanup_entries,
                &sock_filter_cleanup_lock, PWAIT,
                "sflt_cleanup", NULL);
        }

        /* Pull the current list of dead items */
        struct socket_filter_entry *dead = sock_filter_cleanup_entries;
        sock_filter_cleanup_entries = NULL;

        /* Drop the lock */
        lck_mtx_unlock(&sock_filter_cleanup_lock);

        /* Take the socket filter lock */
        lck_rw_lock_exclusive(&sock_filter_lock);

        /* Cleanup every dead item */
        struct socket_filter_entry *entry;
        for (entry = dead; entry; entry = dead) {
            struct socket_filter_entry **nextpp;

            dead = entry->sfe_next_oncleanup;

            /* Call detach function if necessary - drop the lock */
            if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
                entry->sfe_filter->sf_filter.sf_detach) {
                entry->sfe_flags |= SFEF_NODETACH;
                lck_rw_unlock_exclusive(&sock_filter_lock);

                /*
                 * Warning - passing a potentially
                 * dead socket may be bad
                 */
                entry->sfe_filter->sf_filter.sf_detach(
                    entry->sfe_cookie, entry->sfe_socket);

                lck_rw_lock_exclusive(&sock_filter_lock);
            }

            /*
             * Pull entry off the socket list --
             * if the socket still exists
             */
            if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
                for (nextpp = &entry->sfe_socket->so_filt;
                    *nextpp;
                    nextpp = &(*nextpp)->sfe_next_onsocket) {
                    if (*nextpp == entry) {
                        *nextpp =
                            entry->sfe_next_onsocket;
                        break;
                    }
                }
            }

            /* Pull entry off the filter list */
            for (nextpp = &entry->sfe_filter->sf_entry_head;
                *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
                if (*nextpp == entry) {
                    *nextpp = entry->sfe_next_onfilter;
                    break;
                }
            }

            /*
             * Release the filter -- may drop lock, but that's okay
             */
            sflt_release_locked(entry->sfe_filter);
            entry->sfe_socket = NULL;
            entry->sfe_filter = NULL;
            kfree_type(struct socket_filter_entry, entry);
        }

        /* Drop the socket filter lock */
        lck_rw_unlock_exclusive(&sock_filter_lock);
    }
    /* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
    int error = 0;
    struct socket_filter_entry *entry = NULL;

    if (sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    if (filter == NULL) {
        return ENOENT;
    }

    for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
        if (entry->sfe_filter->sf_filter.sf_handle ==
            filter->sf_filter.sf_handle) {
            return EEXIST;
        }
    }
    /* allocate the socket filter entry */
    entry = kalloc_type(struct socket_filter_entry, Z_WAITOK | Z_NOFAIL);

    /* Initialize the socket filter entry */
    entry->sfe_cookie = NULL;
    entry->sfe_flags = SFEF_ATTACHED;
    entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

    /* Put the entry in the filter list */
    sflt_retain_locked(filter);
    entry->sfe_filter = filter;
    entry->sfe_next_onfilter = filter->sf_entry_head;
    filter->sf_entry_head = entry;

    /* Put the entry on the socket filter list */
    entry->sfe_socket = so;
    entry->sfe_next_onsocket = so->so_filt;
    so->so_filt = entry;

    if (entry->sfe_filter->sf_filter.sf_attach) {
        /* Retain the entry while we call attach */
        sflt_entry_retain(entry);

        /*
         * Release the filter lock --
         * callers must be aware we will do this
         */
        lck_rw_unlock_exclusive(&sock_filter_lock);

        /* Unlock the socket */
        if (socklocked) {
            socket_unlock(so, 0);
        }

        /* It's finally safe to call the filter function */
        error = entry->sfe_filter->sf_filter.sf_attach(
            &entry->sfe_cookie, so);

        /* Lock the socket again */
        if (socklocked) {
            socket_lock(so, 0);
        }

        /* Lock the filters again */
        lck_rw_lock_exclusive(&sock_filter_lock);

        /*
         * If the attach function returns an error,
         * this filter must be detached
         */
        if (error) {
            /* don't call sf_detach */
            entry->sfe_flags |= SFEF_NODETACH;
            sflt_detach_locked(entry);
        }

        /* Release the retain we held through the attach call */
        sflt_entry_release(entry);
    }

    return error;
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
    if (socket == NULL || handle == 0) {
        return EINVAL;
    }

    int result = EINVAL;

    lck_rw_lock_exclusive(&sock_filter_lock);

    struct socket_filter *filter = NULL;
    TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
        if (filter->sf_filter.sf_handle == handle) {
            break;
        }
    }

    if (filter) {
        result = sflt_attach_locked(socket, filter, 1);
    }

    lck_rw_unlock_exclusive(&sock_filter_lock);

    return result;
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
    if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
        entry->sfe_flags &= ~SFEF_ATTACHED;
        sflt_entry_release(entry);
    }
}
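
/*
 * Detaching only clears SFEF_ATTACHED and drops the reference that
 * flag represents; the entry itself is unlinked and freed by
 * sflt_cleanup_thread once its last reference is released.
 */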

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
    /*
     * Can only register socket filter for internet protocols
     */
    if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
        return;
    }

    /*
     * Point to the real protosw, as so_proto might have been
     * pointed to a modified version.
     */
    struct protosw *proto = so->so_proto->pr_protosw;

    lck_rw_lock_shared(&sock_filter_lock);
    if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
        /* Promote lock to exclusive */
        if (!lck_rw_lock_shared_to_exclusive(&sock_filter_lock)) {
            lck_rw_lock_exclusive(&sock_filter_lock);
        }

        /*
         * Warning: A filter unregistering will be pulled out of
         * the list. This could happen while we drop the lock in
         * sflt_attach_locked or sflt_release_locked. For this
         * reason we retain a reference on the filter (or next_filter)
         * while calling this function. This protects us from a panic,
         * but it could result in a socket being created without all
         * of the global filters if we're attaching a filter as it
         * is removed, if that's possible.
         */
        struct socket_filter *filter =
            TAILQ_FIRST(&proto->pr_filter_head);

        sflt_retain_locked(filter);

        while (filter) {
            struct socket_filter *filter_next;
            /*
             * Warning: sflt_attach_locked
             * will drop the lock
             */
            sflt_attach_locked(so, filter, 0);

            filter_next = TAILQ_NEXT(filter, sf_protosw_next);
            if (filter_next) {
                sflt_retain_locked(filter_next);
            }

            /*
             * Warning: sflt_release_locked may remove
             * the filter from the queue
             */
            sflt_release_locked(filter);
            filter = filter_next;
        }
    }
    lck_rw_done(&sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
    /*
     * Fast path to avoid taking the lock
     */
    if (so->so_filt == NULL) {
        return;
    }

    lck_rw_lock_exclusive(&sock_filter_lock);

    struct socket_filter_entry *entry;

    while ((entry = so->so_filt) != NULL) {
        /* Pull filter off the socket */
        so->so_filt = entry->sfe_next_onsocket;
        entry->sfe_flags |= SFEF_NOSOCKET;

        /* Call detach */
        sflt_detach_locked(entry);

        /*
         * On sflt_termsock, we can't return until the detach function
         * has been called. Call the detach function - this is gross
         * because the socket filter entry could be freed when we drop
         * the lock, so we make copies on the stack and retain
         * everything we need before dropping the lock.
         */
        if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
            entry->sfe_filter->sf_filter.sf_detach) {
            void *sfe_cookie = entry->sfe_cookie;
            struct socket_filter *sfe_filter = entry->sfe_filter;

            /* Retain the socket filter */
            sflt_retain_locked(sfe_filter);

            /* Mark that we've called the detach function */
            entry->sfe_flags |= SFEF_NODETACH;

            /* Drop the lock before calling the detach function */
            lck_rw_unlock_exclusive(&sock_filter_lock);
            sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
            lck_rw_lock_exclusive(&sock_filter_lock);

            /* Release the filter */
            sflt_release_locked(sfe_filter);
        }
    }

    lck_rw_unlock_exclusive(&sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
    if (so->so_filt == NULL) {
        return;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_notify &&
            (handle == 0 ||
            entry->sfe_filter->sf_filter.sf_handle == handle)) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                unlocked = 1;
                socket_unlock(so, 0);
            }

            /* Finally call the filter */
            entry->sfe_filter->sf_filter.sf_notify(
                entry->sfe_cookie, so, event, param);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked != 0) {
        socket_lock(so, 0);
    }
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
    sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
    sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_ioctl) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_ioctl(
                entry->sfe_cookie, so, cmd, data);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_bind) {
            /*
             * Retain the filter entry and
             * release the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_bind(
                entry->sfe_cookie, so, nam);

            /*
             * Take the socket filter lock again and
             * release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_listen(struct socket *so)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_listen) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_listen(
                entry->sfe_cookie, so);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_accept) {
            /*
             * Retain the filter entry and
             * release the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_accept(
                entry->sfe_cookie, head, so, local, remote);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_getsockname) {
            /*
             * Retain the filter entry and
             * release the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_getsockname(
                entry->sfe_cookie, so, local);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_getpeername) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_getpeername(
                entry->sfe_cookie, so, remote);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_connect_in) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_connect_in(
                entry->sfe_cookie, so, remote);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_connect_out) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_connect_out(
                entry->sfe_cookie, so, nam);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
    char buf[SOCK_MAXADDRLEN];
    struct sockaddr *sa;
    int error;

    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    /*
     * Workaround for rdar://23362120
     * Always pass a buffer that can hold an IPv6 socket address
     */
    bzero(buf, sizeof(buf));
    bcopy(nam, buf, nam->sa_len);
    sa = (struct sockaddr *)buf;

    error = sflt_connectout_common(so, sa);
    if (error != 0) {
        return error;
    }

    /*
     * If the address was modified, copy it back
     */
    if (bcmp(sa, nam, nam->sa_len) != 0) {
        bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
    }

    return 0;
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_setoption) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_setoption(
                entry->sfe_cookie, so, sopt);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_getoption) {
            /*
             * Retain the filter entry and release
             * the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_getoption(
                entry->sfe_cookie, so, sopt);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int setsendthread = 0;
    int error = 0;

    lck_rw_lock_shared(&sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        /* skip if this is a subflow socket */
        if (so->so_flags & SOF_MP_SUBFLOW) {
            continue;
        }
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_data_out) {
            /*
             * Retain the filter entry and
             * release the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                if (so->so_send_filt_thread == NULL) {
                    setsendthread = 1;
                    so->so_send_filt_thread =
                        current_thread();
                }
                socket_unlock(so, 0);
                unlocked = 1;
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_data_out(
                entry->sfe_cookie, so, to, data, control, flags);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
        if (setsendthread) {
            so->so_send_filt_thread = NULL;
        }
    }

    return error;
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
    if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
        return 0;
    }

    struct socket_filter_entry *entry;
    int error = 0;
    int unlocked = 0;

    lck_rw_lock_shared(&sock_filter_lock);

    for (entry = so->so_filt; entry && (error == 0);
        entry = entry->sfe_next_onsocket) {
        /* skip if this is a subflow socket */
        if (so->so_flags & SOF_MP_SUBFLOW) {
            continue;
        }
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_data_in) {
            /*
             * Retain the filter entry and
             * release the socket filter lock
             */
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(&sock_filter_lock);

            /* If the socket isn't already unlocked, unlock it */
            if (unlocked == 0) {
                unlocked = 1;
                socket_unlock(so, 0);
            }

            /* Call the filter */
            error = entry->sfe_filter->sf_filter.sf_data_in(
                entry->sfe_cookie, so, from, data, control, flags);

            /*
             * Take the socket filter lock again
             * and release the entry
             */
            lck_rw_lock_shared(&sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(&sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
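
/*
 * A note on data filters: per the kpi_socketfilter.h contract, returning
 * EJUSTRETURN from sf_data_in/sf_data_out tells the stack to stop
 * processing the mbuf chain without freeing it, leaving ownership with
 * the filter; swallowed data is conventionally handed back later through
 * sock_inject_data_in/sock_inject_data_out below.
 */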

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
    socket_lock(socket, 1);
    errno_t result = sflt_attach_internal(socket, handle);
    socket_unlock(socket, 1);
    return result;
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
    struct socket_filter_entry *entry;
    errno_t result = 0;

    if (socket == NULL || handle == 0) {
        return EINVAL;
    }

    lck_rw_lock_exclusive(&sock_filter_lock);
    for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
        if (entry->sfe_filter->sf_filter.sf_handle == handle &&
            (entry->sfe_flags & SFEF_ATTACHED) != 0) {
            break;
        }
    }

    if (entry != NULL) {
        sflt_detach_locked(entry);
    }
    lck_rw_unlock_exclusive(&sock_filter_lock);

    return result;
}

struct solist {
    struct solist *next;
    struct socket *so;
};

static errno_t
sflt_register_common(const struct sflt_filter *filter, int domain, int type,
    int protocol, bool is_internal)
{
    struct socket_filter *sock_filt = NULL;
    struct socket_filter *match = NULL;
    int error = 0;
    struct protosw *pr;
    unsigned int len;
    struct socket *so;
    struct inpcb *inp;
    struct solist *solisthead = NULL, *solist = NULL;

    if ((domain != PF_INET) && (domain != PF_INET6)) {
        return ENOTSUP;
    }

    pr = pffindproto(domain, protocol, type);
    if (pr == NULL) {
        return ENOENT;
    }

    if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
        filter->sf_handle == 0 || filter->sf_name == NULL) {
        return EINVAL;
    }

    /* Allocate the socket filter */
    sock_filt = kalloc_type(struct socket_filter,
        Z_WAITOK | Z_ZERO | Z_NOFAIL);

    /* Legacy sflt_filter length; current structure minus extended */
    len = sizeof(*filter) - sizeof(struct sflt_filter_ext);
    /*
     * Include extended fields if filter defines SFLT_EXTENDED.
     * We've zeroed out our internal sflt_filter placeholder,
     * so any unused portion would have been taken care of.
     */
    if (filter->sf_flags & SFLT_EXTENDED) {
        unsigned int ext_len = filter->sf_len;

        if (ext_len > sizeof(struct sflt_filter_ext)) {
            ext_len = sizeof(struct sflt_filter_ext);
        }

        len += ext_len;
    }
    bcopy(filter, &sock_filt->sf_filter, len);

    lck_rw_lock_exclusive(&sock_filter_lock);
    /* Look for an existing entry */
    TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
        if (match->sf_filter.sf_handle ==
            sock_filt->sf_filter.sf_handle) {
            break;
        }
    }

    /* Add the entry only if there was no existing entry */
    if (match == NULL) {
        TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
        if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
            TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
                sf_protosw_next);
            sock_filt->sf_proto = pr;
        }
        os_ref_init(&sock_filt->sf_refcount, NULL);

        OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
        INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total);
        if (is_internal) {
            sock_filt->sf_flags |= SFF_INTERNAL;
            OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_os_count);
            INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
        }
    }
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
    net_filter_event_mark(NET_FILTER_EVENT_SOCKET,
        net_check_compatible_sfltr());
    net_filter_event_mark(NET_FILTER_EVENT_ALF,
        net_check_compatible_alf());
    net_filter_event_mark(NET_FILTER_EVENT_PARENTAL_CONTROLS,
        net_check_compatible_parental_controls());
#endif /* SKYWALK && XNU_TARGET_OS_OSX */

    lck_rw_unlock_exclusive(&sock_filter_lock);

    if (match != NULL) {
        kfree_type(struct socket_filter, sock_filt);
        return EEXIST;
    }

    if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) {
        return error;
    }

    /*
     * Setup the filter on the TCP and UDP sockets already created.
     */
#define SOLIST_ADD(_so) do { \
	solist->next = solisthead; \
	sock_retain((_so)); \
	solist->so = (_so); \
	solisthead = solist; \
} while (0)
    if (protocol == IPPROTO_TCP) {
        lck_rw_lock_shared(&tcbinfo.ipi_lock);
        LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
            so = inp->inp_socket;
            if (so == NULL || (so->so_state & SS_DEFUNCT) ||
                (!(so->so_flags & SOF_MP_SUBFLOW) &&
                (so->so_state & SS_NOFDREF)) ||
                !SOCK_CHECK_DOM(so, domain) ||
                !SOCK_CHECK_TYPE(so, type)) {
                continue;
            }
            solist = kalloc_type(struct solist, Z_NOWAIT);
            if (!solist) {
                continue;
            }
            SOLIST_ADD(so);
        }
        lck_rw_done(&tcbinfo.ipi_lock);
    } else if (protocol == IPPROTO_UDP) {
        lck_rw_lock_shared(&udbinfo.ipi_lock);
        LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
            so = inp->inp_socket;
            if (so == NULL || (so->so_state & SS_DEFUNCT) ||
                (!(so->so_flags & SOF_MP_SUBFLOW) &&
                (so->so_state & SS_NOFDREF)) ||
                !SOCK_CHECK_DOM(so, domain) ||
                !SOCK_CHECK_TYPE(so, type)) {
                continue;
            }
            solist = kalloc_type(struct solist, Z_NOWAIT);
            if (!solist) {
                continue;
            }
            SOLIST_ADD(so);
        }
        lck_rw_done(&udbinfo.ipi_lock);
    }
    /* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

    while (solisthead) {
        sflt_handle handle = filter->sf_handle;

        so = solisthead->so;
        socket_lock(so, 0);
        sflt_initsock(so);
        if (so->so_state & SS_ISCONNECTING) {
            sflt_notify_after_register(so, sock_evt_connecting,
                handle);
        } else if (so->so_state & SS_ISCONNECTED) {
            sflt_notify_after_register(so, sock_evt_connected,
                handle);
        } else if ((so->so_state &
            (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
            (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) {
            sflt_notify_after_register(so, sock_evt_disconnecting,
                handle);
        } else if ((so->so_state &
            (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) ==
            (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) {
            sflt_notify_after_register(so, sock_evt_disconnected,
                handle);
        } else if (so->so_state & SS_CANTSENDMORE) {
            sflt_notify_after_register(so, sock_evt_cantsendmore,
                handle);
        } else if (so->so_state & SS_CANTRCVMORE) {
            sflt_notify_after_register(so, sock_evt_cantrecvmore,
                handle);
        }
        socket_unlock(so, 0);
        /* XXX no easy way to post the sock_evt_closing event */
        sock_release(so);
        solist = solisthead;
        solisthead = solisthead->next;
        kfree_type(struct solist, solist);
    }

    return error;
}

errno_t
sflt_register_internal(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
    return sflt_register_common(filter, domain, type, protocol, true);
}

#define MAX_NUM_FRAMES 5

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
    return sflt_register_common(filter, domain, type, protocol, false);
}
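
/*
 * A minimal sketch of how a kext would adopt this KPI, using only the
 * public kpi_socketfilter.h interface registered above.  The EXAMPLE_*
 * names, the handle value, and the bundle-style filter name are
 * hypothetical, and the block is kept out of the build with #if 0.
 */
#if 0
#define EXAMPLE_SFLT_HANDLE 0x45584d50 /* hypothetical; must be unique */

static errno_t
example_sflt_attach(void **cookie, socket_t so)
{
#pragma unused(so)
    *cookie = NULL; /* per-socket state would be allocated here */
    return 0;       /* returning nonzero refuses the attach */
}

static void
example_sflt_detach(void *cookie, socket_t so)
{
#pragma unused(cookie, so) /* per-socket state would be freed here */
}

static const struct sflt_filter example_sflt = {
    .sf_handle = EXAMPLE_SFLT_HANDLE,
    .sf_flags = SFLT_GLOBAL, /* attach to every new matching socket */
    .sf_name = "com.example.sflt",
    .sf_attach = example_sflt_attach,
    .sf_detach = example_sflt_detach,
};

/* One registration per (domain, type, protocol) triple. */
static errno_t
example_sflt_start(void)
{
    return sflt_register(&example_sflt, PF_INET, SOCK_STREAM, IPPROTO_TCP);
}

static errno_t
example_sflt_stop(void)
{
    return sflt_unregister(EXAMPLE_SFLT_HANDLE);
}
#endif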

errno_t
sflt_unregister(sflt_handle handle)
{
    struct socket_filter *filter;
    lck_rw_lock_exclusive(&sock_filter_lock);

    /* Find the entry by the handle */
    TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
        if (filter->sf_filter.sf_handle == handle) {
            break;
        }
    }

    if (filter) {
        if (filter->sf_flags & SFF_INTERNAL) {
            VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_os_count) > 0);
        }
        VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0);

        /* Remove it from the global list */
        TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

        /* Remove it from the protosw list */
        if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
            TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
                filter, sf_protosw_next);
        }

        /* Detach from any sockets */
        struct socket_filter_entry *entry = NULL;

        for (entry = filter->sf_entry_head; entry;
            entry = entry->sfe_next_onfilter) {
            sflt_detach_locked(entry);
        }

        /* Release the filter */
        sflt_release_locked(filter);
    }
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
    net_filter_event_mark(NET_FILTER_EVENT_SOCKET,
        net_check_compatible_sfltr());
    net_filter_event_mark(NET_FILTER_EVENT_ALF,
        net_check_compatible_alf());
    net_filter_event_mark(NET_FILTER_EVENT_PARENTAL_CONTROLS,
        net_check_compatible_parental_controls());
#endif /* SKYWALK && XNU_TARGET_OS_OSX */

    lck_rw_unlock_exclusive(&sock_filter_lock);

    if (filter == NULL) {
        return ENOENT;
    }

    return 0;
}

errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
    int error = 0;

    if (so == NULL || data == NULL) {
        return EINVAL;
    }

    if (flags & sock_data_filt_flag_oob) {
        return ENOTSUP;
    }

    socket_lock(so, 1);

    /* reject if this is a subflow socket */
    if (so->so_flags & SOF_MP_SUBFLOW) {
        error = ENOTSUP;
        goto done;
    }

    if (from) {
        if (sbappendaddr(&so->so_rcv,
            (struct sockaddr *)(uintptr_t)from, data, control, NULL)) {
            sorwakeup(so);
        }
        goto done;
    }

    if (control) {
        if (sbappendcontrol(&so->so_rcv, data, control, NULL)) {
            sorwakeup(so);
        }
        goto done;
    }

    if (flags & sock_data_filt_flag_record) {
        if (control || from) {
            error = EINVAL;
            goto done;
        }
        if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) {
            sorwakeup(so);
        }
        goto done;
    }

    if (sbappend(&so->so_rcv, data)) {
        sorwakeup(so);
    }
done:
    socket_unlock(so, 1);
    return error;
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
    int sosendflags = 0;
    int error = 0;

    /* reject if this is a subflow socket */
    if (so->so_flags & SOF_MP_SUBFLOW) {
        return ENOTSUP;
    }

    if (flags & sock_data_filt_flag_oob) {
        sosendflags = MSG_OOB;
    }

#if SKYWALK
    sk_protect_t protect = sk_async_transmit_protect();
#endif /* SKYWALK */

    error = sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
        data, control, sosendflags);

#if SKYWALK
    sk_async_transmit_unprotect(protect);
#endif /* SKYWALK */

    return error;
}
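
/*
 * Injection note: sock_inject_data_in() appends directly to the receive
 * buffer and wakes any readers, so re-injected input does not traverse
 * the sf_data_in chain again.  sock_inject_data_out() hands the chain to
 * sosend(), which does run sflt_data_out(), so a filter re-injecting
 * output typically marks such traffic (e.g. via its cookie) to avoid
 * re-processing its own packets.
 */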

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
    return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(sockopt_t sopt)
{
    return sopt->sopt_level;
}

int
sockopt_name(sockopt_t sopt)
{
    return sopt->sopt_name;
}

size_t
sockopt_valsize(sockopt_t sopt)
{
    return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
    return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
    return sooptcopyout(sopt, data, len);
}
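
/*
 * A minimal sketch of an sf_getoption callback built on the accessors
 * above; the EXAMPLE_* names and the option value are hypothetical, and
 * the block is kept out of the build with #if 0.  Returning EJUSTRETURN
 * is the kpi_socketfilter.h convention for "handled here, skip the
 * normal getsockopt processing".
 */
#if 0
#define EXAMPLE_SO_STAT 0x10000 /* hypothetical option name */

struct example_stat {
    uint64_t es_rx_bytes;
};

static errno_t
example_sflt_getoption(void *cookie, socket_t so, sockopt_t sopt)
{
#pragma unused(so)
    struct example_stat *es = cookie; /* per-socket state from sf_attach */

    if (sockopt_direction(sopt) != sockopt_get ||
        sockopt_level(sopt) != SOL_SOCKET ||
        sockopt_name(sopt) != EXAMPLE_SO_STAT) {
        return 0; /* not ours; continue normal processing */
    }
    if (sockopt_valsize(sopt) < sizeof(*es)) {
        return EINVAL;
    }
    /* Copy the per-socket state out to the caller's buffer. */
    errno_t error = sockopt_copyout(sopt, es, sizeof(*es));
    return (error != 0) ? error : EJUSTRETURN;
}
#endif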

#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
static bool
net_check_compatible_sfltr(void)
{
    if (net_api_stats.nas_sfltr_register_count > net_api_stats.nas_sfltr_register_os_count ||
        net_api_stats.nas_sfltr_register_os_count > 4) {
        return false;
    }
    return true;
}

bool
net_check_compatible_alf(void)
{
    int alf_perm;
    size_t len = sizeof(alf_perm);
    errno_t error;

    error = kernel_sysctlbyname("net.alf.perm", &alf_perm, &len, NULL, 0);
    if (error == 0) {
        if (alf_perm != 0) {
            return false;
        }
    }
    return true;
}

static bool
net_check_compatible_parental_controls(void)
{
    /*
     * Assumes the first 4 OS socket filters are for ALF and additional
     * OS filters are for Parental Controls web content filter
     */
    if (net_api_stats.nas_sfltr_register_os_count > 4) {
        return false;
    }
    return true;
}
#endif /* SKYWALK && XNU_TARGET_OS_OSX */