xref: /xnu-12377.41.6/bsd/netinet/kpi_ipfilter.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2004-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/param.h>  /* for definition of NULL */
30 #include <sys/errno.h>
31 #include <sys/malloc.h>
32 #include <sys/socket.h>
33 #include <sys/mbuf.h>
34 #include <sys/systm.h>
35 #include <libkern/OSAtomic.h>
36 
37 #include <machine/endian.h>
38 
39 #define _IP_VHL
40 #include <net/if_var.h>
41 #include <net/route.h>
42 #include <net/kpi_protocol.h>
43 #include <net/net_api_stats.h>
44 #if SKYWALK
45 #include <skywalk/lib/net_filter_event.h>
46 #endif /* SKYWALK */
47 
48 #include <netinet/in_systm.h>
49 #include <netinet/in.h>
50 #include <netinet/in_var.h>
51 #include <netinet6/in6_var.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/ip_var.h>
55 #include <netinet6/ip6_var.h>
56 #include <netinet/kpi_ipfilter_var.h>
57 
58 #include <stdbool.h>
59 
60 #if SKYWALK
61 #include <skywalk/core/skywalk_var.h>
62 #endif /* SKYWALK */
63 
64 /*
65  * kipf_lock and kipf_ref protect the linkage of the list of IP filters
66  * An IP filter can be removed only when kipf_ref is zero
67  * If an IP filter cannot be removed because kipf_ref is not null, then
 * the IP filter is marked and kipf_delayed_remove is set so that when
69  * kipf_ref eventually goes down to zero, the IP filter is removed
70  */
/* Lock group and mutex serializing all IP-filter list manipulation */
static LCK_GRP_DECLARE(kipf_lock_grp, "IP Filter");
static LCK_MTX_DECLARE(kipf_lock, &kipf_lock_grp);
/* Number of filter invocations currently in flight (see ipf_ref/ipf_unref) */
static u_int32_t kipf_ref = 0;
/* Count of filters parked on tbr_filters awaiting deferred removal */
static u_int32_t kipf_delayed_remove = 0;
/* Total attached filters; per comments below, non-zero makes TCP re-evaluate TSO */
u_int32_t kipf_count = 0;

/* Active filter lists (v4/v6) and the "to-be-removed" holding list */
__private_extern__ struct ipfilter_list ipv4_filters = TAILQ_HEAD_INITIALIZER(ipv4_filters);
__private_extern__ struct ipfilter_list ipv6_filters = TAILQ_HEAD_INITIALIZER(ipv6_filters);
__private_extern__ struct ipfilter_list tbr_filters = TAILQ_HEAD_INITIALIZER(tbr_filters);

/*
 * NOTE(review): ipf_addv4/ipf_addv6 are presumably macro-remapped in the
 * KPI header; the #undef exposes the raw symbol names for the legacy
 * external entry points — confirm against kpi_ipfilter.h.
 */
#undef ipf_addv4
#undef ipf_addv6
extern errno_t ipf_addv4(const struct ipf_filter *filter,
    ipfilter_t *filter_ref);
extern errno_t ipf_addv6(const struct ipf_filter *filter,
    ipfilter_t *filter_ref);

static errno_t ipf_add(const struct ipf_filter *filter,
    ipfilter_t *filter_ref, struct ipfilter_list *head, bool is_internal);

#if SKYWALK
static bool net_check_compatible_ipf(void);
#endif /* SKYWALK */
94 
/*
 * Take a reference marking that an IP filter invocation is in flight.
 * While kipf_ref is non-zero, ipf_remove() must defer filter teardown
 * (see ipf_unref()).  Overflow is fatal.
 */
__private_extern__ void
ipf_ref(void)
{
	lck_mtx_lock(&kipf_lock);
	if (os_inc_overflow(&kipf_ref)) {
		panic("kipf_ref overflow");
	}
	lck_mtx_unlock(&kipf_lock);
}
104 
/*
 * Drop an in-flight reference.  When the count reaches zero, complete any
 * removals that ipf_remove() deferred: unlink each parked filter, fix up
 * the API statistics, and call its detach callback with the lock dropped.
 * Underflow is fatal.
 */
__private_extern__ void
ipf_unref(void)
{
	lck_mtx_lock(&kipf_lock);

	if (os_dec_overflow(&kipf_ref)) {
		panic("kipf_ref underflow");
	}

	if (kipf_ref == 0 && kipf_delayed_remove != 0) {
		struct ipfilter *filter;

		while ((filter = TAILQ_FIRST(&tbr_filters))) {
			VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_count) > 0);
			if (filter->ipf_flags & IPFF_INTERNAL) {
				VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_os_count) > 0);
			}

			/* Snapshot callback and cookie before unlinking */
			ipf_detach_func ipf_detach = filter->ipf_filter.ipf_detach;
			void *__single cookie = filter->ipf_filter.cookie;

			TAILQ_REMOVE(filter->ipf_head, filter, ipf_link);
			TAILQ_REMOVE(&tbr_filters, filter, ipf_tbr);
			kipf_delayed_remove--;

			/*
			 * NOTE(review): the ipfilter struct is unlinked but not
			 * freed here, unlike the immediate path in ipf_remove()
			 * which calls kfree_type() — confirm who owns/frees
			 * deferred filters.
			 */
			if (ipf_detach) {
				/* Call out without the mutex; detach may block */
				lck_mtx_unlock(&kipf_lock);
				ipf_detach(cookie);
				lck_mtx_lock(&kipf_lock);
				/* In case some filter got to run while we released the lock */
				if (kipf_ref != 0) {
					break;
				}
			}
		}
	}
#if SKYWALK
	/* Refresh the "all filters are OS-internal" compatibility mark */
	if (kernel_is_macos_or_server()) {
		net_filter_event_mark(NET_FILTER_EVENT_IP,
		    net_check_compatible_ipf());
	}
#endif /* SKYWALK */
	lck_mtx_unlock(&kipf_lock);
}
149 
150 static errno_t
ipf_add(const struct ipf_filter * filter,ipfilter_t * filter_ref,struct ipfilter_list * head,bool is_internal)151 ipf_add(
152 	const struct ipf_filter *filter,
153 	ipfilter_t *filter_ref,
154 	struct ipfilter_list *head,
155 	bool is_internal)
156 {
157 	struct ipfilter *new_filter;
158 	if (filter->name == NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL)) {
159 		return EINVAL;
160 	}
161 
162 	new_filter = kalloc_type(struct ipfilter, Z_WAITOK | Z_NOFAIL);
163 
164 	lck_mtx_lock(&kipf_lock);
165 	new_filter->ipf_filter = *filter;
166 	new_filter->ipf_head = head;
167 
168 	TAILQ_INSERT_HEAD(head, new_filter, ipf_link);
169 
170 	OSIncrementAtomic64(&net_api_stats.nas_ipf_add_count);
171 	INC_ATOMIC_INT64_LIM(net_api_stats.nas_ipf_add_total);
172 	if (is_internal) {
173 		new_filter->ipf_flags = IPFF_INTERNAL;
174 		OSIncrementAtomic64(&net_api_stats.nas_ipf_add_os_count);
175 		INC_ATOMIC_INT64_LIM(net_api_stats.nas_ipf_add_os_total);
176 	}
177 #if SKYWALK
178 	if (kernel_is_macos_or_server()) {
179 		net_filter_event_mark(NET_FILTER_EVENT_IP,
180 		    net_check_compatible_ipf());
181 	}
182 #endif /* SKYWALK */
183 
184 	lck_mtx_unlock(&kipf_lock);
185 
186 	*filter_ref = (ipfilter_t)new_filter;
187 
188 	/* This will force TCP to re-evaluate its use of TSO */
189 	OSAddAtomic(1, &kipf_count);
190 	routegenid_update();
191 
192 	return 0;
193 }
194 
195 errno_t
ipf_addv4_internal(const struct ipf_filter * filter,ipfilter_t * filter_ref)196 ipf_addv4_internal(
197 	const struct ipf_filter *filter,
198 	ipfilter_t *filter_ref)
199 {
200 	return ipf_add(filter, filter_ref, &ipv4_filters, true);
201 }
202 
203 errno_t
ipf_addv4(const struct ipf_filter * filter,ipfilter_t * filter_ref)204 ipf_addv4(
205 	const struct ipf_filter *filter,
206 	ipfilter_t *filter_ref)
207 {
208 	return ipf_add(filter, filter_ref, &ipv4_filters, false);
209 }
210 
211 errno_t
ipf_addv6_internal(const struct ipf_filter * filter,ipfilter_t * filter_ref)212 ipf_addv6_internal(
213 	const struct ipf_filter *filter,
214 	ipfilter_t *filter_ref)
215 {
216 	return ipf_add(filter, filter_ref, &ipv6_filters, true);
217 }
218 
219 errno_t
ipf_addv6(const struct ipf_filter * filter,ipfilter_t * filter_ref)220 ipf_addv6(
221 	const struct ipf_filter *filter,
222 	ipfilter_t *filter_ref)
223 {
224 	return ipf_add(filter, filter_ref, &ipv6_filters, false);
225 }
226 
227 static errno_t
ipf_input_detached(void * cookie,mbuf_t * data,int offset,u_int8_t protocol)228 ipf_input_detached(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
229 {
230 #pragma unused(cookie, data, offset, protocol)
231 
232 #if DEBUG
233 	printf("ipf_input_detached\n");
234 #endif /* DEBUG */
235 
236 	return 0;
237 }
238 
239 static errno_t
ipf_output_detached(void * cookie,mbuf_t * data,ipf_pktopts_t options)240 ipf_output_detached(void *cookie, mbuf_t *data, ipf_pktopts_t options)
241 {
242 #pragma unused(cookie, data, options)
243 
244 #if DEBUG
245 	printf("ipf_output_detached\n");
246 #endif /* DEBUG */
247 
248 	return 0;
249 }
250 
/*
 * Detach a filter previously attached with ipf_add().
 *
 * If filter invocations are in flight (kipf_ref != 0) the removal is
 * deferred: callbacks are swapped for inert stubs and the filter is parked
 * on tbr_filters until ipf_unref() completes the teardown.  Otherwise the
 * filter is unlinked, detached, and freed immediately.
 *
 * Returns 0 on success, EINVAL for a bad reference, ENOENT if the filter
 * is no longer on its list.
 */
errno_t
ipf_remove(
	ipfilter_t filter_ref)
{
	struct ipfilter *match = (struct ipfilter *)filter_ref;
	struct ipfilter_list *head;

	/* Reject a null reference or one not bound to a known list */
	if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters)) {
		return EINVAL;
	}

	head = match->ipf_head;

	lck_mtx_lock(&kipf_lock);
	/* Confirm the reference is still attached before touching it */
	TAILQ_FOREACH(match, head, ipf_link) {
		if (match == (struct ipfilter *)filter_ref) {
			ipf_detach_func ipf_detach = match->ipf_filter.ipf_detach;
			void *__single cookie = match->ipf_filter.cookie;

			/*
			 * Cannot detach when there are filters running
			 */
			if (kipf_ref) {
				/* Defer: neutralize callbacks, park on tbr_filters */
				kipf_delayed_remove++;
				TAILQ_INSERT_TAIL(&tbr_filters, match, ipf_tbr);
				match->ipf_filter.ipf_input = ipf_input_detached;
				match->ipf_filter.ipf_output = ipf_output_detached;
				lck_mtx_unlock(&kipf_lock);
			} else {
				VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_count) > 0);
				if (match->ipf_flags & IPFF_INTERNAL) {
					VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_os_count) > 0);
				}

				TAILQ_REMOVE(head, match, ipf_link);
				lck_mtx_unlock(&kipf_lock);

				/* Invoke the detach callback outside the lock */
				if (ipf_detach) {
					ipf_detach(cookie);
				}
				kfree_type(struct ipfilter, match);

				/* This will force TCP to re-evaluate its use of TSO */
				OSAddAtomic(-1, &kipf_count);
				routegenid_update();
			}
			/*
			 * NOTE(review): the SKYWALK compatibility mark below is
			 * only refreshed on the not-found path, not after a
			 * successful removal — confirm this is intentional.
			 */
			return 0;
		}
	}
#if SKYWALK
	if (kernel_is_macos_or_server()) {
		net_filter_event_mark(NET_FILTER_EVENT_IP,
		    net_check_compatible_ipf());
	}
#endif /* SKYWALK */

	lck_mtx_unlock(&kipf_lock);

	return ENOENT;
}
311 
int log_for_en1 = 0; /* NOTE(review): debug knob; not referenced in this file — confirm external users */
313 
/*
 * Re-inject a packet on the inbound path via proto_inject().
 *
 * A non-zero filter_ref tags the mbuf with KERNEL_TAG_TYPE_IPFILT —
 * presumably so the injecting filter can be recognized on re-processing
 * (see ipf_get_inject_filter()).  When filter_ref is zero and the mbuf has
 * no receive interface, an interface owning the destination address is
 * looked up (falling back to loopback) and, for IPv4, the header checksum
 * is recomputed.
 *
 * Returns 0 on success, ENOTSUP for an unknown IP version, ENOMEM if the
 * tag cannot be allocated, or the proto_inject() error.
 */
errno_t
ipf_inject_input(
	mbuf_t data,
	ipfilter_t filter_ref)
{
	struct mbuf *m = (struct mbuf *)data;
	struct m_tag *mtag = 0;
	struct ip *ip = mtod(m, struct ip *);
	struct ip6_hdr *ip6;
	u_int8_t        vers;
	int hlen;
	errno_t error = 0;
	protocol_family_t proto;
	struct in_ifaddr *ia = NULL;
	struct in_addr *pkt_dst = NULL;
	struct in6_ifaddr *ia6 = NULL;
	struct sockaddr_in6 pkt_dst6;

	/* The IP version nibble selects the protocol family */
	vers = IP_VHL_V(ip->ip_vhl);

	switch (vers) {
	case 4:
		proto = PF_INET;
		break;
	case 6:
		proto = PF_INET6;
		break;
	default:
		error = ENOTSUP;
		goto done;
	}

	if (filter_ref == 0 && m->m_pkthdr.rcvif == 0) {
		/*
		 * Search for interface with the local address
		 */
		switch (proto) {
		case PF_INET:
			pkt_dst = &ip->ip_dst;
			lck_rw_lock_shared(&in_ifaddr_rwlock);
			TAILQ_FOREACH(ia, INADDR_HASH(pkt_dst->s_addr), ia_hash) {
				if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst->s_addr) {
					m->m_pkthdr.rcvif = ia->ia_ifp;
					break;
				}
			}
			lck_rw_done(&in_ifaddr_rwlock);
			break;

		case PF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			pkt_dst6.sin6_addr = ip6->ip6_dst;
			lck_rw_lock_shared(&in6_ifaddr_rwlock);
			TAILQ_FOREACH(ia6, IN6ADDR_HASH(&pkt_dst6.sin6_addr), ia6_hash) {
				if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &pkt_dst6.sin6_addr)) {
					m->m_pkthdr.rcvif = ia6->ia_ifp;
					break;
				}
			}
			lck_rw_done(&in6_ifaddr_rwlock);
			break;

		default:
			break;
		}

		/*
		 * If none found, fallback to loopback
		 */
		if (m->m_pkthdr.rcvif == NULL) {
			m->m_pkthdr.rcvif = lo_ifp;
		}

		/* Invalidate stale offload state; refresh the v4 header checksum */
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		if (vers == 4) {
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_sum = 0;
			ip->ip_sum = in_cksum(m, hlen);
		}
	}
	if (filter_ref != 0) {
		/* Tag the packet with the injecting filter's reference */
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
		    sizeof(ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			error = ENOMEM;
			goto done;
		}
		*(ipfilter_t *)(mtag->m_tag_data) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	error = proto_inject(proto, data);

done:
	return error;
}
411 
/*
 * Re-inject a filtered IPv4 packet on the outbound path via ip_output().
 * A non-zero filter_ref tags the packet (KERNEL_TAG_TYPE_IPFILT) so the
 * injecting filter can be identified on re-processing; caller-supplied
 * ipf_pktopts are translated into multicast options and ip_out_args.
 * Consumes the mbuf on both success and failure.
 */
static errno_t
ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options)
{
	struct route ro;
	struct ip *ip;
	struct mbuf *m = (struct mbuf *)data;
	errno_t error = 0;
	struct m_tag *mtag = NULL;
	struct ip_moptions *imo = NULL;
	struct ip_out_args ipoa;

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_sotc = SO_TC_UNSPEC;
	ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip)) {
		/* m_pullup() frees the chain on failure */
		m = m_pullup(m, sizeof(struct ip));
		if (m == NULL) {
			return ENOMEM;
		}
	}
	ip = mtod(m, struct ip *);

	if (filter_ref != 0) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			m_freem(m);
			return ENOMEM;
		}
		*(ipfilter_t *)(mtag->m_tag_data) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	/* Translate caller-supplied multicast options, if any */
	if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) &&
	    (imo = ip_allocmoptions(Z_NOWAIT)) != NULL) {
		imo->imo_multicast_ifp = options->ippo_mcast_ifnet;
		imo->imo_multicast_ttl = options->ippo_mcast_ttl;
		imo->imo_multicast_loop = (u_char)options->ippo_mcast_loop;
	}

	/* Map ippo_flags onto the ip_out_args consumed by ip_output() */
	if (options != NULL) {
		if (options->ippo_flags & IPPOF_SELECT_SRCIF) {
			ipoa.ipoa_flags |= IPOAF_SELECT_SRCIF;
		}
		if (options->ippo_flags & IPPOF_BOUND_IF) {
			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
			/* The interface scope rides in the high bits of ippo_flags */
			ipoa.ipoa_boundif = options->ippo_flags >>
			    IPPOF_SHIFT_IFSCOPE;
		}
		if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) {
			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
		}
		if (options->ippo_flags & IPPOF_BOUND_SRCADDR) {
			ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) {
			ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_CONSTRAINED) {
			ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
		}
	}

	bzero(&ro, sizeof(struct route));

	/* Put ip_len and ip_off in host byte order, ip_output expects that */

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_len);
	NTOHS(ip->ip_off);
#endif

	/* Send; enforce source interface selection via IP_OUTARGS flag */
	error = ip_output(m, NULL, &ro,
	    IP_ALLOWBROADCAST | IP_RAWOUTPUT | IP_OUTARGS, imo, &ipoa);

	/* Release the route */
	ROUTE_RELEASE(&ro);

	if (imo != NULL) {
		IMO_REMREF(imo);
	}

	return error;
}
500 
/*
 * Re-inject a filtered IPv6 packet on the outbound path via ip6_output().
 * Mirrors ipf_injectv4_out(): tags the mbuf with filter_ref when non-zero
 * and translates ipf_pktopts into multicast options and ip6_out_args.
 * Consumes the mbuf on both success and failure.
 */
static errno_t
ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options)
{
	struct route_in6 ro;
	struct ip6_hdr *ip6;
	struct mbuf *m = (struct mbuf *)data;
	errno_t error = 0;
	struct m_tag *mtag = NULL;
	struct ip6_moptions *im6o = NULL;
	struct ip6_out_args ip6oa;

	bzero(&ip6oa, sizeof(ip6oa));
	ip6oa.ip6oa_boundif = IFSCOPE_NONE;
	ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
	ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip6_hdr)) {
		/* m_pullup() frees the chain on failure */
		m = m_pullup(m, sizeof(struct ip6_hdr));
		if (m == NULL) {
			return ENOMEM;
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);

	if (filter_ref != 0) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			m_freem(m);
			return ENOMEM;
		}
		*(ipfilter_t *)(mtag->m_tag_data) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	/* Translate caller-supplied multicast options, if any */
	if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) &&
	    (im6o = ip6_allocmoptions(Z_NOWAIT)) != NULL) {
		im6o->im6o_multicast_ifp = options->ippo_mcast_ifnet;
		im6o->im6o_multicast_hlim = options->ippo_mcast_ttl;
		im6o->im6o_multicast_loop = (u_char)options->ippo_mcast_loop;
	}

	/* Map ippo_flags onto the ip6_out_args consumed by ip6_output() */
	if (options != NULL) {
		if (options->ippo_flags & IPPOF_SELECT_SRCIF) {
			ip6oa.ip6oa_flags |= IP6OAF_SELECT_SRCIF;
		}
		if (options->ippo_flags & IPPOF_BOUND_IF) {
			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
			/* The interface scope rides in the high bits of ippo_flags */
			ip6oa.ip6oa_boundif = options->ippo_flags >>
			    IPPOF_SHIFT_IFSCOPE;
		}
		if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) {
			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
		}
		if (options->ippo_flags & IPPOF_BOUND_SRCADDR) {
			ip6oa.ip6oa_flags |= IP6OAF_BOUND_SRCADDR;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) {
			ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_CONSTRAINED) {
			ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
		}
	}

	bzero(&ro, sizeof(struct route_in6));

	/*
	 * Send  mbuf and ifscope information. Check for correctness
	 * of ifscope information is done while searching for a route in
	 * ip6_output.
	 */
	ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
	ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
	error = ip6_output(m, NULL, &ro, IPV6_OUTARGS, im6o, NULL, &ip6oa);

	/* Release the route */
	ROUTE_RELEASE(&ro);

	if (im6o != NULL) {
		IM6O_REMREF(im6o);
	}

	return error;
}
587 
588 errno_t
ipf_inject_output(mbuf_t data,ipfilter_t filter_ref,ipf_pktopts_t options)589 ipf_inject_output(
590 	mbuf_t data,
591 	ipfilter_t filter_ref,
592 	ipf_pktopts_t options)
593 {
594 	struct mbuf     *m = (struct mbuf *)data;
595 	u_int8_t        vers;
596 	errno_t         error = 0;
597 
598 #if SKYWALK
599 	sk_protect_t protect = sk_async_transmit_protect();
600 #endif /* SKYWALK */
601 
602 	/* Make one byte of the header contiguous in the mbuf */
603 	if (m->m_len < 1) {
604 		m = m_pullup(m, 1);
605 		if (m == NULL) {
606 			goto done;
607 		}
608 	}
609 
610 	vers = (*(u_int8_t *)m_mtod(m)) >> 4;
611 	switch (vers) {
612 	case 4:
613 		error = ipf_injectv4_out(data, filter_ref, options);
614 		break;
615 	case 6:
616 		error = ipf_injectv6_out(data, filter_ref, options);
617 		break;
618 	default:
619 		m_freem(m);
620 		error = ENOTSUP;
621 		break;
622 	}
623 
624 done:
625 #if SKYWALK
626 	sk_async_transmit_unprotect(protect);
627 #endif /* SKYWALK */
628 
629 	return error;
630 }
631 
632 __private_extern__ ipfilter_t
ipf_get_inject_filter(struct mbuf * m)633 ipf_get_inject_filter(struct mbuf *m)
634 {
635 	ipfilter_t __single filter_ref = 0;
636 	struct m_tag *mtag;
637 
638 	mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT);
639 	if (mtag) {
640 		filter_ref = *(ipfilter_t *)(mtag->m_tag_data);
641 
642 		m_tag_delete(m, mtag);
643 	}
644 	return filter_ref;
645 }
646 
/*
 * Single-allocation container pairing an m_tag header with its ipfilter_t
 * payload; the tag's data pointer refers to ipft_filter_ref.
 */
struct ipfilt_tag_container {
	struct m_tag    ipft_m_tag;     /* must be first: tag ptr == container ptr */
	ipfilter_t      ipft_filter_ref;
};
651 
652 static struct m_tag *
m_tag_kalloc_ipfilt(u_int32_t id,u_int16_t type,uint16_t len,int wait)653 m_tag_kalloc_ipfilt(u_int32_t id, u_int16_t type, uint16_t len, int wait)
654 {
655 	struct ipfilt_tag_container *tag_container;
656 	struct m_tag *tag = NULL;
657 
658 	assert3u(id, ==, KERNEL_MODULE_TAG_ID);
659 	assert3u(type, ==, KERNEL_TAG_TYPE_IPFILT);
660 	assert3u(len, ==, sizeof(ipfilter_t));
661 
662 	if (len != sizeof(ipfilter_t)) {
663 		return NULL;
664 	}
665 
666 	tag_container = kalloc_type(struct ipfilt_tag_container, wait | M_ZERO);
667 	if (tag_container != NULL) {
668 		tag =  &tag_container->ipft_m_tag;
669 
670 		assert3p(tag, ==, tag_container);
671 
672 		M_TAG_INIT(tag, id, type, len, &tag_container->ipft_filter_ref, NULL);
673 	}
674 
675 	return tag;
676 }
677 
678 static void
m_tag_kfree_ipfilt(struct m_tag * tag)679 m_tag_kfree_ipfilt(struct m_tag *tag)
680 {
681 	struct ipfilt_tag_container *tag_container = (struct ipfilt_tag_container *)tag;
682 
683 	assert3u(tag->m_tag_len, ==, sizeof(ipfilter_t));
684 
685 	kfree_type(struct ipfilt_tag_container, tag_container);
686 }
687 
688 void
ipfilter_register_m_tag(void)689 ipfilter_register_m_tag(void)
690 {
691 	int error;
692 
693 	error = m_register_internal_tag_type(KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t),
694 	    m_tag_kalloc_ipfilt, m_tag_kfree_ipfilt);
695 
696 	assert3u(error, ==, 0);
697 }
698 
#if SKYWALK
/*
 * The filter set is Skywalk-compatible only when every attached IP filter
 * was installed by the OS itself (add_count <= add_os_count).
 */
bool
net_check_compatible_ipf(void)
{
	return net_api_stats.nas_ipf_add_count <=
	       net_api_stats.nas_ipf_add_os_count;
}
#endif /* SKYWALK */
709