xref: /xnu-10063.121.3/bsd/netinet/kpi_ipfilter.c (revision 2c2f96dc2b9a4408a43d3150ae9c105355ca3daa)
1 /*
2  * Copyright (c) 2004-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/param.h>  /* for definition of NULL */
30 #include <sys/errno.h>
31 #include <sys/malloc.h>
32 #include <sys/socket.h>
33 #include <sys/mbuf.h>
34 #include <sys/systm.h>
35 #include <libkern/OSAtomic.h>
36 
37 #include <machine/endian.h>
38 
39 #define _IP_VHL
40 #include <net/if_var.h>
41 #include <net/route.h>
42 #include <net/kpi_protocol.h>
43 #include <net/net_api_stats.h>
44 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
45 #include <skywalk/lib/net_filter_event.h>
46 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
47 
48 #include <netinet/in_systm.h>
49 #include <netinet/in.h>
50 #include <netinet/in_var.h>
51 #include <netinet6/in6_var.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/ip_var.h>
55 #include <netinet6/ip6_var.h>
56 #include <netinet/kpi_ipfilter_var.h>
57 
58 #include <stdbool.h>
59 
60 #if SKYWALK
61 #include <skywalk/core/skywalk_var.h>
62 #endif /* SKYWALK */
63 
64 /*
65  * kipf_lock and kipf_ref protect the linkage of the list of IP filters
66  * An IP filter can be removed only when kipf_ref is zero
67  * If an IP filter cannot be removed because kipf_ref is not null, then
68  * the IP filter is marked and kipf_delayed_remove is set so that when
69  * kipf_ref eventually goes down to zero, the IP filter is removed
70  */
71 static LCK_GRP_DECLARE(kipf_lock_grp, "IP Filter");
72 static LCK_MTX_DECLARE(kipf_lock, &kipf_lock_grp);
73 static u_int32_t kipf_ref = 0;
74 static u_int32_t kipf_delayed_remove = 0;
75 u_int32_t kipf_count = 0;
76 
77 __private_extern__ struct ipfilter_list ipv4_filters = TAILQ_HEAD_INITIALIZER(ipv4_filters);
78 __private_extern__ struct ipfilter_list ipv6_filters = TAILQ_HEAD_INITIALIZER(ipv6_filters);
79 __private_extern__ struct ipfilter_list tbr_filters = TAILQ_HEAD_INITIALIZER(tbr_filters);
80 
81 #undef ipf_addv4
82 #undef ipf_addv6
83 extern errno_t ipf_addv4(const struct ipf_filter *filter,
84     ipfilter_t *filter_ref);
85 extern errno_t ipf_addv6(const struct ipf_filter *filter,
86     ipfilter_t *filter_ref);
87 
88 static errno_t ipf_add(const struct ipf_filter *filter,
89     ipfilter_t *filter_ref, struct ipfilter_list *head, bool is_internal);
90 
91 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
92 static bool net_check_compatible_ipf(void);
93 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
94 
/*
 * Take a filter-invocation reference: while kipf_ref is non-zero, filters
 * may not be detached (ipf_remove() defers instead; see ipf_unref()).
 */
__private_extern__ void
ipf_ref(void)
{
	lck_mtx_lock(&kipf_lock);
	/* Guard against wrap-around of the reference count */
	if (os_inc_overflow(&kipf_ref)) {
		panic("kipf_ref overflow");
	}
	lck_mtx_unlock(&kipf_lock);
}
104 
105 __private_extern__ void
ipf_unref(void)106 ipf_unref(void)
107 {
108 	lck_mtx_lock(&kipf_lock);
109 
110 	if (os_dec_overflow(&kipf_ref)) {
111 		panic("kipf_ref underflow");
112 	}
113 
114 	if (kipf_ref == 0 && kipf_delayed_remove != 0) {
115 		struct ipfilter *filter;
116 
117 		while ((filter = TAILQ_FIRST(&tbr_filters))) {
118 			VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_count) > 0);
119 			if (filter->ipf_flags & IPFF_INTERNAL) {
120 				VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_os_count) > 0);
121 			}
122 
123 			ipf_detach_func ipf_detach = filter->ipf_filter.ipf_detach;
124 			void* cookie = filter->ipf_filter.cookie;
125 
126 			TAILQ_REMOVE(filter->ipf_head, filter, ipf_link);
127 			TAILQ_REMOVE(&tbr_filters, filter, ipf_tbr);
128 			kipf_delayed_remove--;
129 
130 			if (ipf_detach) {
131 				lck_mtx_unlock(&kipf_lock);
132 				ipf_detach(cookie);
133 				lck_mtx_lock(&kipf_lock);
134 				/* In case some filter got to run while we released the lock */
135 				if (kipf_ref != 0) {
136 					break;
137 				}
138 			}
139 		}
140 	}
141 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
142 	net_filter_event_mark(NET_FILTER_EVENT_IP,
143 	    net_check_compatible_ipf());
144 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
145 	lck_mtx_unlock(&kipf_lock);
146 }
147 
148 static errno_t
ipf_add(const struct ipf_filter * filter,ipfilter_t * filter_ref,struct ipfilter_list * head,bool is_internal)149 ipf_add(
150 	const struct ipf_filter *filter,
151 	ipfilter_t *filter_ref,
152 	struct ipfilter_list *head,
153 	bool is_internal)
154 {
155 	struct ipfilter *new_filter;
156 	if (filter->name == NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL)) {
157 		return EINVAL;
158 	}
159 
160 	new_filter = kalloc_type(struct ipfilter, Z_WAITOK | Z_NOFAIL);
161 
162 	lck_mtx_lock(&kipf_lock);
163 	new_filter->ipf_filter = *filter;
164 	new_filter->ipf_head = head;
165 
166 	TAILQ_INSERT_HEAD(head, new_filter, ipf_link);
167 
168 	OSIncrementAtomic64(&net_api_stats.nas_ipf_add_count);
169 	INC_ATOMIC_INT64_LIM(net_api_stats.nas_ipf_add_total);
170 	if (is_internal) {
171 		new_filter->ipf_flags = IPFF_INTERNAL;
172 		OSIncrementAtomic64(&net_api_stats.nas_ipf_add_os_count);
173 		INC_ATOMIC_INT64_LIM(net_api_stats.nas_ipf_add_os_total);
174 	}
175 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
176 	net_filter_event_mark(NET_FILTER_EVENT_IP,
177 	    net_check_compatible_ipf());
178 #endif /* SKYWALK && XNU_TARGET_OS_OSX */
179 
180 	lck_mtx_unlock(&kipf_lock);
181 
182 	*filter_ref = (ipfilter_t)new_filter;
183 
184 	/* This will force TCP to re-evaluate its use of TSO */
185 	OSAddAtomic(1, &kipf_count);
186 	routegenid_update();
187 
188 	return 0;
189 }
190 
191 errno_t
ipf_addv4_internal(const struct ipf_filter * filter,ipfilter_t * filter_ref)192 ipf_addv4_internal(
193 	const struct ipf_filter *filter,
194 	ipfilter_t *filter_ref)
195 {
196 	return ipf_add(filter, filter_ref, &ipv4_filters, true);
197 }
198 
199 errno_t
ipf_addv4(const struct ipf_filter * filter,ipfilter_t * filter_ref)200 ipf_addv4(
201 	const struct ipf_filter *filter,
202 	ipfilter_t *filter_ref)
203 {
204 	return ipf_add(filter, filter_ref, &ipv4_filters, false);
205 }
206 
207 errno_t
ipf_addv6_internal(const struct ipf_filter * filter,ipfilter_t * filter_ref)208 ipf_addv6_internal(
209 	const struct ipf_filter *filter,
210 	ipfilter_t *filter_ref)
211 {
212 	return ipf_add(filter, filter_ref, &ipv6_filters, true);
213 }
214 
215 errno_t
ipf_addv6(const struct ipf_filter * filter,ipfilter_t * filter_ref)216 ipf_addv6(
217 	const struct ipf_filter *filter,
218 	ipfilter_t *filter_ref)
219 {
220 	return ipf_add(filter, filter_ref, &ipv6_filters, false);
221 }
222 
/*
 * No-op input callback swapped in on a filter queued for delayed removal
 * (see ipf_remove()); keeps in-flight filter traversals harmless until
 * ipf_unref() completes the removal.  Always lets the packet continue.
 */
static errno_t
ipf_input_detached(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
{
#pragma unused(cookie, data, offset, protocol)

#if DEBUG
	printf("ipf_input_detached\n");
#endif /* DEBUG */

	return 0;
}
234 
/*
 * No-op output callback swapped in on a filter queued for delayed removal
 * (see ipf_remove()); keeps in-flight filter traversals harmless until
 * ipf_unref() completes the removal.  Always lets the packet continue.
 */
static errno_t
ipf_output_detached(void *cookie, mbuf_t *data, ipf_pktopts_t options)
{
#pragma unused(cookie, data, options)

#if DEBUG
	printf("ipf_output_detached\n");
#endif /* DEBUG */

	return 0;
}
246 
/*
 * Detach the filter identified by filter_ref.  If no filter callbacks are
 * currently running (kipf_ref == 0) the filter is unlinked, its detach
 * callback invoked and the memory freed.  Otherwise removal is deferred:
 * the filter is queued on tbr_filters with its input/output callbacks
 * replaced by no-op stubs, and ipf_unref() finishes the removal once the
 * last running filter invocation drains.
 *
 * Returns EINVAL for a bad reference, ENOENT when the filter is not on
 * its list, 0 on success (including deferred removal).
 */
errno_t
ipf_remove(
	ipfilter_t filter_ref)
{
	struct ipfilter *match = (struct ipfilter *)filter_ref;
	struct ipfilter_list *head;

	if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters)) {
		return EINVAL;
	}

	head = match->ipf_head;

	lck_mtx_lock(&kipf_lock);
	/* Walk the list to validate the caller-supplied reference */
	TAILQ_FOREACH(match, head, ipf_link) {
		if (match == (struct ipfilter *)filter_ref) {
			ipf_detach_func ipf_detach = match->ipf_filter.ipf_detach;
			void* cookie = match->ipf_filter.cookie;

			/*
			 * Cannot detach while there are filters running
			 */
			if (kipf_ref) {
				/* Defer: neutralize callbacks; ipf_unref() finishes */
				kipf_delayed_remove++;
				TAILQ_INSERT_TAIL(&tbr_filters, match, ipf_tbr);
				match->ipf_filter.ipf_input = ipf_input_detached;
				match->ipf_filter.ipf_output = ipf_output_detached;
				lck_mtx_unlock(&kipf_lock);
			} else {
				VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_count) > 0);
				if (match->ipf_flags & IPFF_INTERNAL) {
					VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ipf_add_os_count) > 0);
				}

				TAILQ_REMOVE(head, match, ipf_link);
				lck_mtx_unlock(&kipf_lock);

				/* Detach callback runs without the mutex held */
				if (ipf_detach) {
					ipf_detach(cookie);
				}
				kfree_type(struct ipfilter, match);

				/* This will force TCP to re-evaluate its use of TSO */
				OSAddAtomic(-1, &kipf_count);
				routegenid_update();
			}
			/* NOTE(review): both successful-removal paths return
			 * without refreshing net_filter_event_mark(); only the
			 * ENOENT path below does.  Confirm this is intentional. */
			return 0;
		}
	}
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_IP,
	    net_check_compatible_ipf());
#endif /* SKYWALK && XNU_TARGET_OS_OSX */

	lck_mtx_unlock(&kipf_lock);

	return ENOENT;
}
305 
/* NOTE(review): not referenced anywhere in this file — presumably a debug
 * knob poked from elsewhere; confirm before removing. */
int log_for_en1 = 0;
307 
/*
 * Re-inject a packet into the input path as if it had just been received.
 * 'data' must start with the IP header; the version nibble selects IPv4
 * vs IPv6.  When filter_ref is non-zero, a KERNEL_TAG_TYPE_IPFILT mbuf
 * tag carrying the reference is prepended so the injecting filter can be
 * skipped when the packet traverses the filter list again.
 *
 * Returns 0 on success, ENOTSUP for an unknown IP version, ENOMEM if the
 * mbuf tag cannot be allocated, or the proto_inject() error.
 */
errno_t
ipf_inject_input(
	mbuf_t data,
	ipfilter_t filter_ref)
{
	struct mbuf *m = (struct mbuf *)data;
	struct m_tag *mtag = 0;
	/* NOTE(review): dereferences the header before any pullup — assumes the
	 * caller guarantees the first IP header byte is contiguous.  Confirm. */
	struct ip *ip = mtod(m, struct ip *);
	struct ip6_hdr *ip6;
	u_int8_t        vers;
	int hlen;
	errno_t error = 0;
	protocol_family_t proto;
	struct in_ifaddr *ia = NULL;
	struct in_addr *pkt_dst = NULL;
	struct in6_ifaddr *ia6 = NULL;
	struct sockaddr_in6 pkt_dst6;

	/* IP version nibble picks the protocol family */
	vers = IP_VHL_V(ip->ip_vhl);

	switch (vers) {
	case 4:
		proto = PF_INET;
		break;
	case 6:
		proto = PF_INET6;
		break;
	default:
		error = ENOTSUP;
		goto done;
	}

	if (filter_ref == 0 && m->m_pkthdr.rcvif == 0) {
		/*
		 * Search for interface with the local address
		 */
		switch (proto) {
		case PF_INET:
			pkt_dst = &ip->ip_dst;
			lck_rw_lock_shared(&in_ifaddr_rwlock);
			TAILQ_FOREACH(ia, INADDR_HASH(pkt_dst->s_addr), ia_hash) {
				if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst->s_addr) {
					m->m_pkthdr.rcvif = ia->ia_ifp;
					break;
				}
			}
			lck_rw_done(&in_ifaddr_rwlock);
			break;

		case PF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			pkt_dst6.sin6_addr = ip6->ip6_dst;
			lck_rw_lock_shared(&in6_ifaddr_rwlock);
			TAILQ_FOREACH(ia6, IN6ADDR_HASH(&pkt_dst6.sin6_addr), ia6_hash) {
				if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &pkt_dst6.sin6_addr)) {
					m->m_pkthdr.rcvif = ia6->ia_ifp;
					break;
				}
			}
			lck_rw_done(&in6_ifaddr_rwlock);
			break;

		default:
			break;
		}

		/*
		 * If none found, fallback to loopback
		 */
		if (m->m_pkthdr.rcvif == NULL) {
			m->m_pkthdr.rcvif = lo_ifp;
		}

		/* Invalidate stale hardware-checksum state, then recompute the
		 * IPv4 header checksum so the packet passes input validation */
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		if (vers == 4) {
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_sum = 0;
			ip->ip_sum = in_cksum(m, hlen);
		}
	}
	if (filter_ref != 0) {
		/* Tag the packet so the injecting filter is not re-entered */
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
		    sizeof(ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			error = ENOMEM;
			goto done;
		}
		*(ipfilter_t *)(mtag->m_tag_data) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	error = proto_inject(proto, data);

done:
	return error;
}
405 
/*
 * Inject an IPv4 packet on the output path via ip_output().  When
 * filter_ref is non-zero, the packet is tagged so the injecting filter is
 * skipped on the outbound filter traversal.  'options' (may be NULL)
 * carries multicast parameters and interface-scoping flags that are
 * translated into ip_out_args.  Consumes the mbuf in all cases.
 */
static errno_t
ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options)
{
	struct route ro;
	struct ip *ip;
	struct mbuf *m = (struct mbuf *)data;
	errno_t error = 0;
	struct m_tag *mtag = NULL;
	struct ip_moptions *imo = NULL;
	struct ip_out_args ipoa;

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_sotc = SO_TC_UNSPEC;
	ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip)) {
		m = m_pullup(m, sizeof(struct ip));
		if (m == NULL) {
			/* m_pullup() freed the chain on failure */
			return ENOMEM;
		}
	}
	ip = (struct ip *)m_mtod(m);

	if (filter_ref != 0) {
		/* Tag the packet so the injecting filter is not re-entered */
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			m_freem(m);
			return ENOMEM;
		}
		*(ipfilter_t *)(mtag->m_tag_data) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	/* Build multicast options if requested (best effort: Z_NOWAIT) */
	if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) &&
	    (imo = ip_allocmoptions(Z_NOWAIT)) != NULL) {
		imo->imo_multicast_ifp = options->ippo_mcast_ifnet;
		imo->imo_multicast_ttl = options->ippo_mcast_ttl;
		imo->imo_multicast_loop = (u_char)options->ippo_mcast_loop;
	}

	/* Translate caller IPPOF_* flags into ip_out_args flags */
	if (options != NULL) {
		if (options->ippo_flags & IPPOF_SELECT_SRCIF) {
			ipoa.ipoa_flags |= IPOAF_SELECT_SRCIF;
		}
		if (options->ippo_flags & IPPOF_BOUND_IF) {
			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
			/* Bound interface scope is packed into the upper bits */
			ipoa.ipoa_boundif = options->ippo_flags >>
			    IPPOF_SHIFT_IFSCOPE;
		}
		if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) {
			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
		}
		if (options->ippo_flags & IPPOF_BOUND_SRCADDR) {
			ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) {
			ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_CONSTRAINED) {
			ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
		}
	}

	bzero(&ro, sizeof(struct route));

	/* Put ip_len and ip_off in host byte order, ip_output expects that */

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_len);
	NTOHS(ip->ip_off);
#endif

	/* Send; enforce source interface selection via IP_OUTARGS flag */
	error = ip_output(m, NULL, &ro,
	    IP_ALLOWBROADCAST | IP_RAWOUTPUT | IP_OUTARGS, imo, &ipoa);

	/* Release the route */
	ROUTE_RELEASE(&ro);

	if (imo != NULL) {
		IMO_REMREF(imo);
	}

	return error;
}
494 
/*
 * Inject an IPv6 packet on the output path via ip6_output().  Mirrors
 * ipf_injectv4_out(): tags the packet with filter_ref (if non-zero) so
 * the injecting filter is skipped, and translates 'options' (may be
 * NULL) into multicast options and ip6_out_args.  Consumes the mbuf in
 * all cases.
 */
static errno_t
ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options)
{
	struct route_in6 ro;
	struct ip6_hdr *ip6;
	struct mbuf *m = (struct mbuf *)data;
	errno_t error = 0;
	struct m_tag *mtag = NULL;
	struct ip6_moptions *im6o = NULL;
	struct ip6_out_args ip6oa;

	bzero(&ip6oa, sizeof(ip6oa));
	ip6oa.ip6oa_boundif = IFSCOPE_NONE;
	ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
	ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip6_hdr)) {
		m = m_pullup(m, sizeof(struct ip6_hdr));
		if (m == NULL) {
			/* m_pullup() freed the chain on failure */
			return ENOMEM;
		}
	}
	ip6 = (struct ip6_hdr *)m_mtod(m);

	if (filter_ref != 0) {
		/* Tag the packet so the injecting filter is not re-entered */
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			m_freem(m);
			return ENOMEM;
		}
		*(ipfilter_t *)(mtag->m_tag_data) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	/* Build multicast options if requested (best effort: Z_NOWAIT) */
	if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) &&
	    (im6o = ip6_allocmoptions(Z_NOWAIT)) != NULL) {
		im6o->im6o_multicast_ifp = options->ippo_mcast_ifnet;
		im6o->im6o_multicast_hlim = options->ippo_mcast_ttl;
		im6o->im6o_multicast_loop = (u_char)options->ippo_mcast_loop;
	}

	/* Translate caller IPPOF_* flags into ip6_out_args flags */
	if (options != NULL) {
		if (options->ippo_flags & IPPOF_SELECT_SRCIF) {
			ip6oa.ip6oa_flags |= IP6OAF_SELECT_SRCIF;
		}
		if (options->ippo_flags & IPPOF_BOUND_IF) {
			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
			/* Bound interface scope is packed into the upper bits */
			ip6oa.ip6oa_boundif = options->ippo_flags >>
			    IPPOF_SHIFT_IFSCOPE;
		}
		if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) {
			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
		}
		if (options->ippo_flags & IPPOF_BOUND_SRCADDR) {
			ip6oa.ip6oa_flags |= IP6OAF_BOUND_SRCADDR;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) {
			ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
		}
		if (options->ippo_flags & IPPOF_NO_IFF_CONSTRAINED) {
			ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
		}
	}

	bzero(&ro, sizeof(struct route_in6));

	/*
	 * Send  mbuf and ifscope information. Check for correctness
	 * of ifscope information is done while searching for a route in
	 * ip6_output.
	 */
	ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
	ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
	error = ip6_output(m, NULL, &ro, IPV6_OUTARGS, im6o, NULL, &ip6oa);

	/* Release the route */
	ROUTE_RELEASE(&ro);

	if (im6o != NULL) {
		IM6O_REMREF(im6o);
	}

	return error;
}
581 
582 errno_t
ipf_inject_output(mbuf_t data,ipfilter_t filter_ref,ipf_pktopts_t options)583 ipf_inject_output(
584 	mbuf_t data,
585 	ipfilter_t filter_ref,
586 	ipf_pktopts_t options)
587 {
588 	struct mbuf     *m = (struct mbuf *)data;
589 	u_int8_t        vers;
590 	errno_t         error = 0;
591 
592 #if SKYWALK
593 	sk_protect_t protect = sk_async_transmit_protect();
594 #endif /* SKYWALK */
595 
596 	/* Make one byte of the header contiguous in the mbuf */
597 	if (m->m_len < 1) {
598 		m = m_pullup(m, 1);
599 		if (m == NULL) {
600 			goto done;
601 		}
602 	}
603 
604 	vers = (*(u_int8_t *)m_mtod(m)) >> 4;
605 	switch (vers) {
606 	case 4:
607 		error = ipf_injectv4_out(data, filter_ref, options);
608 		break;
609 	case 6:
610 		error = ipf_injectv6_out(data, filter_ref, options);
611 		break;
612 	default:
613 		m_freem(m);
614 		error = ENOTSUP;
615 		break;
616 	}
617 
618 done:
619 #if SKYWALK
620 	sk_async_transmit_unprotect(protect);
621 #endif /* SKYWALK */
622 
623 	return error;
624 }
625 
626 __private_extern__ ipfilter_t
ipf_get_inject_filter(struct mbuf * m)627 ipf_get_inject_filter(struct mbuf *m)
628 {
629 	ipfilter_t filter_ref = 0;
630 	struct m_tag *mtag;
631 
632 	mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT);
633 	if (mtag) {
634 		filter_ref = *(ipfilter_t *)(mtag->m_tag_data);;
635 
636 		m_tag_delete(m, mtag);
637 	}
638 	return filter_ref;
639 }
640 
/*
 * Backing allocation for a KERNEL_TAG_TYPE_IPFILT mbuf tag: the m_tag
 * header immediately followed by its payload (the filter reference), so
 * one kalloc_type/kfree_type pair covers both.
 */
struct ipfilt_tag_container {
	struct m_tag    ipft_m_tag;      /* must be first: tag pointer == container pointer */
	ipfilter_t      ipft_filter_ref; /* payload exposed via m_tag_data */
};
645 
646 static struct m_tag *
m_tag_kalloc_ipfilt(u_int32_t id,u_int16_t type,uint16_t len,int wait)647 m_tag_kalloc_ipfilt(u_int32_t id, u_int16_t type, uint16_t len, int wait)
648 {
649 	struct ipfilt_tag_container *tag_container;
650 	struct m_tag *tag = NULL;
651 
652 	assert3u(id, ==, KERNEL_MODULE_TAG_ID);
653 	assert3u(type, ==, KERNEL_TAG_TYPE_IPFILT);
654 	assert3u(len, ==, sizeof(ipfilter_t));
655 
656 	if (len != sizeof(ipfilter_t)) {
657 		return NULL;
658 	}
659 
660 	tag_container = kalloc_type(struct ipfilt_tag_container, wait | M_ZERO);
661 	if (tag_container != NULL) {
662 		tag =  &tag_container->ipft_m_tag;
663 
664 		assert3p(tag, ==, tag_container);
665 
666 		M_TAG_INIT(tag, id, type, len, &tag_container->ipft_filter_ref, NULL);
667 	}
668 
669 	return tag;
670 }
671 
672 static void
m_tag_kfree_ipfilt(struct m_tag * tag)673 m_tag_kfree_ipfilt(struct m_tag *tag)
674 {
675 	struct ipfilt_tag_container *tag_container = (struct ipfilt_tag_container *)tag;
676 
677 	assert3u(tag->m_tag_len, ==, sizeof(ipfilter_t));
678 
679 	kfree_type(struct ipfilt_tag_container, tag_container);
680 }
681 
682 void
ipfilter_register_m_tag(void)683 ipfilter_register_m_tag(void)
684 {
685 	int error;
686 
687 	error = m_register_internal_tag_type(KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t),
688 	    m_tag_kalloc_ipfilt, m_tag_kfree_ipfilt);
689 
690 	assert3u(error, ==, 0);
691 }
692 
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
/*
 * The IP-filter configuration is considered Skywalk-compatible only when
 * every attached filter is Apple-internal, i.e. the internal (os) count
 * accounts for the entire filter count.
 */
bool
net_check_compatible_ipf(void)
{
	return net_api_stats.nas_ipf_add_count <=
	       net_api_stats.nas_ipf_add_os_count;
}
#endif /* SKYWALK && XNU_TARGET_OS_OSX */
703