xref: /xnu-11417.121.6/bsd/netinet/ip_output.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650) !
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)ip_output.c	8.3 (Berkeley) 1/21/94
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #define _IP_VHL
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/socketvar.h>
79 #include <kern/locks.h>
80 #include <sys/sysctl.h>
81 #include <sys/mcache.h>
82 #include <sys/kdebug.h>
83 
84 #include <machine/endian.h>
85 #include <pexpert/pexpert.h>
86 #include <mach/sdt.h>
87 
88 #include <libkern/OSAtomic.h>
89 #include <libkern/OSByteOrder.h>
90 
91 #include <net/if.h>
92 #include <net/if_dl.h>
93 #include <net/if_types.h>
94 #include <net/route.h>
95 #include <net/ntstat.h>
96 #include <net/net_osdep.h>
97 #include <net/dlil.h>
98 #include <net/net_perf.h>
99 #include <net/droptap.h>
100 
101 #include <netinet/in.h>
102 #include <netinet/in_systm.h>
103 #include <netinet/ip.h>
104 #include <netinet/in_pcb.h>
105 #include <netinet/in_var.h>
106 #include <netinet/ip_var.h>
107 #include <netinet/kpi_ipfilter_var.h>
108 #include <netinet/in_tclass.h>
109 #include <netinet/udp.h>
110 
111 #include <netinet6/nd6.h>
112 
113 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIP, 1)
114 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETIP, 3)
115 #define DBG_FNC_IP_OUTPUT       NETDBG_CODE(DBG_NETIP, (1 << 8) | 1)
116 #define DBG_FNC_IPSEC4_OUTPUT   NETDBG_CODE(DBG_NETIP, (2 << 8) | 1)
117 
118 #if IPSEC
119 #include <netinet6/ipsec.h>
120 #include <netkey/key.h>
121 #if IPSEC_DEBUG
122 #include <netkey/key_debug.h>
123 #else
124 #define KEYDEBUG(lev, arg)
125 #endif
126 #endif /* IPSEC */
127 
128 #if NECP
129 #include <net/necp.h>
130 #endif /* NECP */
131 
132 
133 #if DUMMYNET
134 #include <netinet/ip_dummynet.h>
135 #endif
136 
137 #if PF
138 #include <net/pfvar.h>
139 #endif /* PF */
140 
141 #include <net/sockaddr_utils.h>
142 
u_short ip_id;          /* IP datagram id seed (readers/writers not visible in this chunk) */

/* Forward declarations: sysctl handlers and file-local helpers defined below. */
static int sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS;
static int sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS;
static int sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS;
static void ip_out_cksum_stats(int, u_int32_t);
static struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
static int ip_optcopy(struct ip *__indexable, struct ip *__indexable);
static int ip_pcbopts(int, struct mbuf **, struct mbuf *);
static void imo_trace(struct ip_moptions *, int);
static void ip_mloopback(struct ifnet *, struct ifnet *, struct mbuf *,
    struct sockaddr_in *, int);
static struct ifaddr *in_selectsrcif(struct ip *, struct route *, unsigned int);

extern struct ip_linklocal_stat ip_linklocal_stat;
extern unsigned int log_restricted;

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;
#endif

/* net.inet.ip.force_ipsum: force software IP checksumming (see sysctl below) */
static int force_ipsum = 0;
/* net.inet.ip.maxchainsent — NOTE(review): updated by code outside this chunk */
static int ip_maxchainsent = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxchainsent,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_maxchainsent, 0,
    "use dlil_output_list");

SYSCTL_INT(_net_inet_ip, OID_AUTO, force_ipsum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &force_ipsum, 0,
    "force IP checksum");
#if DEBUG
/* DEBUG only: count of outgoing ECT packets to remark as ECN CE (forged congestion) */
static int forge_ce = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, forge_ce,
    CTLFLAG_RW | CTLFLAG_LOCKED, &forge_ce, 0,
    "Forge ECN CE");
#endif /* DEBUG */

/* net.inet.ip.select_srcif_debug: verbose source-interface selection logging */
static int ip_select_srcif_debug = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, select_srcif_debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_select_srcif_debug, 0,
    "log source interface selection debug info");

/* net.inet.ip.output_perf: enables timing of ip_output_list() via net_perf_start_time() */
static int ip_output_measure = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_output_measure, 0, sysctl_reset_ip_output_stats, "I",
    "Do time measurement");

/* Histogram bin configuration for the measurement above */
static uint64_t ip_output_measure_bins = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_bins,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_output_measure_bins, 0,
    sysctl_ip_output_measure_bins, "I",
    "bins for chaining performance data histogram");

static net_perf_t net_perf;             /* accumulated output-performance samples */
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_data,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_output_getperf, "S,net_perf",
    "IP output performance data (struct net_perf, net/net_perf.h)");

/*
 * RFC 6864: when enabled, atomic (non-fragmentable) datagrams are sent with
 * ip_id == 0 instead of a random id (see the IP_OFF_IS_ATOMIC check in
 * ip_output_list()).
 */
__private_extern__ int rfc6864 = 1;
SYSCTL_INT(_net_inet_ip, OID_AUTO, rfc6864, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rfc6864, 0, "updated ip id field behavior");

#define IMO_TRACE_HIST_SIZE     32      /* size of trace history */

/* For gdb */
__private_extern__ unsigned int imo_trace_hist_size = IMO_TRACE_HIST_SIZE;

/*
 * Debug variant of struct ip_moptions carrying refcount trace history.
 * NOTE(review): presumably allocated in place of the plain struct when
 * imo_debug is set — allocation site is not visible in this chunk.
 */
struct ip_moptions_dbg {
	struct ip_moptions      imo;                    /* ip_moptions */
	u_int16_t               imo_refhold_cnt;        /* # of IMO_ADDREF */
	u_int16_t               imo_refrele_cnt;        /* # of IMO_REMREF */
	/*
	 * Alloc and free callers.
	 */
	ctrace_t                imo_alloc;
	ctrace_t                imo_free;
	/*
	 * Circular lists of IMO_ADDREF and IMO_REMREF callers.
	 */
	ctrace_t                imo_refhold[IMO_TRACE_HIST_SIZE];
	ctrace_t                imo_refrele[IMO_TRACE_HIST_SIZE];
};

#if DEBUG
static unsigned int imo_debug = 1;      /* debugging (enabled) */
#else
static unsigned int imo_debug;          /* debugging (disabled) */
#endif /* !DEBUG */

ZONE_DECLARE(imo_zone, struct ip_moptions);
#define IMO_ZONE_NAME  "ip_moptions"   /* zone name */
zone_t imo_zone;                       /* zone for ip_moptions */
238 
#if PF
/*
 * PF/dummynet reinjection hook for IPv4 output.
 *
 * Packages the output-path parameters (outgoing interface, route,
 * destination, flags and — only when IP_OUTARGS is set — the extended
 * output args) into a struct ip_fw_args and hands the mbuf chain to
 * pf_af_hook() for AF_INET, so that a packet returning from a dummynet
 * pipe is re-evaluated by PF.
 *
 * Returns the pf_af_hook() result (0 on success/continue; non-zero when
 * the packet was consumed or dropped).
 */
__attribute__((noinline))
static int
ip_output_pf_dn_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    struct pf_rule *dn_pf_rule, struct route *ro, struct sockaddr_in *dst, int flags,
    struct ip_out_args *ipoa)
{
	int rc;
	struct ip_fw_args args = {};

	args.fwa_pf_rule = dn_pf_rule;
	args.fwa_oif = ifp;
	args.fwa_ro = ro;
	args.fwa_dst = dst;
	args.fwa_oflags = flags;
	if (flags & IP_OUTARGS) {
		/* ipoa is only meaningful when the caller passed IP_OUTARGS */
		args.fwa_ipoa = ipoa;
	}
	rc = pf_af_hook(ifp, mppn, mp, AF_INET, FALSE, &args);

	return rc;
}

#endif /* PF */
263 
264 
/*
 * IP output.  The packet in mbuf chain m contains a skeletal IP
 * header (with len, off, ttl, proto, tos, src, dst).
 * The mbuf chain containing the packet will be freed.
 * The mbuf opt, if present, will not be freed.
 *
 * Thin single-packet wrapper: delegates to ip_output_list() with
 * packetchain == 0.  Returns whatever ip_output_list() returns
 * (0 on success, errno otherwise).
 */
int
ip_output(struct mbuf *m0, struct mbuf *opt, struct route *ro, int flags,
    struct ip_moptions *imo, struct ip_out_args *ipoa)
{
	return ip_output_list(m0, 0, opt, ro, flags, imo, ipoa);
}
277 
278 /*
279  * IP output.  The packet in mbuf chain m contains a skeletal IP
280  * header (with len, off, ttl, proto, tos, src, dst).
281  * The mbuf chain containing the packet will be freed.
282  * The mbuf opt, if present, will not be freed.
283  *
284  * Route ro MUST be non-NULL; if ro->ro_rt is valid, route lookup would be
285  * skipped and ro->ro_rt would be used.  Otherwise the result of route
286  * lookup is stored in ro->ro_rt.
287  *
288  * In the IP forwarding case, the packet will arrive with options already
289  * inserted, so must have a NULL opt pointer.
290  */
291 int
ip_output_list(struct mbuf * m0,int packetchain,struct mbuf * opt,struct route * ro,int flags,struct ip_moptions * imo,struct ip_out_args * ipoa)292 ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt,
293     struct route *ro, int flags, struct ip_moptions *imo,
294     struct ip_out_args *ipoa)
295 {
296 	struct ip *ip;
297 	struct ifnet *ifp = NULL;               /* not refcnt'd */
298 	mbuf_ref_t m = m0, prevnxt = NULL, *mppn = &prevnxt;
299 	int hlen = sizeof(struct ip);
300 	int len = 0, error = 0;
301 	struct sockaddr_in *__single dst = NULL;
302 	struct in_ifaddr *__single ia = NULL, *__single src_ia = NULL;
303 	struct in_addr pkt_dst;
304 	struct ipf_pktopts *__single ippo = NULL;
305 	ipfilter_t inject_filter_ref __single = NULL;
306 	mbuf_ref_t packetlist;
307 	uint32_t sw_csum, pktcnt = 0, scnt = 0, bytecnt = 0;
308 	uint32_t packets_processed = 0;
309 	unsigned int ifscope = IFSCOPE_NONE;
310 	struct flowadv *adv = NULL;
311 	struct timeval start_tv;
312 #if IPSEC
313 	struct socket *__single so = NULL;
314 	struct secpolicy *__single sp = NULL;
315 #endif /* IPSEC */
316 #if NECP
317 	necp_kernel_policy_result necp_result = 0;
318 	necp_kernel_policy_result_parameter necp_result_parameter;
319 	necp_kernel_policy_id necp_matched_policy_id = 0;
320 #endif /* NECP */
321 #if DUMMYNET
322 	struct m_tag *__single tag;
323 	struct ip_out_args saved_ipoa;
324 	struct sockaddr_in dst_buf;
325 #endif /* DUMMYNET */
326 	struct {
327 #if IPSEC
328 		struct ipsec_output_state ipsec_state;
329 #endif /* IPSEC */
330 #if NECP
331 		struct route necp_route;
332 #endif /* NECP */
333 #if DUMMYNET
334 		struct route saved_route;
335 #endif /* DUMMYNET */
336 		struct ipf_pktopts ipf_pktopts;
337 	} ipobz;
338 #define ipsec_state     ipobz.ipsec_state
339 #define necp_route      ipobz.necp_route
340 #define sro_fwd         ipobz.sro_fwd
341 #define saved_route     ipobz.saved_route
342 #define ipf_pktopts     ipobz.ipf_pktopts
343 	union {
344 		struct {
345 			boolean_t select_srcif : 1;     /* set once */
346 			boolean_t srcbound : 1;         /* set once */
347 			boolean_t nocell : 1;           /* set once */
348 			boolean_t isbroadcast : 1;
349 			boolean_t didfilter : 1;
350 			boolean_t noexpensive : 1;      /* set once */
351 			boolean_t noconstrained : 1;      /* set once */
352 			boolean_t awdl_unrestricted : 1;        /* set once */
353 			boolean_t management_allowed : 1;        /* set once */
354 			boolean_t ultra_constrained_allowed : 1; /* set once */
355 		};
356 		uint32_t raw;
357 	} ipobf = { .raw = 0 };
358 
359 	int interface_mtu = 0;
360 	struct pf_rule *__single dn_pf_rule = NULL;
361 	drop_reason_t drop_reason = DROP_REASON_UNSPECIFIED;
362 /*
363  * Here we check for restrictions when sending frames.
364  * N.B.: IPv4 over internal co-processor interfaces is not allowed.
365  */
366 #define IP_CHECK_RESTRICTIONS(_ifp, _ipobf)                                 \
367 	(((_ipobf).nocell && IFNET_IS_CELLULAR(_ifp)) ||                    \
368 	 ((_ipobf).noexpensive && IFNET_IS_EXPENSIVE(_ifp)) ||              \
369 	 ((_ipobf).noconstrained && IFNET_IS_CONSTRAINED(_ifp)) ||          \
370 	  (IFNET_IS_INTCOPROC(_ifp)) ||                                     \
371 	 (!(_ipobf).management_allowed && IFNET_IS_MANAGEMENT(_ifp)) ||     \
372 	 (!(_ipobf).ultra_constrained_allowed && IFNET_IS_ULTRA_CONSTRAINED(_ifp)) || \
373 	 (!(_ipobf).awdl_unrestricted && IFNET_IS_AWDL_RESTRICTED(_ifp)))
374 
375 	if (ip_output_measure) {
376 		net_perf_start_time(&net_perf, &start_tv);
377 	}
378 	KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
379 
380 	VERIFY(m0->m_flags & M_PKTHDR);
381 	packetlist = m0;
382 
383 	/* zero out {ipsec_state, args, sro_fwd, saved_route, ipf_pktops} */
384 	bzero(&ipobz, sizeof(ipobz));
385 	ippo = &ipf_pktopts;
386 
387 #if DUMMYNET
388 	if (SLIST_EMPTY(&m0->m_pkthdr.tags)) {
389 		goto ipfw_tags_done;
390 	}
391 
392 	/* Grab info from mtags prepended to the chain */
393 	if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
394 	    KERNEL_TAG_TYPE_DUMMYNET)) != NULL) {
395 		struct dn_pkt_tag       *dn_tag;
396 
397 		dn_tag = (struct dn_pkt_tag *)(tag->m_tag_data);
398 		dn_pf_rule = dn_tag->dn_pf_rule;
399 		opt = NULL;
400 		saved_route = dn_tag->dn_ro;
401 		ro = &saved_route;
402 
403 		imo = NULL;
404 		SOCKADDR_COPY(&dn_tag->dn_dst, &dst_buf, sizeof(dst_buf));
405 		dst = &dst_buf;
406 		ifp = dn_tag->dn_ifp;
407 		flags = dn_tag->dn_flags;
408 		if ((dn_tag->dn_flags & IP_OUTARGS)) {
409 			saved_ipoa = dn_tag->dn_ipoa;
410 			ipoa = &saved_ipoa;
411 		}
412 
413 		m_tag_delete(m0, tag);
414 	}
415 ipfw_tags_done:
416 #endif /* DUMMYNET */
417 
418 	m = m0;
419 	m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP | PKTF_IFAINFO);
420 
421 #if IPSEC
422 	if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
423 		/* If packet is bound to an interface, check bound policies */
424 		if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
425 		    (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
426 		    ipoa->ipoa_boundif != IFSCOPE_NONE) {
427 			if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND,
428 			    &flags, ipoa, &sp) != 0) {
429 				drop_reason = DROP_REASON_IP_OUTBOUND_IPSEC_POLICY;
430 				goto bad;
431 			}
432 		}
433 	}
434 #endif /* IPSEC */
435 
436 	VERIFY(ro != NULL);
437 
438 	if (flags & IP_OUTARGS) {
439 		/*
440 		 * In the forwarding case, only the ifscope value is used,
441 		 * as source interface selection doesn't take place.
442 		 */
443 		if ((ipobf.select_srcif = (!(flags & IP_FORWARDING) &&
444 		    (ipoa->ipoa_flags & IPOAF_SELECT_SRCIF)))) {
445 			ipf_pktopts.ippo_flags |= IPPOF_SELECT_SRCIF;
446 		}
447 
448 		if ((ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
449 		    ipoa->ipoa_boundif != IFSCOPE_NONE) {
450 			ifscope = ipoa->ipoa_boundif;
451 			ipf_pktopts.ippo_flags |=
452 			    (IPPOF_BOUND_IF | (ifscope << IPPOF_SHIFT_IFSCOPE));
453 		}
454 
455 		/* double negation needed for bool bit field */
456 		ipobf.srcbound = !!(ipoa->ipoa_flags & IPOAF_BOUND_SRCADDR);
457 		if (ipobf.srcbound) {
458 			ipf_pktopts.ippo_flags |= IPPOF_BOUND_SRCADDR;
459 		}
460 	} else {
461 		ipobf.select_srcif = FALSE;
462 		ipobf.srcbound = FALSE;
463 		ifscope = IFSCOPE_NONE;
464 		if (flags & IP_OUTARGS) {
465 			ipoa->ipoa_boundif = IFSCOPE_NONE;
466 			ipoa->ipoa_flags &= ~(IPOAF_SELECT_SRCIF |
467 			    IPOAF_BOUND_IF | IPOAF_BOUND_SRCADDR);
468 		}
469 	}
470 
471 	if (flags & IP_OUTARGS) {
472 		if (ipoa->ipoa_flags & IPOAF_NO_CELLULAR) {
473 			ipobf.nocell = true;
474 			ipf_pktopts.ippo_flags |= IPPOF_NO_IFT_CELLULAR;
475 		}
476 		if (ipoa->ipoa_flags & IPOAF_NO_EXPENSIVE) {
477 			ipobf.noexpensive = true;
478 			ipf_pktopts.ippo_flags |= IPPOF_NO_IFF_EXPENSIVE;
479 		}
480 		if (ipoa->ipoa_flags & IPOAF_NO_CONSTRAINED) {
481 			ipobf.noconstrained = true;
482 			ipf_pktopts.ippo_flags |= IPPOF_NO_IFF_CONSTRAINED;
483 		}
484 		if (ipoa->ipoa_flags & IPOAF_AWDL_UNRESTRICTED) {
485 			ipobf.awdl_unrestricted = true;
486 		}
487 		if (ipoa->ipoa_flags & IPOAF_MANAGEMENT_ALLOWED) {
488 			ipobf.management_allowed = true;
489 		}
490 		if (ipoa->ipoa_flags & IPOAF_ULTRA_CONSTRAINED_ALLOWED) {
491 			ipobf.ultra_constrained_allowed = true;
492 		}
493 		adv = &ipoa->ipoa_flowadv;
494 		adv->code = FADV_SUCCESS;
495 		ipoa->ipoa_flags &= ~IPOAF_RET_MASK;
496 	}
497 
498 #if IPSEC
499 	if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
500 		so = ipsec_getsocket(m);
501 		if (so != NULL) {
502 			(void) ipsec_setsocket(m, NULL);
503 		}
504 	}
505 #endif /* IPSEC */
506 
507 #if DUMMYNET
508 	if (dn_pf_rule != NULL) {
509 		/* dummynet already saw us */
510 		ip = mtod(m, struct ip *);
511 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
512 		pkt_dst = ip->ip_dst;
513 		if (ro->ro_rt != NULL) {
514 			RT_LOCK_SPIN(ro->ro_rt);
515 			ia = ifatoia(ro->ro_rt->rt_ifa);
516 			if (ia) {
517 				/* Become a regular mutex */
518 				RT_CONVERT_LOCK(ro->ro_rt);
519 				ifa_addref(&ia->ia_ifa);
520 			}
521 			RT_UNLOCK(ro->ro_rt);
522 		}
523 
524 		goto sendit;
525 	}
526 #endif /* DUMMYNET */
527 
528 loopit:
529 	packets_processed++;
530 	ipobf.isbroadcast = FALSE;
531 	ipobf.didfilter = FALSE;
532 
533 	VERIFY(m->m_flags & M_PKTHDR);
534 	/*
535 	 * No need to proccess packet twice if we've already seen it.
536 	 */
537 	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
538 		inject_filter_ref = ipf_get_inject_filter(m);
539 	} else {
540 		inject_filter_ref = NULL;
541 	}
542 
543 	if (opt) {
544 		m = ip_insertoptions(m, opt, &len);
545 		hlen = len;
546 		/* Update the chain */
547 		if (m != m0) {
548 			if (m0 == packetlist) {
549 				packetlist = m;
550 			}
551 			m0 = m;
552 		}
553 	}
554 	ip = mtod(m, struct ip *);
555 
556 	pkt_dst = ip->ip_dst;
557 
558 	/*
559 	 * We must not send if the packet is destined to network zero.
560 	 * RFC1122 3.2.1.3 (a) and (b).
561 	 */
562 	if (IN_ZERONET(ntohl(pkt_dst.s_addr))) {
563 		error = EHOSTUNREACH;
564 		drop_reason = DROP_REASON_IP_ZERO_NET;
565 		goto bad;
566 	}
567 
568 	/*
569 	 * Fill in IP header.
570 	 */
571 	if (!(flags & (IP_FORWARDING | IP_RAWOUTPUT))) {
572 		ip->ip_vhl = IP_MAKE_VHL(IPVERSION, hlen >> 2);
573 		ip->ip_off &= IP_DF;
574 		if (rfc6864 && IP_OFF_IS_ATOMIC(ip->ip_off)) {
575 			// Per RFC6864, value of ip_id is undefined for atomic ip packets
576 			ip->ip_id = 0;
577 		} else {
578 			ip->ip_id = ip_randomid((uint64_t)m);
579 		}
580 		OSAddAtomic(1, &ipstat.ips_localout);
581 	} else {
582 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
583 	}
584 
585 #if DEBUG
586 	/* For debugging, we let the stack forge congestion */
587 	if (forge_ce != 0 &&
588 	    ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1 ||
589 	    (ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT0)) {
590 		ip->ip_tos = (ip->ip_tos & ~IPTOS_ECN_MASK) | IPTOS_ECN_CE;
591 		forge_ce--;
592 	}
593 #endif /* DEBUG */
594 
595 	if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1) {
596 		m->m_pkthdr.pkt_ext_flags |= PKTF_EXT_L4S;
597 	}
598 
599 	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
600 	    ip->ip_p, ip->ip_off, ip->ip_len);
601 
602 	dst = SIN(&ro->ro_dst);
603 
604 	/*
605 	 * If there is a cached route,
606 	 * check that it is to the same destination
607 	 * and is still up.  If not, free it and try again.
608 	 * The address family should also be checked in case of sharing the
609 	 * cache with IPv6.
610 	 */
611 
612 	if (ro->ro_rt != NULL) {
613 		if (ROUTE_UNUSABLE(ro) && ip->ip_src.s_addr != INADDR_ANY &&
614 		    !(flags & (IP_ROUTETOIF | IP_FORWARDING))) {
615 			src_ia = ifa_foraddr(ip->ip_src.s_addr);
616 			if (src_ia == NULL) {
617 				OSAddAtomic(1, &ipstat.ips_src_addr_not_avail);
618 				error = EADDRNOTAVAIL;
619 				drop_reason = DROP_REASON_IP_SRC_ADDR_NO_AVAIL;
620 				goto bad;
621 			}
622 			ifa_remref(&src_ia->ia_ifa);
623 			src_ia = NULL;
624 		}
625 		/*
626 		 * Test rt_flags without holding rt_lock for performance
627 		 * reasons; if the route is down it will hopefully be
628 		 * caught by the layer below (since it uses this route
629 		 * as a hint) or during the next transmit.
630 		 */
631 		if (ROUTE_UNUSABLE(ro) || dst->sin_family != AF_INET ||
632 		    dst->sin_addr.s_addr != pkt_dst.s_addr) {
633 			ROUTE_RELEASE(ro);
634 		}
635 
636 		/*
637 		 * If we're doing source interface selection, we may not
638 		 * want to use this route; only synch up the generation
639 		 * count otherwise.
640 		 */
641 		if (!ipobf.select_srcif && ro->ro_rt != NULL &&
642 		    RT_GENID_OUTOFSYNC(ro->ro_rt)) {
643 			RT_GENID_SYNC(ro->ro_rt);
644 		}
645 	}
646 	if (ro->ro_rt == NULL) {
647 		SOCKADDR_ZERO(dst, sizeof(*dst));
648 		dst->sin_family = AF_INET;
649 		dst->sin_len = sizeof(*dst);
650 		dst->sin_addr = pkt_dst;
651 	}
652 	/*
653 	 * If routing to interface only,
654 	 * short circuit routing lookup.
655 	 */
656 	if (flags & IP_ROUTETOIF) {
657 		if (ia != NULL) {
658 			ifa_remref(&ia->ia_ifa);
659 		}
660 		if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL) {
661 			ia = ifatoia(ifa_ifwithnet(sintosa(dst)));
662 			if (ia == NULL) {
663 				OSAddAtomic(1, &ipstat.ips_noroute);
664 				error = ENETUNREACH;
665 				/* XXX IPv6 APN fallback notification?? */
666 				drop_reason = DROP_REASON_IP_DST_ADDR_NO_AVAIL;
667 				goto bad;
668 			}
669 		}
670 		ifp = ia->ia_ifp;
671 		ip->ip_ttl = 1;
672 		ipobf.isbroadcast = in_broadcast(dst->sin_addr, ifp);
673 		/*
674 		 * For consistency with other cases below.  Loopback
675 		 * multicast case is handled separately by ip_mloopback().
676 		 */
677 		if ((ifp->if_flags & IFF_LOOPBACK) &&
678 		    !IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
679 			m->m_pkthdr.rcvif = ifp;
680 			ip_setsrcifaddr_info(m, ifp->if_index, NULL);
681 			ip_setdstifaddr_info(m, ifp->if_index, NULL);
682 		}
683 	} else if (IN_MULTICAST(ntohl(pkt_dst.s_addr)) &&
684 	    imo != NULL && (ifp = imo->imo_multicast_ifp) != NULL) {
685 		/*
686 		 * Bypass the normal routing lookup for multicast
687 		 * packets if the interface is specified.
688 		 */
689 		ipobf.isbroadcast = FALSE;
690 		if (ia != NULL) {
691 			ifa_remref(&ia->ia_ifa);
692 		}
693 
694 		/* Macro takes reference on ia */
695 		IFP_TO_IA(ifp, ia);
696 	} else {
697 		struct ifaddr *ia0 = NULL;
698 		boolean_t cloneok = FALSE;
699 		/*
700 		 * Perform source interface selection; the source IP address
701 		 * must belong to one of the addresses of the interface used
702 		 * by the route.  For performance reasons, do this only if
703 		 * there is no route, or if the routing table has changed,
704 		 * or if we haven't done source interface selection on this
705 		 * route (for this PCB instance) before.
706 		 */
707 		if (ipobf.select_srcif &&
708 		    ip->ip_src.s_addr != INADDR_ANY && (ROUTE_UNUSABLE(ro) ||
709 		    !(ro->ro_flags & ROF_SRCIF_SELECTED))) {
710 			/* Find the source interface */
711 			ia0 = in_selectsrcif(ip, ro, ifscope);
712 
713 			/*
714 			 * If the source address belongs to a restricted
715 			 * interface and the caller forbids our using
716 			 * interfaces of such type, pretend that there is no
717 			 * route.
718 			 */
719 			if (ia0 != NULL &&
720 			    IP_CHECK_RESTRICTIONS(ia0->ifa_ifp, ipobf)) {
721 				if (log_restricted) {
722 					printf("%s:%d pid %d (%s) is unable to transmit packets on %s\n",
723 					    __func__, __LINE__,
724 					    proc_getpid(current_proc()), proc_best_name(current_proc()),
725 					    ia0->ifa_ifp->if_xname);
726 				}
727 				ifa_remref(ia0);
728 				ia0 = NULL;
729 				error = EHOSTUNREACH;
730 				if (flags & IP_OUTARGS) {
731 					ipoa->ipoa_flags |= IPOAF_R_IFDENIED;
732 				}
733 				drop_reason = DROP_REASON_IP_TO_RESTRICTED_IF;
734 				goto bad;
735 			}
736 
737 			/*
738 			 * If the source address is spoofed (in the case of
739 			 * IP_RAWOUTPUT on an unbounded socket), or if this
740 			 * is destined for local/loopback, just let it go out
741 			 * using the interface of the route.  Otherwise,
742 			 * there's no interface having such an address,
743 			 * so bail out.
744 			 */
745 			if (ia0 == NULL && (!(flags & IP_RAWOUTPUT) ||
746 			    ipobf.srcbound) && ifscope != lo_ifp->if_index) {
747 				error = EADDRNOTAVAIL;
748 				OSAddAtomic(1, &ipstat.ips_src_addr_not_avail);
749 				drop_reason = DROP_REASON_IP_SRC_ADDR_NO_AVAIL;
750 				goto bad;
751 			}
752 
753 			/*
754 			 * If the caller didn't explicitly specify the scope,
755 			 * pick it up from the source interface.  If the cached
756 			 * route was wrong and was blown away as part of source
757 			 * interface selection, don't mask out RTF_PRCLONING
758 			 * since that route may have been allocated by the ULP,
759 			 * unless the IP header was created by the caller or
760 			 * the destination is IPv4 LLA.  The check for the
761 			 * latter is needed because IPv4 LLAs are never scoped
762 			 * in the current implementation, and we don't want to
763 			 * replace the resolved IPv4 LLA route with one whose
764 			 * gateway points to that of the default gateway on
765 			 * the primary interface of the system.
766 			 */
767 			if (ia0 != NULL) {
768 				if (ifscope == IFSCOPE_NONE) {
769 					ifscope = ia0->ifa_ifp->if_index;
770 				}
771 				cloneok = (!(flags & IP_RAWOUTPUT) &&
772 				    !(IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))));
773 			}
774 		}
775 
776 		/*
777 		 * If this is the case, we probably don't want to allocate
778 		 * a protocol-cloned route since we didn't get one from the
779 		 * ULP.  This lets TCP do its thing, while not burdening
780 		 * forwarding or ICMP with the overhead of cloning a route.
781 		 * Of course, we still want to do any cloning requested by
782 		 * the link layer, as this is probably required in all cases
783 		 * for correct operation (as it is for ARP).
784 		 */
785 		if (ro->ro_rt == NULL) {
786 			uint32_t ign = RTF_PRCLONING;
787 			/*
788 			 * We make an exception here: if the destination
789 			 * address is INADDR_BROADCAST, allocate a protocol-
790 			 * cloned host route so that we end up with a route
791 			 * marked with the RTF_BROADCAST flag.  Otherwise,
792 			 * we would end up referring to the default route,
793 			 * instead of creating a cloned host route entry.
794 			 * That would introduce inconsistencies between ULPs
795 			 * that allocate a route and those that don't.  The
796 			 * RTF_BROADCAST route is important since we'd want
797 			 * to send out undirected IP broadcast packets using
798 			 * link-level broadcast address. Another exception
799 			 * is for ULP-created routes that got blown away by
800 			 * source interface selection (see above).
801 			 *
802 			 * These exceptions will no longer be necessary when
803 			 * the RTF_PRCLONING scheme is no longer present.
804 			 */
805 			if (cloneok || dst->sin_addr.s_addr == INADDR_BROADCAST) {
806 				ign &= ~RTF_PRCLONING;
807 			}
808 
809 			/*
810 			 * Loosen the route lookup criteria if the ifscope
811 			 * corresponds to the loopback interface; this is
812 			 * needed to support Application Layer Gateways
813 			 * listening on loopback, in conjunction with packet
814 			 * filter redirection rules.  The final source IP
815 			 * address will be rewritten by the packet filter
816 			 * prior to the RFC1122 loopback check below.
817 			 */
818 			if (ifscope == lo_ifp->if_index) {
819 				rtalloc_ign(ro, ign);
820 			} else {
821 				rtalloc_scoped_ign(ro, ign, ifscope);
822 			}
823 
824 			/*
825 			 * If the route points to a cellular/expensive interface
826 			 * and the caller forbids our using interfaces of such type,
827 			 * pretend that there is no route.
828 			 */
829 			if (ro->ro_rt != NULL) {
830 				RT_LOCK_SPIN(ro->ro_rt);
831 				if (IP_CHECK_RESTRICTIONS(ro->ro_rt->rt_ifp,
832 				    ipobf)) {
833 					if (log_restricted) {
834 						printf("%s:%d pid %d (%s) is unable to transmit packets on %s\n",
835 						    __func__, __LINE__,
836 						    proc_getpid(current_proc()), proc_best_name(current_proc()),
837 						    ro->ro_rt->rt_ifp->if_xname);
838 					}
839 					RT_UNLOCK(ro->ro_rt);
840 					ROUTE_RELEASE(ro);
841 					if (flags & IP_OUTARGS) {
842 						ipoa->ipoa_flags |=
843 						    IPOAF_R_IFDENIED;
844 					}
845 				} else {
846 					RT_UNLOCK(ro->ro_rt);
847 				}
848 			}
849 		}
850 
851 		if (ro->ro_rt == NULL) {
852 			OSAddAtomic(1, &ipstat.ips_noroute);
853 			error = EHOSTUNREACH;
854 			if (ia0 != NULL) {
855 				ifa_remref(ia0);
856 				ia0 = NULL;
857 			}
858 			drop_reason = DROP_REASON_IP_NO_ROUTE;
859 			goto bad;
860 		}
861 
862 		if (ia != NULL) {
863 			ifa_remref(&ia->ia_ifa);
864 		}
865 		RT_LOCK_SPIN(ro->ro_rt);
866 		ia = ifatoia(ro->ro_rt->rt_ifa);
867 		if (ia != NULL) {
868 			/* Become a regular mutex */
869 			RT_CONVERT_LOCK(ro->ro_rt);
870 			ifa_addref(&ia->ia_ifa);
871 		}
872 		/*
873 		 * Note: ia_ifp may not be the same as rt_ifp; the latter
874 		 * is what we use for determining outbound i/f, mtu, etc.
875 		 */
876 		ifp = ro->ro_rt->rt_ifp;
877 		ro->ro_rt->rt_use++;
878 		if (ro->ro_rt->rt_flags & RTF_GATEWAY) {
879 			dst = SIN(ro->ro_rt->rt_gateway);
880 		}
881 		if (ro->ro_rt->rt_flags & RTF_HOST) {
882 			/* double negation needed for bool bit field */
883 			ipobf.isbroadcast =
884 			    !!(ro->ro_rt->rt_flags & RTF_BROADCAST);
885 		} else {
886 			/* Become a regular mutex */
887 			RT_CONVERT_LOCK(ro->ro_rt);
888 			ipobf.isbroadcast = in_broadcast(dst->sin_addr, ifp);
889 		}
890 		/*
891 		 * For consistency with IPv6, as well as to ensure that
892 		 * IP_RECVIF is set correctly for packets that are sent
893 		 * to one of the local addresses.  ia (rt_ifa) would have
894 		 * been fixed up by rt_setif for local routes.  This
895 		 * would make it appear as if the packet arrives on the
896 		 * interface which owns the local address.  Loopback
897 		 * multicast case is handled separately by ip_mloopback().
898 		 */
899 		if (ia != NULL && (ifp->if_flags & IFF_LOOPBACK) &&
900 		    !IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
901 			uint16_t srcidx;
902 
903 			m->m_pkthdr.rcvif = ia->ia_ifa.ifa_ifp;
904 
905 			if (ia0 != NULL) {
906 				srcidx = ia0->ifa_ifp->if_index;
907 			} else if ((ro->ro_flags & ROF_SRCIF_SELECTED) &&
908 			    ro->ro_srcia != NULL) {
909 				srcidx = ro->ro_srcia->ifa_ifp->if_index;
910 			} else {
911 				srcidx = 0;
912 			}
913 
914 			ip_setsrcifaddr_info(m, srcidx, NULL);
915 			ip_setdstifaddr_info(m, 0, ia);
916 		}
917 		RT_UNLOCK(ro->ro_rt);
918 		if (ia0 != NULL) {
919 			ifa_remref(ia0);
920 			ia0 = NULL;
921 		}
922 	}
923 
924 	if (IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
925 		struct ifnet *srcifp = NULL;
926 		struct in_multi *inm;
927 		u_int32_t vif = 0;
928 		u_int8_t ttl = IP_DEFAULT_MULTICAST_TTL;
929 		u_int8_t loop = IP_DEFAULT_MULTICAST_LOOP;
930 
931 		m->m_flags |= M_MCAST;
932 		/*
933 		 * IP destination address is multicast.  Make sure "dst"
934 		 * still points to the address in "ro".  (It may have been
935 		 * changed to point to a gateway address, above.)
936 		 */
937 		dst = SIN(&ro->ro_dst);
938 		/*
939 		 * See if the caller provided any multicast options
940 		 */
941 		if (imo != NULL) {
942 			IMO_LOCK(imo);
943 			vif = imo->imo_multicast_vif;
944 			ttl = imo->imo_multicast_ttl;
945 			loop = imo->imo_multicast_loop;
946 			if (!(flags & IP_RAWOUTPUT)) {
947 				ip->ip_ttl = ttl;
948 			}
949 			if (imo->imo_multicast_ifp != NULL) {
950 				ifp = imo->imo_multicast_ifp;
951 			}
952 			IMO_UNLOCK(imo);
953 		} else if (!(flags & IP_RAWOUTPUT)) {
954 			vif = -1;
955 			ip->ip_ttl = ttl;
956 		}
957 		/*
958 		 * Confirm that the outgoing interface supports multicast.
959 		 */
960 		if (imo == NULL || vif == -1) {
961 			if (!(ifp->if_flags & IFF_MULTICAST)) {
962 				OSAddAtomic(1, &ipstat.ips_noroute);
963 				error = ENETUNREACH;
964 				drop_reason = DROP_REASON_IP_IF_CANNOT_MULTICAST;
965 				goto bad;
966 			}
967 		}
968 		/*
969 		 * If source address not specified yet, use address
970 		 * of outgoing interface.
971 		 */
972 		if (ip->ip_src.s_addr == INADDR_ANY) {
973 			struct in_ifaddr *ia1;
974 			lck_rw_lock_shared(&in_ifaddr_rwlock);
975 			TAILQ_FOREACH(ia1, &in_ifaddrhead, ia_link) {
976 				IFA_LOCK_SPIN(&ia1->ia_ifa);
977 				if (ia1->ia_ifp == ifp) {
978 					ip->ip_src = IA_SIN(ia1)->sin_addr;
979 					srcifp = ifp;
980 					IFA_UNLOCK(&ia1->ia_ifa);
981 					break;
982 				}
983 				IFA_UNLOCK(&ia1->ia_ifa);
984 			}
985 			lck_rw_done(&in_ifaddr_rwlock);
986 			if (ip->ip_src.s_addr == INADDR_ANY) {
987 				error = ENETUNREACH;
988 				drop_reason = DROP_REASON_IP_SRC_ADDR_ANY;
989 				goto bad;
990 			}
991 		}
992 
993 		in_multihead_lock_shared();
994 		IN_LOOKUP_MULTI(&pkt_dst, ifp, inm);
995 		in_multihead_lock_done();
996 		if (inm != NULL && (imo == NULL || loop)) {
997 			/*
998 			 * If we belong to the destination multicast group
999 			 * on the outgoing interface, and the caller did not
1000 			 * forbid loopback, loop back a copy.
1001 			 */
1002 			if (!TAILQ_EMPTY(&ipv4_filters)
1003 #if NECP
1004 			    && !necp_packet_should_skip_filters(m)
1005 #endif // NECP
1006 			    ) {
1007 				struct ipfilter *filter;
1008 				int seen = (inject_filter_ref == NULL);
1009 
1010 				if (imo != NULL) {
1011 					ipf_pktopts.ippo_flags |=
1012 					    IPPOF_MCAST_OPTS;
1013 					ipf_pktopts.ippo_mcast_ifnet = ifp;
1014 					ipf_pktopts.ippo_mcast_ttl = ttl;
1015 					ipf_pktopts.ippo_mcast_loop = loop;
1016 				}
1017 
1018 				ipf_ref();
1019 
1020 				/*
1021 				 * 4135317 - always pass network byte
1022 				 * order to filter
1023 				 */
1024 #if BYTE_ORDER != BIG_ENDIAN
1025 				HTONS(ip->ip_len);
1026 				HTONS(ip->ip_off);
1027 #endif
1028 				TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1029 					if (seen == 0) {
1030 						if ((struct ipfilter *)
1031 						    inject_filter_ref == filter) {
1032 							seen = 1;
1033 						}
1034 					} else if (filter->ipf_filter.
1035 					    ipf_output != NULL) {
1036 						errno_t result;
1037 						result = filter->ipf_filter.
1038 						    ipf_output(filter->
1039 						    ipf_filter.cookie,
1040 						    (mbuf_t *)&m, ippo);
1041 						if (result == EJUSTRETURN) {
1042 							ipf_unref();
1043 							INM_REMREF(inm);
1044 							goto done;
1045 						}
1046 						if (result != 0) {
1047 							ipf_unref();
1048 							INM_REMREF(inm);
1049 							drop_reason = DROP_REASON_IP_FILTER_DROP;
1050 							goto bad;
1051 						}
1052 					}
1053 				}
1054 
1055 				/* set back to host byte order */
1056 				ip = mtod(m, struct ip *);
1057 #if BYTE_ORDER != BIG_ENDIAN
1058 				NTOHS(ip->ip_len);
1059 				NTOHS(ip->ip_off);
1060 #endif
1061 				ipf_unref();
1062 				ipobf.didfilter = true;
1063 			}
1064 			ip_mloopback(srcifp, ifp, m, dst, hlen);
1065 		}
1066 		if (inm != NULL) {
1067 			INM_REMREF(inm);
1068 		}
1069 		/*
1070 		 * Multicasts with a time-to-live of zero may be looped-
1071 		 * back, above, but must not be transmitted on a network.
1072 		 * Also, multicasts addressed to the loopback interface
1073 		 * are not sent -- the above call to ip_mloopback() will
1074 		 * loop back a copy if this host actually belongs to the
1075 		 * destination group on the loopback interface.
1076 		 */
1077 		if (ip->ip_ttl == 0 || ifp->if_flags & IFF_LOOPBACK) {
1078 			m_freem(m);
1079 			goto done;
1080 		}
1081 
1082 		goto sendit;
1083 	}
1084 	/*
1085 	 * If source address not specified yet, use address
1086 	 * of outgoing interface.
1087 	 */
1088 	if (ip->ip_src.s_addr == INADDR_ANY) {
1089 		IFA_LOCK_SPIN(&ia->ia_ifa);
1090 		ip->ip_src = IA_SIN(ia)->sin_addr;
1091 		IFA_UNLOCK(&ia->ia_ifa);
1092 	}
1093 
1094 	/*
	 * Look for broadcast address and
	 * verify user is allowed to send
1097 	 * such a packet.
1098 	 */
1099 	if (ipobf.isbroadcast) {
1100 		if (!(ifp->if_flags & IFF_BROADCAST)) {
1101 			error = EADDRNOTAVAIL;
1102 			drop_reason = DROP_REASON_IP_IF_CANNOT_BROADCAST;
1103 			goto bad;
1104 		}
1105 		if (!(flags & IP_ALLOWBROADCAST)) {
1106 			error = EACCES;
1107 			drop_reason = DROP_REASON_IP_BROADCAST_NOT_ALLOWED;
1108 			goto bad;
1109 		}
1110 		/* don't allow broadcast messages to be fragmented */
1111 		if ((u_short)ip->ip_len > ifp->if_mtu) {
1112 			drop_reason = DROP_REASON_IP_BROADCAST_TOO_BIG;
1113 			error = EMSGSIZE;
1114 			goto bad;
1115 		}
1116 		m->m_flags |= M_BCAST;
1117 	} else {
1118 		m->m_flags &= ~M_BCAST;
1119 	}
1120 
1121 sendit:
1122 #if PF
1123 	/* Invoke outbound packet filter */
1124 	if (PF_IS_ENABLED) {
1125 		int rc;
1126 
1127 		m0 = m; /* Save for later */
1128 #if DUMMYNET
1129 		rc = ip_output_pf_dn_hook(ifp, mppn, &m, dn_pf_rule, ro, dst, flags, ipoa);
1130 #else /* DUMMYNET */
1131 		rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, NULL);
1132 #endif /* DUMMYNET */
1133 		if (rc != 0 || m == NULL) {
1134 			/* Move to the next packet */
1135 			m = *mppn;
1136 
1137 			/* Skip ahead if first packet in list got dropped */
1138 			if (packetlist == m0) {
1139 				packetlist = m;
1140 			}
1141 
1142 			if (m != NULL) {
1143 				m0 = m;
1144 				/* Next packet in the chain */
1145 				goto loopit;
1146 			} else if (packetlist != NULL) {
1147 				/* No more packet; send down the chain */
1148 				goto sendchain;
1149 			}
1150 			/* Nothing left; we're done */
1151 			goto done;
1152 		}
1153 		m0 = m;
1154 		ip = mtod(m, struct ip *);
1155 		pkt_dst = ip->ip_dst;
1156 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1157 	}
1158 #endif /* PF */
1159 	/*
1160 	 * Force IP TTL to 255 following draft-ietf-zeroconf-ipv4-linklocal.txt
1161 	 */
1162 	if (IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)) ||
1163 	    IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
1164 		ip_linklocal_stat.iplls_out_total++;
1165 		if (ip->ip_ttl != MAXTTL) {
1166 			ip_linklocal_stat.iplls_out_badttl++;
1167 			ip->ip_ttl = MAXTTL;
1168 		}
1169 	}
1170 
1171 	if (!ipobf.didfilter &&
1172 	    !TAILQ_EMPTY(&ipv4_filters)
1173 #if NECP
1174 	    && !necp_packet_should_skip_filters(m)
1175 #endif // NECP
1176 	    ) {
1177 		struct ipfilter *filter;
1178 		int seen = (inject_filter_ref == NULL);
1179 		ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS;
1180 
1181 		/*
1182 		 * Check that a TSO frame isn't passed to a filter.
1183 		 * This could happen if a filter is inserted while
1184 		 * TCP is sending the TSO packet.
1185 		 */
1186 		if (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) {
1187 			error = EMSGSIZE;
1188 			drop_reason = DROP_REASON_IP_FILTER_TSO;
1189 			goto bad;
1190 		}
1191 
1192 		ipf_ref();
1193 
1194 		/* 4135317 - always pass network byte order to filter */
1195 #if BYTE_ORDER != BIG_ENDIAN
1196 		HTONS(ip->ip_len);
1197 		HTONS(ip->ip_off);
1198 #endif
1199 		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1200 			if (seen == 0) {
1201 				if ((struct ipfilter *)inject_filter_ref ==
1202 				    filter) {
1203 					seen = 1;
1204 				}
1205 			} else if (filter->ipf_filter.ipf_output) {
1206 				errno_t result;
1207 				result = filter->ipf_filter.
1208 				    ipf_output(filter->ipf_filter.cookie,
1209 				    (mbuf_t *)&m, ippo);
1210 				if (result == EJUSTRETURN) {
1211 					ipf_unref();
1212 					goto done;
1213 				}
1214 				if (result != 0) {
1215 					ipf_unref();
1216 					drop_reason = DROP_REASON_IP_FILTER_DROP;
1217 					goto bad;
1218 				}
1219 			}
1220 		}
1221 		/* set back to host byte order */
1222 		ip = mtod(m, struct ip *);
1223 #if BYTE_ORDER != BIG_ENDIAN
1224 		NTOHS(ip->ip_len);
1225 		NTOHS(ip->ip_off);
1226 #endif
1227 		ipf_unref();
1228 	}
1229 
1230 #if NECP
1231 	/* Process Network Extension Policy. Will Pass, Drop, or Rebind packet. */
1232 	necp_matched_policy_id = necp_ip_output_find_policy_match(m,
1233 	    flags, (flags & IP_OUTARGS) ? ipoa : NULL, ro ? ro->ro_rt : NULL, &necp_result, &necp_result_parameter);
1234 	if (necp_matched_policy_id) {
1235 		necp_mark_packet_from_ip(m, necp_matched_policy_id);
1236 		switch (necp_result) {
1237 		case NECP_KERNEL_POLICY_RESULT_PASS:
1238 			if (necp_result_parameter.pass_flags & NECP_KERNEL_POLICY_PASS_NO_SKIP_IPSEC) {
1239 				break;
1240 			}
1241 			/* Check if the interface is allowed */
1242 			if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1243 				error = EHOSTUNREACH;
1244 				OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1245 				drop_reason = DROP_REASON_IP_NECP_POLICY_NO_ALLOW_IF;
1246 				goto bad;
1247 			}
1248 			goto skip_ipsec;
1249 		case NECP_KERNEL_POLICY_RESULT_DROP:
1250 			error = EHOSTUNREACH;
1251 			OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1252 			drop_reason = DROP_REASON_IP_NECP_POLICY_DROP;
1253 			goto bad;
1254 		case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT:
1255 			/* Flow divert packets should be blocked at the IP layer */
1256 			error = EHOSTUNREACH;
1257 			OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1258 			drop_reason = DROP_REASON_IP_NECP_POLICY_SOCKET_DIVERT;
1259 			goto bad;
1260 		case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
1261 			/* Verify that the packet is being routed to the tunnel */
1262 			struct ifnet *policy_ifp = necp_get_ifnet_from_result_parameter(&necp_result_parameter);
1263 			if (policy_ifp == ifp) {
1264 				/* Check if the interface is allowed */
1265 				if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1266 					error = EHOSTUNREACH;
1267 					OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1268 					drop_reason = DROP_REASON_IP_NECP_POLICY_TUN_NO_ALLOW_IF;
1269 					goto bad;
1270 				}
1271 				goto skip_ipsec;
1272 			} else {
1273 				if (necp_packet_can_rebind_to_ifnet(m, policy_ifp, &necp_route, AF_INET)) {
1274 					/* Check if the interface is allowed */
1275 					if (!necp_packet_is_allowed_over_interface(m, policy_ifp)) {
1276 						error = EHOSTUNREACH;
1277 						OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1278 						drop_reason = DROP_REASON_IP_NECP_POLICY_TUN_REBIND_NO_ALLOW_IF;
1279 						goto bad;
1280 					}
1281 
1282 					/*
1283 					 * Update the QOS marking policy if
1284 					 * 1. up layer asks it to do so
1285 					 * 2. net_qos_policy_restricted is not set
1286 					 * 3. qos_marking_gencount doesn't match necp_kernel_socket_policies_gencount (checked in necp_lookup_current_qos_marking)
1287 					 */
1288 					if (ipoa != NULL &&
1289 					    (ipoa->ipoa_flags & IPOAF_REDO_QOSMARKING_POLICY) &&
1290 					    net_qos_policy_restricted != 0) {
1291 						bool qos_marking = (ipoa->ipoa_flags & IPOAF_QOSMARKING_ALLOWED) ? TRUE : FALSE;
1292 						qos_marking = necp_lookup_current_qos_marking(&ipoa->qos_marking_gencount, NULL, policy_ifp, necp_result_parameter.route_rule_id, qos_marking);
1293 						if (qos_marking) {
1294 							ipoa->ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1295 						} else {
1296 							ipoa->ipoa_flags &= ~IPOAF_QOSMARKING_ALLOWED;
1297 						}
1298 					}
1299 
1300 					/* Set ifp to the tunnel interface, since it is compatible with the packet */
1301 					ifp = policy_ifp;
1302 					ro = &necp_route;
1303 					goto skip_ipsec;
1304 				} else {
1305 					error = ENETUNREACH;
1306 					OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1307 					drop_reason = DROP_REASON_IP_NECP_POLICY_TUN_NO_REBIND_IF;
1308 					goto bad;
1309 				}
1310 			}
1311 		}
1312 		default:
1313 			break;
1314 		}
1315 	}
1316 	/* Catch-all to check if the interface is allowed */
1317 	if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1318 		error = EHOSTUNREACH;
1319 		OSAddAtomic(1, &ipstat.ips_necp_policy_drop);
1320 		drop_reason = DROP_REASON_IP_NECP_NO_ALLOW_IF;
1321 		goto bad;
1322 	}
1323 #endif /* NECP */
1324 
1325 	if (IP_CHECK_RESTRICTIONS(ifp, ipobf)) {
1326 		if (log_restricted) {
1327 			printf("%s:%d pid %d (%s) is unable to transmit packets on %s\n",
1328 			    __func__, __LINE__,
1329 			    proc_getpid(current_proc()), proc_best_name(current_proc()),
1330 			    ifp->if_xname);
1331 		}
1332 		error = EHOSTUNREACH;
1333 		drop_reason = DROP_REASON_IP_TO_RESTRICTED_IF;
1334 		goto bad;
1335 	}
1336 
1337 #if IPSEC
1338 	if (ipsec_bypass != 0 || (flags & IP_NOIPSEC)) {
1339 		goto skip_ipsec;
1340 	}
1341 
1342 	KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1343 
1344 	if (sp == NULL) {
1345 		/* get SP for this packet */
1346 		if (so != NULL) {
1347 			sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND,
1348 			    so, &error);
1349 		} else {
1350 			sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
1351 			    flags, &error);
1352 		}
1353 		if (sp == NULL) {
1354 			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
1355 			KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1356 			    0, 0, 0, 0, 0);
1357 			drop_reason = DROP_REASON_IP_OUTBOUND_IPSEC_POLICY;
1358 			goto bad;
1359 		}
1360 	}
1361 
1362 	error = 0;
1363 
1364 	/* check policy */
1365 	switch (sp->policy) {
1366 	case IPSEC_POLICY_DISCARD:
1367 	case IPSEC_POLICY_GENERATE:
1368 		/*
1369 		 * This packet is just discarded.
1370 		 */
1371 		IPSEC_STAT_INCREMENT(ipsecstat.out_polvio);
1372 		KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1373 		    1, 0, 0, 0, 0);
1374 		drop_reason = DROP_REASON_IP_OUTBOUND_IPSEC_POLICY;
1375 		goto bad;
1376 
1377 	case IPSEC_POLICY_BYPASS:
1378 	case IPSEC_POLICY_NONE:
1379 		/* no need to do IPsec. */
1380 		KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1381 		    2, 0, 0, 0, 0);
1382 		drop_reason = DROP_REASON_IP_OUTBOUND_IPSEC_POLICY;
1383 		goto skip_ipsec;
1384 
1385 	case IPSEC_POLICY_IPSEC:
1386 		if (sp->req == NULL) {
1387 			/* acquire a policy */
1388 			error = key_spdacquire(sp);
1389 			KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1390 			    3, 0, 0, 0, 0);
1391 			drop_reason = DROP_REASON_IP_OUTBOUND_IPSEC_POLICY;
1392 			goto bad;
1393 		}
1394 		if (sp->ipsec_if) {
1395 			/* Verify the redirect to ipsec interface */
1396 			if (sp->ipsec_if == ifp) {
1397 				goto skip_ipsec;
1398 			}
1399 			drop_reason = DROP_REASON_IP_OUTBOUND_IPSEC_POLICY;
1400 			goto bad;
1401 		}
1402 		break;
1403 
1404 	case IPSEC_POLICY_ENTRUST:
1405 	default:
1406 		printf("ip_output: Invalid policy found. %d\n", sp->policy);
1407 	}
1408 	{
1409 		ipsec_state.m = m;
1410 		if (flags & IP_ROUTETOIF) {
1411 			bzero(&ipsec_state.ro, sizeof(ipsec_state.ro));
1412 		} else {
1413 			route_copyout((struct route *)&ipsec_state.ro, ro, sizeof(struct route));
1414 		}
1415 		ipsec_state.dst = SA(dst);
1416 
1417 		ip->ip_sum = 0;
1418 
1419 		/*
1420 		 * XXX
1421 		 * delayed checksums are not currently compatible with IPsec
1422 		 */
1423 		if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
1424 			in_delayed_cksum(m);
1425 		}
1426 
1427 #if BYTE_ORDER != BIG_ENDIAN
1428 		HTONS(ip->ip_len);
1429 		HTONS(ip->ip_off);
1430 #endif
1431 
1432 		DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL,
1433 		    struct ip *, ip, struct ifnet *, ifp,
1434 		    struct ip *, ip, struct ip6_hdr *, NULL);
1435 
1436 		error = ipsec4_output(&ipsec_state, sp, flags);
1437 		if (ipsec_state.tunneled == 6) {
1438 			m0 = m = NULL;
1439 			error = 0;
1440 			goto bad;
1441 		}
1442 
1443 		m0 = m = ipsec_state.m;
1444 
1445 #if DUMMYNET
1446 		/*
1447 		 * If we're about to use the route in ipsec_state
		 * and this came from dummynet, clean up now.
1449 		 */
1450 		if (ro == &saved_route &&
1451 		    (!(flags & IP_ROUTETOIF) || ipsec_state.tunneled)) {
1452 			ROUTE_RELEASE(ro);
1453 		}
1454 #endif /* DUMMYNET */
1455 
1456 		if (flags & IP_ROUTETOIF) {
1457 			/*
1458 			 * if we have tunnel mode SA, we may need to ignore
1459 			 * IP_ROUTETOIF.
1460 			 */
1461 			if (ipsec_state.tunneled) {
1462 				flags &= ~IP_ROUTETOIF;
1463 				ro = (struct route *)&ipsec_state.ro;
1464 			}
1465 		} else {
1466 			ro = (struct route *)&ipsec_state.ro;
1467 		}
1468 		dst = SIN(ipsec_state.dst);
1469 		if (error) {
1470 			/* mbuf is already reclaimed in ipsec4_output. */
1471 			m0 = NULL;
1472 			switch (error) {
1473 			case EHOSTUNREACH:
1474 			case ENETUNREACH:
1475 			case EMSGSIZE:
1476 			case ENOBUFS:
1477 			case ENOMEM:
1478 				break;
1479 			default:
1480 				printf("ip4_output (ipsec): error code %d\n", error);
1481 				OS_FALLTHROUGH;
1482 			case ENOENT:
1483 				/* don't show these error codes to the user */
1484 				error = 0;
1485 				break;
1486 			}
1487 			KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1488 			    4, 0, 0, 0, 0);
1489 			goto bad;
1490 		}
1491 	}
1492 
1493 	/* be sure to update variables that are affected by ipsec4_output() */
1494 	ip = mtod(m, struct ip *);
1495 
1496 #ifdef _IP_VHL
1497 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1498 #else /* !_IP_VHL */
1499 	hlen = ip->ip_hl << 2;
1500 #endif /* !_IP_VHL */
1501 	/* Check that there wasn't a route change and src is still valid */
1502 	if (ROUTE_UNUSABLE(ro)) {
1503 		ROUTE_RELEASE(ro);
1504 		VERIFY(src_ia == NULL);
1505 		if (ip->ip_src.s_addr != INADDR_ANY &&
1506 		    !(flags & (IP_ROUTETOIF | IP_FORWARDING)) &&
1507 		    (src_ia = ifa_foraddr(ip->ip_src.s_addr)) == NULL) {
1508 			error = EADDRNOTAVAIL;
1509 			KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1510 			    5, 0, 0, 0, 0);
1511 			OSAddAtomic(1, &ipstat.ips_src_addr_not_avail);
1512 			drop_reason = DROP_REASON_IP_SRC_ADDR_NO_AVAIL;
1513 			goto bad;
1514 		}
1515 		if (src_ia != NULL) {
1516 			ifa_remref(&src_ia->ia_ifa);
1517 			src_ia = NULL;
1518 		}
1519 	}
1520 
1521 	if (ro->ro_rt == NULL) {
1522 		if (!(flags & IP_ROUTETOIF)) {
1523 			printf("%s: can't update route after "
1524 			    "IPsec processing\n", __func__);
1525 			error = EHOSTUNREACH;   /* XXX */
1526 			KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1527 			    6, 0, 0, 0, 0);
1528 			drop_reason = DROP_REASON_IP_NO_ROUTE;
1529 			goto bad;
1530 		}
1531 	} else {
1532 		if (ia != NULL) {
1533 			ifa_remref(&ia->ia_ifa);
1534 		}
1535 		RT_LOCK_SPIN(ro->ro_rt);
1536 		ia = ifatoia(ro->ro_rt->rt_ifa);
1537 		if (ia != NULL) {
1538 			/* Become a regular mutex */
1539 			RT_CONVERT_LOCK(ro->ro_rt);
1540 			ifa_addref(&ia->ia_ifa);
1541 		}
1542 		ifp = ro->ro_rt->rt_ifp;
1543 		RT_UNLOCK(ro->ro_rt);
1544 	}
1545 
1546 	/* make it flipped, again. */
1547 #if BYTE_ORDER != BIG_ENDIAN
1548 	NTOHS(ip->ip_len);
1549 	NTOHS(ip->ip_off);
1550 #endif
1551 	KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1552 	    7, 0xff, 0xff, 0xff, 0xff);
1553 
1554 	/* Pass to filters again */
1555 	if (!TAILQ_EMPTY(&ipv4_filters)
1556 #if NECP
1557 	    && !necp_packet_should_skip_filters(m)
1558 #endif // NECP
1559 	    ) {
1560 		struct ipfilter *filter;
1561 
1562 		ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS;
1563 
1564 		/*
1565 		 * Check that a TSO frame isn't passed to a filter.
1566 		 * This could happen if a filter is inserted while
1567 		 * TCP is sending the TSO packet.
1568 		 */
1569 		if (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) {
1570 			error = EMSGSIZE;
1571 			drop_reason = DROP_REASON_IP_FILTER_TSO;
1572 			goto bad;
1573 		}
1574 
1575 		ipf_ref();
1576 
1577 		/* 4135317 - always pass network byte order to filter */
1578 #if BYTE_ORDER != BIG_ENDIAN
1579 		HTONS(ip->ip_len);
1580 		HTONS(ip->ip_off);
1581 #endif
1582 		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1583 			if (filter->ipf_filter.ipf_output) {
1584 				errno_t result;
1585 				result = filter->ipf_filter.
1586 				    ipf_output(filter->ipf_filter.cookie,
1587 				    (mbuf_t *)&m, ippo);
1588 				if (result == EJUSTRETURN) {
1589 					ipf_unref();
1590 					goto done;
1591 				}
1592 				if (result != 0) {
1593 					ipf_unref();
1594 					drop_reason = DROP_REASON_IP_FILTER_DROP;
1595 					goto bad;
1596 				}
1597 			}
1598 		}
1599 		/* set back to host byte order */
1600 		ip = mtod(m, struct ip *);
1601 #if BYTE_ORDER != BIG_ENDIAN
1602 		NTOHS(ip->ip_len);
1603 		NTOHS(ip->ip_off);
1604 #endif
1605 		ipf_unref();
1606 	}
1607 skip_ipsec:
1608 #endif /* IPSEC */
1609 
1610 
1611 	/* 127/8 must not appear on wire - RFC1122 */
1612 	if (!(ifp->if_flags & IFF_LOOPBACK) &&
1613 	    ((ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1614 	    (ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)) {
1615 		OSAddAtomic(1, &ipstat.ips_badaddr);
1616 		error = EADDRNOTAVAIL;
1617 		drop_reason = DROP_REASON_IP_INVALID_ADDR;
1618 		goto bad;
1619 	}
1620 
1621 	if (ipoa != NULL) {
1622 		u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
1623 
1624 		error = set_packet_qos(m, ifp,
1625 		    ipoa->ipoa_flags & IPOAF_QOSMARKING_ALLOWED ? TRUE : FALSE,
1626 		    ipoa->ipoa_sotc, ipoa->ipoa_netsvctype, &dscp);
1627 		if (error == 0) {
1628 			ip->ip_tos &= IPTOS_ECN_MASK;
1629 			ip->ip_tos |= (u_char)(dscp << IPTOS_DSCP_SHIFT);
1630 		} else {
1631 			printf("%s if_dscp_for_mbuf() error %d\n", __func__, error);
1632 			error = 0;
1633 		}
1634 	}
1635 
1636 	ip_output_checksum(ifp, m, (IP_VHL_HL(ip->ip_vhl) << 2),
1637 	    ip->ip_len, &sw_csum);
1638 
1639 	interface_mtu = ifp->if_mtu;
1640 
1641 	if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
1642 		interface_mtu = IN6_LINKMTU(ifp);
1643 		/* Further adjust the size for CLAT46 expansion */
1644 		interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
1645 	}
1646 
1647 	/*
1648 	 * If small enough for interface, or the interface will take
1649 	 * care of the fragmentation for us, can just send directly.
1650 	 */
1651 	if ((u_short)ip->ip_len <= interface_mtu || TSO_IPV4_OK(ifp, m) ||
1652 	    (!(ip->ip_off & IP_DF) && (ifp->if_hwassist & CSUM_FRAGMENT))) {
1653 #if BYTE_ORDER != BIG_ENDIAN
1654 		HTONS(ip->ip_len);
1655 		HTONS(ip->ip_off);
1656 #endif
1657 
1658 		ip->ip_sum = 0;
1659 		if ((sw_csum & CSUM_DELAY_IP) || __improbable(force_ipsum != 0)) {
1660 			ip->ip_sum = ip_cksum_hdr_out(m, hlen);
1661 			sw_csum &= ~CSUM_DELAY_IP;
1662 			m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
1663 		}
1664 
1665 #if IPSEC
1666 		/* clean ipsec history once it goes out of the node */
1667 		if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
1668 			ipsec_delaux(m);
1669 		}
1670 #endif /* IPSEC */
1671 		if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) &&
1672 		    (m->m_pkthdr.tso_segsz > 0)) {
1673 			scnt += m->m_pkthdr.len / m->m_pkthdr.tso_segsz;
1674 		} else {
1675 			scnt++;
1676 		}
1677 
1678 		if (packetchain == 0) {
1679 			if (ro->ro_rt != NULL && nstat_collect) {
1680 				nstat_route_tx(ro->ro_rt, scnt,
1681 				    m->m_pkthdr.len, 0);
1682 			}
1683 
1684 			error = dlil_output(ifp, PF_INET, m, ro->ro_rt,
1685 			    SA(dst), 0, adv);
1686 			if (dlil_verbose && error) {
1687 				printf("dlil_output error on interface %s: %d\n",
1688 				    ifp->if_xname, error);
1689 			}
1690 			scnt = 0;
1691 			goto done;
1692 		} else {
1693 			/*
1694 			 * packet chaining allows us to reuse the
1695 			 * route for all packets
1696 			 */
1697 			bytecnt += m->m_pkthdr.len;
1698 			mppn = &m->m_nextpkt;
1699 			m = m->m_nextpkt;
1700 			if (m == NULL) {
1701 #if PF
1702 sendchain:
1703 #endif /* PF */
1704 				if (pktcnt > ip_maxchainsent) {
1705 					ip_maxchainsent = pktcnt;
1706 				}
1707 				if (ro->ro_rt != NULL && nstat_collect) {
1708 					nstat_route_tx(ro->ro_rt, scnt,
1709 					    bytecnt, 0);
1710 				}
1711 
1712 				error = dlil_output(ifp, PF_INET, packetlist,
1713 				    ro->ro_rt, SA(dst), 0, adv);
1714 				if (dlil_verbose && error) {
1715 					printf("dlil_output error on interface %s: %d\n",
1716 					    ifp->if_xname, error);
1717 				}
1718 				pktcnt = 0;
1719 				scnt = 0;
1720 				bytecnt = 0;
1721 				goto done;
1722 			}
1723 			m0 = m;
1724 			pktcnt++;
1725 			goto loopit;
1726 		}
1727 	}
1728 
1729 	VERIFY(interface_mtu != 0);
1730 	/*
1731 	 * Too large for interface; fragment if possible.
1732 	 * Must be able to put at least 8 bytes per fragment.
1733 	 * Balk when DF bit is set or the interface didn't support TSO.
1734 	 */
1735 	if ((ip->ip_off & IP_DF) || pktcnt > 0 ||
1736 	    (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
1737 		error = EMSGSIZE;
1738 		/*
1739 		 * This case can happen if the user changed the MTU
1740 		 * of an interface after enabling IP on it.  Because
1741 		 * most netifs don't keep track of routes pointing to
1742 		 * them, there is no way for one to update all its
1743 		 * routes when the MTU is changed.
1744 		 */
1745 		if (ro->ro_rt) {
1746 			RT_LOCK_SPIN(ro->ro_rt);
1747 			if ((ro->ro_rt->rt_flags & (RTF_UP | RTF_HOST)) &&
1748 			    !(ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) &&
1749 			    (ro->ro_rt->rt_rmx.rmx_mtu > interface_mtu)) {
1750 				ro->ro_rt->rt_rmx.rmx_mtu = interface_mtu;
1751 			}
1752 			RT_UNLOCK(ro->ro_rt);
1753 		}
1754 		if (pktcnt > 0) {
1755 			m0 = packetlist;
1756 		}
1757 		OSAddAtomic(1, &ipstat.ips_cantfrag);
1758 		drop_reason = DROP_REASON_IP_CANNOT_FRAGMENT;
1759 		goto bad;
1760 	}
1761 
1762 	/*
1763 	 * XXX Only TCP seems to be passing a list of packets here.
1764 	 * The following issue is limited to UDP datagrams with 0 checksum.
1765 	 * For now limit it to the case when single packet is passed down.
1766 	 */
1767 	if (packetchain == 0 && IS_INTF_CLAT46(ifp)) {
1768 		/*
1769 		 * If it is a UDP packet that has checksum set to 0
1770 		 * and is also not being offloaded, compute a full checksum
1771 		 * and update the UDP checksum.
1772 		 */
1773 		if (ip->ip_p == IPPROTO_UDP &&
1774 		    !(m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_PARTIAL))) {
1775 			struct udphdr *uh = NULL;
1776 
1777 			if (m->m_len < hlen + sizeof(struct udphdr)) {
1778 				m = m_pullup(m, hlen + sizeof(struct udphdr));
1779 				if (m == NULL) {
1780 					error = ENOBUFS;
1781 					m0 = m;
1782 					goto bad;
1783 				}
1784 				m0 = m;
1785 				ip = mtod(m, struct ip *);
1786 			}
1787 			/*
1788 			 * Get UDP header and if checksum is 0, then compute the full
1789 			 * checksum.
1790 			 */
1791 			uh = (struct udphdr *)(void *)((caddr_t)ip + hlen);
1792 			if (uh->uh_sum == 0) {
1793 				uh->uh_sum = inet_cksum(m, IPPROTO_UDP, hlen,
1794 				    ip->ip_len - hlen);
1795 				if (uh->uh_sum == 0) {
1796 					uh->uh_sum = 0xffff;
1797 				}
1798 			}
1799 		}
1800 	}
1801 
1802 	error = ip_fragment(m, ifp, interface_mtu, sw_csum);
1803 	if (error != 0) {
1804 		m0 = m = NULL;
1805 		/* ip_fragment() takes care of calling m_drop() */
1806 		goto done;
1807 	}
1808 
1809 	KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1810 	    ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1811 
1812 	for (m = m0; m; m = m0) {
1813 		m0 = m->m_nextpkt;
1814 		m->m_nextpkt = 0;
1815 #if IPSEC
1816 		/* clean ipsec history once it goes out of the node */
1817 		if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
1818 			ipsec_delaux(m);
1819 		}
1820 #endif /* IPSEC */
1821 		if (error == 0) {
1822 			if ((packetchain != 0) && (pktcnt > 0)) {
1823 				panic("%s: mix of packet in packetlist is "
1824 				    "wrong=%p", __func__, packetlist);
1825 				/* NOTREACHED */
1826 			}
1827 			if (ro->ro_rt != NULL && nstat_collect) {
1828 				nstat_route_tx(ro->ro_rt, 1,
1829 				    m->m_pkthdr.len, 0);
1830 			}
1831 			error = dlil_output(ifp, PF_INET, m, ro->ro_rt,
1832 			    SA(dst), 0, adv);
1833 			if (dlil_verbose && error) {
1834 				printf("dlil_output error on interface %s: %d\n",
1835 				    ifp->if_xname, error);
1836 			}
1837 		} else {
1838 			m_freem(m);
1839 		}
1840 	}
1841 
1842 	if (error == 0) {
1843 		OSAddAtomic(1, &ipstat.ips_fragmented);
1844 	}
1845 
1846 done:
1847 	if (ia != NULL) {
1848 		ifa_remref(&ia->ia_ifa);
1849 		ia = NULL;
1850 	}
1851 #if IPSEC
1852 	ROUTE_RELEASE(&ipsec_state.ro);
1853 	if (sp != NULL) {
1854 		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1855 		    printf("DP ip_output call free SP:%x\n", sp));
1856 		key_freesp(sp, KEY_SADB_UNLOCKED);
1857 	}
1858 #endif /* IPSEC */
1859 #if NECP
1860 	ROUTE_RELEASE(&necp_route);
1861 #endif /* NECP */
1862 #if DUMMYNET
1863 	ROUTE_RELEASE(&saved_route);
1864 #endif /* DUMMYNET */
1865 
1866 	KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
1867 	if (ip_output_measure) {
1868 		net_perf_measure_time(&net_perf, &start_tv, packets_processed);
1869 		net_perf_histogram(&net_perf, packets_processed);
1870 	}
1871 	return error;
1872 bad:
1873 	if (pktcnt > 0) {
1874 		m0 = packetlist;
1875 	}
1876 	m_drop_list(m0, ifp, DROPTAP_FLAG_DIR_OUT | DROPTAP_FLAG_L2_MISSING, drop_reason, NULL, 0);
1877 	goto done;
1878 
1879 #undef ipsec_state
1880 #undef args
1881 #undef sro_fwd
1882 #undef saved_route
1883 #undef ipf_pktopts
1884 #undef IP_CHECK_RESTRICTIONS
1885 }
1886 
/*
 * Fragment mbuf chain "m" into fragments small enough to fit within "mtu"
 * bytes for transmission on "ifp".  The fragments are linked through
 * m_nextpkt off the original mbuf, and the original is trimmed in place to
 * become the first fragment.  "sw_csum" indicates which checksums still
 * have to be computed in software (CSUM_DELAY_IP => IP header checksum,
 * computed here per fragment).
 *
 * On entry, ip_len and ip_off in the header are in host byte order; both
 * are converted to network order as each fragment header is finalized.
 *
 * Returns 0 on success, EMSGSIZE if the MTU cannot carry even an 8-byte
 * payload fragment, or ENOBUFS on mbuf allocation failure (the packet is
 * dropped and accounted in those cases).
 */
int
ip_fragment(struct mbuf *m, struct ifnet *ifp, uint32_t mtu, int sw_csum)
{
	struct ip *ip, *mhip;
	int len, hlen, mhlen, firstlen, off;
	struct mbuf **mnext = &m->m_nextpkt, *m0;
	int nfrags = 1;

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else /* !_IP_VHL */
	hlen = ip->ip_hl << 2;
#endif /* !_IP_VHL */

	/*
	 * We need to adjust the fragment sizes to account
	 * for IPv6 fragment header if it needs to be translated
	 * from IPv4 to IPv6.
	 */
	if (IS_INTF_CLAT46(ifp)) {
		mtu -= sizeof(struct ip6_frag);
	}

	/* Per-fragment payload must be a multiple of 8 bytes (RFC 791). */
	firstlen = len = (mtu - hlen) & ~7;
	if (len < 8) {
		OSAddAtomic(1, &ipstat.ips_odropped);
		m_drop(m, DROPTAP_FLAG_DIR_OUT | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_TOO_SMALL,
		    NULL, 0);
		return EMSGSIZE;
	}

	/*
	 * if the interface will not calculate checksums on
	 * fragmented packets, then do it here.
	 */
	if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) &&
	    !(ifp->if_hwassist & CSUM_IP_FRAGS)) {
		in_delayed_cksum(m);
	}

	/*
	 * Loop through length of segment after first fragment,
	 * make new header and copy data of each part and link onto chain.
	 */
	m0 = m;
	mhlen = sizeof(struct ip);
	for (off = hlen + len; off < (u_short)ip->ip_len; off += len) {
		MGETHDR(m, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
		if (m == NULL) {
			/*
			 * NOTE(review): m is NULL at this point; m_drop()
			 * is presumably NULL-safe and used here only to
			 * record the drop reason — confirm.  Fragments
			 * already linked off m0 are left to the caller.
			 */
			m_drop(m, DROPTAP_FLAG_DIR_OUT | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_NO_MEM,
			    NULL, 0);
			OSAddAtomic(1, &ipstat.ips_odropped);
			return ENOBUFS;
		}
		m->m_flags |= (m0->m_flags & M_MCAST) | M_FRAG;
		/* leave room for a link-layer header to be prepended later */
		m->m_data += max_linkhdr;
		mhip = mtod(m, struct ip *);
		*mhip = *ip;
		if (hlen > sizeof(struct ip)) {
			/* copy only the options flagged copy-on-fragment */
			mhlen = ip_optcopy(ip, mhip) + sizeof(struct ip);
			mhip->ip_vhl = IP_MAKE_VHL(IPVERSION, mhlen >> 2);
		}
		m->m_len = mhlen;
		/* fragment offset is expressed in 8-byte units */
		mhip->ip_off = (u_short)(((off - hlen) >> 3) + (ip->ip_off & ~IP_MF));
		if (ip->ip_off & IP_MF) {
			mhip->ip_off |= IP_MF;
		}
		if (off + len >= (u_short)ip->ip_len) {
			/* last fragment: trim to the remaining payload */
			len = (u_short)ip->ip_len - off;
		} else {
			mhip->ip_off |= IP_MF;
		}
		mhip->ip_len = htons((u_short)(len + mhlen));
		m->m_next = m_copy(m0, off, len);
		if (m->m_next == NULL) {
			m_drop(m, DROPTAP_FLAG_DIR_OUT | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_NO_MEM,
			    NULL, 0);
			OSAddAtomic(1, &ipstat.ips_odropped);
			return ENOBUFS;
		}
		m->m_pkthdr.len = mhlen + len;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;

		/* propagate classification/PF/NECP metadata to the fragment */
		M_COPY_CLASSIFIER(m, m0);
		M_COPY_PFTAG(m, m0);
		M_COPY_NECPTAG(m, m0);

#if BYTE_ORDER != BIG_ENDIAN
		HTONS(mhip->ip_off);
#endif

		mhip->ip_sum = 0;
		if (sw_csum & CSUM_DELAY_IP) {
			mhip->ip_sum = ip_cksum_hdr_out(m, mhlen);
			m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
		}
		/* append the fragment to the m_nextpkt chain */
		*mnext = m;
		mnext = &m->m_nextpkt;
		nfrags++;
	}
	OSAddAtomic(nfrags, &ipstat.ips_ofragments);

	/* set first/last markers for fragment chain */
	m->m_flags |= M_LASTFRAG;
	m0->m_flags |= M_FIRSTFRAG | M_FRAG;
	m0->m_pkthdr.csum_data = nfrags;

	/*
	 * Update first fragment by trimming what's been copied out
	 * and updating header, then send each fragment (in order).
	 */
	m = m0;
	m_adj(m, hlen + firstlen - (u_short)ip->ip_len);
	m->m_pkthdr.len = hlen + firstlen;
	ip->ip_len = htons((u_short)m->m_pkthdr.len);
	ip->ip_off |= IP_MF;

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_off);
#endif

	ip->ip_sum = 0;
	if (sw_csum & CSUM_DELAY_IP) {
		ip->ip_sum = ip_cksum_hdr_out(m, hlen);
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
	}

	return 0;
}
2018 
2019 static void
ip_out_cksum_stats(int proto,u_int32_t len)2020 ip_out_cksum_stats(int proto, u_int32_t len)
2021 {
2022 	switch (proto) {
2023 	case IPPROTO_TCP:
2024 		tcp_out_cksum_stats(len);
2025 		break;
2026 	case IPPROTO_UDP:
2027 		udp_out_cksum_stats(len);
2028 		break;
2029 	default:
2030 		/* keep only TCP or UDP stats for now */
2031 		break;
2032 	}
2033 }
2034 
2035 /*
2036  * Process a delayed payload checksum calculation (outbound path.)
2037  *
2038  * hoff is the number of bytes beyond the mbuf data pointer which
2039  * points to the IP header.
2040  *
2041  * Returns a bitmask representing all the work done in software.
2042  */
uint32_t
in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags)
{
	/* local copy of the IP header; 15 << 2 = 60 bytes, the max IP hdr len */
	unsigned char buf[15 << 2] __attribute__((aligned(8)));
	struct ip *__single ip;
	uint32_t offset, _hlen, mlen, hlen, len, sw_csum;
	uint16_t csum, ip_len;

	_CASSERT(sizeof(csum) == sizeof(uint16_t));
	VERIFY(m->m_flags & M_PKTHDR);

	/* only the delayed-checksum work the caller asked for and is pending */
	sw_csum = (csum_flags & m->m_pkthdr.csum_flags);

	if ((sw_csum &= (CSUM_DELAY_IP | CSUM_DELAY_DATA)) == 0) {
		goto done;
	}

	mlen = m->m_pkthdr.len;                         /* total mbuf len */

	/* sanity check (need at least simple IP header) */
	if (mlen < (hoff + sizeof(*ip))) {
		panic("%s: mbuf %p pkt len (%u) < hoff+ip_hdr "
		    "(%u+%u)\n", __func__, m, mlen, hoff,
		    (uint32_t)sizeof(*ip));
		/* NOTREACHED */
	}

	/*
	 * In case the IP header is not contiguous, or not 32-bit aligned,
	 * or if we're computing the IP header checksum, copy it to a local
	 * buffer.  Copy only the simple IP header here (IP options case
	 * is handled below.)
	 */
	if ((sw_csum & CSUM_DELAY_IP) || (hoff + sizeof(*ip)) > m->m_len ||
	    !IP_HDR_ALIGNED_P(mtod(m, caddr_t) + hoff)) {
		m_copydata(m, hoff, sizeof(*ip), (caddr_t)buf);
		ip = (struct ip *)(void *)buf;
		_hlen = sizeof(*ip);    /* _hlen: bytes of header held in buf */
	} else {
		ip = (struct ip *)(void *)(m_mtod_current(m) + hoff);
		_hlen = 0;              /* header read directly from the mbuf */
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;              /* IP header len */

	/* sanity check */
	if (mlen < (hoff + hlen)) {
		panic("%s: mbuf %p pkt too short (%d) for IP header (%u), "
		    "hoff %u", __func__, m, mlen, hlen, hoff);
		/* NOTREACHED */
	}

	/*
	 * We could be in the context of an IP or interface filter; in the
	 * former case, ip_len would be in host (correct) order while for
	 * the latter it would be in network order.  Because of this, we
	 * attempt to interpret the length field by comparing it against
	 * the actual packet length.  If the comparison fails, byte swap
	 * the length and check again.  If it still fails, use the actual
	 * packet length.  This also covers the trailing bytes case.
	 */
	ip_len = ip->ip_len;
	if (ip_len != (mlen - hoff)) {
		ip_len = OSSwapInt16(ip_len);
		if (ip_len != (mlen - hoff)) {
			printf("%s: mbuf 0x%llx proto %d IP len %d (%x) "
			    "[swapped %d (%x)] doesn't match actual packet "
			    "length; %d is used instead\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRHASH(m), ip->ip_p,
			    ip->ip_len, ip->ip_len, ip_len, ip_len,
			    (mlen - hoff));
			if (mlen - hoff > UINT16_MAX) {
				panic("%s: mlen %u - hoff %u > 65535",
				    __func__, mlen, hoff);
			}
			ip_len = (uint16_t)(mlen - hoff);
		}
	}

	len = ip_len - hlen;                            /* csum span */

	if (sw_csum & CSUM_DELAY_DATA) {
		uint16_t ulpoff;

		/*
		 * offset is added to the lower 16-bit value of csum_data,
		 * which is expected to contain the ULP offset; therefore
		 * CSUM_PARTIAL offset adjustment must be undone.
		 */
		if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL | CSUM_DATA_VALID)) ==
		    (CSUM_PARTIAL | CSUM_DATA_VALID)) {
			/*
			 * Get back the original ULP offset (this will
			 * undo the CSUM_PARTIAL logic in ip_output.)
			 */
			m->m_pkthdr.csum_data = (m->m_pkthdr.csum_tx_stuff -
			    m->m_pkthdr.csum_tx_start);
		}

		ulpoff = (m->m_pkthdr.csum_data & 0xffff); /* ULP csum offset */
		offset = hoff + hlen;                   /* ULP header */

		if (mlen < (ulpoff + sizeof(csum))) {
			panic("%s: mbuf %p pkt len (%u) proto %d invalid ULP "
			    "cksum offset (%u) cksum flags 0x%x\n", __func__,
			    m, mlen, ip->ip_p, ulpoff, m->m_pkthdr.csum_flags);
			/* NOTREACHED */
		}

		csum = inet_cksum(m, 0, offset, len);

		/* Update stats */
		ip_out_cksum_stats(ip->ip_p, len);

		/* RFC1122 4.1.3.4 */
		if (csum == 0 &&
		    (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_ZERO_INVERT))) {
			csum = 0xffff;
		}

		/* Insert the checksum in the ULP csum field */
		offset += ulpoff;
		if (offset + sizeof(csum) > m->m_len) {
			/* csum field spans mbufs; use the copy-back path */
			m_copyback(m, offset, sizeof(csum), &csum);
		} else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) {
			*(uint16_t *)(void *)(mtod(m, char *) + offset) = csum;
		} else {
			/* unaligned store via bcopy */
			bcopy(&csum, (mtod(m, char *) + offset), sizeof(csum));
		}
		m->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DATA_VALID |
		    CSUM_PARTIAL | CSUM_ZERO_INVERT);
	}

	if (sw_csum & CSUM_DELAY_IP) {
		/* IP header must be in the local buffer */
		VERIFY(_hlen == sizeof(*ip));
		if (_hlen != hlen) {
			/* header has options: re-copy the full header length */
			VERIFY(hlen <= sizeof(buf));
			m_copydata(m, hoff, hlen, (caddr_t)buf);
			ip = (struct ip *)(void *)buf;
			_hlen = hlen;
		}

		/*
		 * Compute the IP header checksum as if the IP length
		 * is the length which we believe is "correct"; see
		 * how ip_len gets calculated above.  Note that this
		 * is done on the local copy and not on the real one.
		 */
		ip->ip_len = htons(ip_len);
		ip->ip_sum = 0;
		csum = in_cksum_hdr_opt(ip);

		/* Update stats */
		ipstat.ips_snd_swcsum++;
		ipstat.ips_snd_swcsum_bytes += hlen;

		/*
		 * Insert only the checksum in the existing IP header
		 * csum field; all other fields are left unchanged.
		 */
		offset = hoff + offsetof(struct ip, ip_sum);
		if (offset + sizeof(csum) > m->m_len) {
			m_copyback(m, offset, sizeof(csum), &csum);
		} else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) {
			*(uint16_t *)(void *)(mtod(m, char *) + offset) = csum;
		} else {
			bcopy(&csum, (mtod(m, char *) + offset), sizeof(csum));
		}
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
	}

done:
	/* report which checksum work was actually done in software */
	return sw_csum;
}
2218 
2219 /*
2220  * Insert IP options into preformed packet.
2221  * Adjust IP destination as required for IP source routing,
2222  * as indicated by a non-zero in_addr at the start of the options.
2223  *
2224  * XXX This routine assumes that the packet has no options in place.
2225  */
static struct mbuf *
ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
{
	struct ipoption *p = mtod(opt, struct ipoption *);
	struct mbuf *n;
	struct ip *ip = mtod(m, struct ip *);
	unsigned optlen;

	/* ipopt_dst (first-hop address for source routing) is not an option */
	optlen = opt->m_len - sizeof(p->ipopt_dst);
	/* NOTE(review): ip_len appears to be in host order here — confirm */
	if (optlen + (u_short)ip->ip_len > IP_MAXPACKET) {
		return m;             /* XXX should fail */
	}
	if (p->ipopt_dst.s_addr) {
		/* source routed: redirect the packet to the first hop */
		ip->ip_dst = p->ipopt_dst;
	}
	if (m->m_flags & M_EXT || m_mtod_current(m) - optlen < m->m_pktdat) {
		/*
		 * No room to prepend in place (cluster-backed or not enough
		 * leading space): allocate a new mbuf to carry the IP header
		 * plus options, and chain the original after it.
		 */
		MGETHDR(n, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
		if (n == NULL) {
			return m;
		}
		n->m_pkthdr.rcvif = 0;
		n->m_pkthdr.len = m->m_pkthdr.len + optlen;
		/* strip the IP header off the old mbuf; it moves into "n" */
		m->m_len -= sizeof(struct ip);
		m->m_data += sizeof(struct ip);
		n->m_next = m;
		m = n;
		m->m_len = optlen + sizeof(struct ip);
		m->m_data += max_linkhdr;
		(void) memcpy(mtod(m, void *), ip, sizeof(struct ip));
	} else {
		/* enough leading space: slide the header forward in place */
		m->m_data -= optlen;
		m->m_len += optlen;
		m->m_pkthdr.len += optlen;
		ovbcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
	}
	ip = mtod(m, struct ip *);
	/* options go immediately after the fixed IP header */
	bcopy(p->ipopt_list, ip + 1, optlen);
	*phlen = sizeof(struct ip) + optlen;
	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, *phlen >> 2);
	ip->ip_len += optlen;
	return m;
}
2268 
2269 /*
2270  * Copy options from ip to jp,
2271  * omitting those not copied during fragmentation.
2272  */
2273 static int
ip_optcopy(struct ip * __indexable ip,struct ip * __indexable jp)2274 ip_optcopy(struct ip *__indexable ip, struct ip *__indexable jp)
2275 {
2276 	u_char *cp, *dp;
2277 	int opt, optlen, cnt;
2278 
2279 	cp = (u_char *)(ip + 1);
2280 	dp = (u_char *)(jp + 1);
2281 	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2282 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
2283 		opt = cp[0];
2284 		if (opt == IPOPT_EOL) {
2285 			break;
2286 		}
2287 		if (opt == IPOPT_NOP) {
2288 			/* Preserve for IP mcast tunnel's LSRR alignment. */
2289 			*dp++ = IPOPT_NOP;
2290 			optlen = 1;
2291 			continue;
2292 		}
2293 #if DIAGNOSTIC
2294 		if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2295 			panic("malformed IPv4 option passed to ip_optcopy");
2296 			/* NOTREACHED */
2297 		}
2298 #endif
2299 		optlen = cp[IPOPT_OLEN];
2300 #if DIAGNOSTIC
2301 		if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
2302 			panic("malformed IPv4 option passed to ip_optcopy");
2303 			/* NOTREACHED */
2304 		}
2305 #endif
2306 		/* bogus lengths should have been caught by ip_dooptions */
2307 		if (optlen > cnt) {
2308 			optlen = cnt;
2309 		}
2310 		if (IPOPT_COPIED(opt)) {
2311 			bcopy(cp, dp, optlen);
2312 			dp += optlen;
2313 		}
2314 	}
2315 	for (optlen = (int)(dp - (u_char *)(jp + 1)); optlen & 0x3; optlen++) {
2316 		*dp++ = IPOPT_EOL;
2317 	}
2318 	return optlen;
2319 }
2320 
2321 
2322 /*
2323  * IP socket option processing.
2324  */
/*
 * Handle [gs]etsockopt() for IPPROTO_IP-level options on "so".  The
 * SOL_SOCKET-level SO_BINDTODEVICE option is also accepted here.
 * Returns 0 on success or an errno.
 */
int
ip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct  inpcb *inp = sotoinpcb(so);
	int     error, optval;

	error = optval = 0;

	/* only IPPROTO_IP options, with the one SO_BINDTODEVICE exception */
	if (sopt->sopt_level != IPPROTO_IP && !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_BINDTODEVICE)) {
		return EINVAL;
	}

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		/*
		 *  Wait if we are in the middle of ip_output
		 *  as we unlocked the socket there and don't
		 *  want to overwrite the IP options
		 */
		if (inp->inp_sndinprog_cnt > 0) {
			lck_mtx_t *__single mutex_held = NULL;

			mutex_held = socket_getlock(so, PR_F_WILLUNLOCK);

			inp->inp_sndingprog_waiters++;

			while (inp->inp_sndinprog_cnt > 0) {
				msleep(&inp->inp_sndinprog_cnt, mutex_held,
				    PSOCK | PCATCH, "inp_sndinprog_cnt", NULL);
			}
			inp->inp_sndingprog_waiters--;
		}
		if (sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_BINDTODEVICE) {
			char namebuf[IFNAMSIZ + 1] = {};

			error = sooptcopyin_bindtodevice(sopt, namebuf, sizeof(namebuf));
			if (error != 0) {
				break;
			}
			/* force NUL termination before handing off */
			namebuf[IFNAMSIZ] = 0;

			error = inp_bindtodevice(inp, __unsafe_null_terminated_from_indexable(namebuf, namebuf + IFNAMSIZ));

			break;
		}
		switch (sopt->sopt_name) {
#ifdef notyet
		case IP_RETOPTS:
#endif
		case IP_OPTIONS: {
			struct mbuf *m;

			if (sopt->sopt_valsize > MLEN) {
				error = EMSGSIZE;
				break;
			}
			MGET(m, sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT,
			    MT_HEADER);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			m->m_len = (int32_t)sopt->sopt_valsize;
			error = sooptcopyin(sopt, mtod(m, char *),
			    m->m_len, m->m_len);
			if (error) {
				m_freem(m);
				break;
			}

			/* ip_pcbopts consumes "m" in all cases */
			return ip_pcbopts(sopt->sopt_name,
			           &inp->inp_options, m);
		}

		case IP_TOS:
		case IP_TTL:
		case IP_RECVOPTS:
		case IP_RECVRETOPTS:
		case IP_RECVDSTADDR:
		case IP_RECVIF:
		case IP_RECVTTL:
		case IP_RECVPKTINFO:
		case IP_RECVTOS:
		case IP_DONTFRAG:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}

			switch (sopt->sopt_name) {
			case IP_TOS:
				/* TOS is a single byte on the wire */
				if (optval > UINT8_MAX) {
					error = EINVAL;
					break;
				}
				inp->inp_ip_tos = (uint8_t)optval;
				break;

			case IP_TTL:
				/* TTL is a single byte on the wire */
				if (optval > UINT8_MAX) {
					error = EINVAL;
					break;
				}
				inp->inp_ip_ttl = (uint8_t)optval;
				break;
/* set or clear an inp_flags bit based on optval */
#define OPTSET(bit) do {                                                \
	if (optval) {                                                   \
	    inp->inp_flags |= bit;                                      \
	} else {                                                        \
	    inp->inp_flags &= ~bit;                                     \
	}                                                               \
} while (0)

/* same, but for inp_flags2 */
#define OPTSET2(bit) do {                                               \
	if (optval) {                                                   \
	    inp->inp_flags2 |= bit;                                     \
	} else {                                                        \
	    inp->inp_flags2 &= ~bit;                                    \
	}                                                               \
} while (0)

			case IP_RECVOPTS:
				OPTSET(INP_RECVOPTS);
				break;

			case IP_RECVRETOPTS:
				OPTSET(INP_RECVRETOPTS);
				break;

			case IP_RECVDSTADDR:
				OPTSET(INP_RECVDSTADDR);
				break;

			case IP_RECVIF:
				OPTSET(INP_RECVIF);
				break;

			case IP_RECVTTL:
				OPTSET(INP_RECVTTL);
				break;

			case IP_RECVPKTINFO:
				OPTSET(INP_PKTINFO);
				break;

			case IP_RECVTOS:
				OPTSET(INP_RECVTOS);
				break;

			case IP_DONTFRAG:
				/* This option is settable only for IPv4 */
				if (!(inp->inp_vflag & INP_IPV4)) {
					error = EINVAL;
					break;
				}
				OPTSET2(INP2_DONTFRAG);
				break;
#undef OPTSET
#undef OPTSET2
			}
			break;
		/*
		 * Multicast socket options are processed by the in_mcast
		 * module.
		 */
		case IP_MULTICAST_IF:
		case IP_MULTICAST_IFINDEX:
		case IP_MULTICAST_VIF:
		case IP_MULTICAST_TTL:
		case IP_MULTICAST_LOOP:
		case IP_ADD_MEMBERSHIP:
		case IP_DROP_MEMBERSHIP:
		case IP_ADD_SOURCE_MEMBERSHIP:
		case IP_DROP_SOURCE_MEMBERSHIP:
		case IP_BLOCK_SOURCE:
		case IP_UNBLOCK_SOURCE:
		case IP_MSFILTER:
		case MCAST_JOIN_GROUP:
		case MCAST_LEAVE_GROUP:
		case MCAST_JOIN_SOURCE_GROUP:
		case MCAST_LEAVE_SOURCE_GROUP:
		case MCAST_BLOCK_SOURCE:
		case MCAST_UNBLOCK_SOURCE:
			error = inp_setmoptions(inp, sopt);
			break;

		case IP_PORTRANGE:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}

			switch (optval) {
			case IP_PORTRANGE_DEFAULT:
				inp->inp_flags &= ~(INP_LOWPORT);
				inp->inp_flags &= ~(INP_HIGHPORT);
				break;

			case IP_PORTRANGE_HIGH:
				inp->inp_flags &= ~(INP_LOWPORT);
				inp->inp_flags |= INP_HIGHPORT;
				break;

			case IP_PORTRANGE_LOW:
				inp->inp_flags &= ~(INP_HIGHPORT);
				inp->inp_flags |= INP_LOWPORT;
				break;

			default:
				error = EINVAL;
				break;
			}
			break;

#if IPSEC
		case IP_IPSEC_POLICY: {
			caddr_t req = NULL;
			size_t len = 0;
			int priv;
			mbuf_ref_t m;
			int optname;

			if ((error = soopt_getm(sopt, &m)) != 0) { /* XXX */
				break;
			}
			if ((error = soopt_mcopyin(sopt, m)) != 0) { /* XXX */
				break;
			}
			/* superuser gets to install privileged policies */
			priv = (proc_suser(sopt->sopt_p) == 0);
			if (m) {
				req = mtod(m, caddr_t);
				len = m->m_len;
			}
			optname = sopt->sopt_name;
			error = ipsec4_set_policy(inp, optname, req, len, priv);
			m_freem(m);
			break;
		}
#endif /* IPSEC */

#if TRAFFIC_MGT
		case IP_TRAFFIC_MGT_BACKGROUND: {
			unsigned background = 0;

			error = sooptcopyin(sopt, &background,
			    sizeof(background), sizeof(background));
			if (error) {
				break;
			}

			if (background) {
				socket_set_traffic_mgt_flags_locked(so,
				    TRAFFIC_MGT_SO_BACKGROUND);
			} else {
				socket_clear_traffic_mgt_flags_locked(so,
				    TRAFFIC_MGT_SO_BACKGROUND);
			}

			break;
		}
#endif /* TRAFFIC_MGT */

		/*
		 * On a multihomed system, scoped routing can be used to
		 * restrict the source interface used for sending packets.
		 * The socket option IP_BOUND_IF binds a particular AF_INET
		 * socket to an interface such that data sent on the socket
		 * is restricted to that interface.  This is unlike the
		 * SO_DONTROUTE option where the routing table is bypassed;
		 * therefore it allows for a greater flexibility and control
		 * over the system behavior, and does not place any restriction
		 * on the destination address type (e.g.  unicast, multicast,
		 * or broadcast if applicable) or whether or not the host is
		 * directly reachable.  Note that in the multicast transmit
		 * case, IP_MULTICAST_{IF,IFINDEX} takes precedence over
		 * IP_BOUND_IF, since the former practically bypasses the
		 * routing table; in this case, IP_BOUND_IF sets the default
		 * interface used for sending multicast packets in the absence
		 * of an explicit multicast transmit interface.
		 */
		case IP_BOUND_IF:
			/* This option is settable only for IPv4 */
			if (!(inp->inp_vflag & INP_IPV4)) {
				error = EINVAL;
				break;
			}

			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));

			if (error) {
				break;
			}

			error = inp_bindif(inp, optval, NULL);
			break;

		case IP_NO_IFT_CELLULAR:
			/* This option is settable only for IPv4 */
			if (!(inp->inp_vflag & INP_IPV4)) {
				error = EINVAL;
				break;
			}

			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));

			if (error) {
				break;
			}

			/* once set, it cannot be unset */
			if (!optval && INP_NO_CELLULAR(inp)) {
				error = EINVAL;
				break;
			}

			error = so_set_restrictions(so,
			    SO_RESTRICT_DENY_CELLULAR);
			break;

		case IP_OUT_IF:
			/* This option is not settable */
			error = EINVAL;
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;

	case SOPT_GET:
		if (sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_BINDTODEVICE) {
			char namebuf[IFNAMSIZ + 1] = {};

			/* empty name when not bound to an interface */
			if (inp->inp_flags & INP_BOUND_IF) {
				strlcpy(namebuf, inp->inp_boundifp->if_xname, IFNAMSIZ);
			}
			error = sooptcopyout(sopt, &namebuf, IFNAMSIZ);
			break;
		}
		switch (sopt->sopt_name) {
		case IP_OPTIONS:
		case IP_RETOPTS:
			if (inp->inp_options) {
				error = sooptcopyout(sopt,
				    mtod(inp->inp_options, char *),
				    inp->inp_options->m_len);
			} else {
				sopt->sopt_valsize = 0;
			}
			break;

		case IP_TOS:
		case IP_TTL:
		case IP_RECVOPTS:
		case IP_RECVRETOPTS:
		case IP_RECVDSTADDR:
		case IP_RECVIF:
		case IP_RECVTTL:
		case IP_PORTRANGE:
		case IP_RECVPKTINFO:
		case IP_RECVTOS:
		case IP_DONTFRAG:
			switch (sopt->sopt_name) {
			case IP_TOS:
				optval = inp->inp_ip_tos;
				break;

			case IP_TTL:
				optval = inp->inp_ip_ttl;
				break;

/* read an inp_flags / inp_flags2 bit as a 0/1 value */
#define OPTBIT(bit)     (inp->inp_flags & bit ? 1 : 0)
#define OPTBIT2(bit)    (inp->inp_flags2 & bit ? 1 : 0)
			case IP_RECVOPTS:
				optval = OPTBIT(INP_RECVOPTS);
				break;

			case IP_RECVRETOPTS:
				optval = OPTBIT(INP_RECVRETOPTS);
				break;

			case IP_RECVDSTADDR:
				optval = OPTBIT(INP_RECVDSTADDR);
				break;

			case IP_RECVIF:
				optval = OPTBIT(INP_RECVIF);
				break;

			case IP_RECVTTL:
				optval = OPTBIT(INP_RECVTTL);
				break;

			case IP_PORTRANGE:
				if (inp->inp_flags & INP_HIGHPORT) {
					optval = IP_PORTRANGE_HIGH;
				} else if (inp->inp_flags & INP_LOWPORT) {
					optval = IP_PORTRANGE_LOW;
				} else {
					optval = 0;
				}
				break;

			case IP_RECVPKTINFO:
				optval = OPTBIT(INP_PKTINFO);
				break;

			case IP_RECVTOS:
				optval = OPTBIT(INP_RECVTOS);
				break;
			case IP_DONTFRAG:
				optval = OPTBIT2(INP2_DONTFRAG);
				break;
			}
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;

		case IP_MULTICAST_IF:
		case IP_MULTICAST_IFINDEX:
		case IP_MULTICAST_VIF:
		case IP_MULTICAST_TTL:
		case IP_MULTICAST_LOOP:
		case IP_MSFILTER:
			error = inp_getmoptions(inp, sopt);
			break;

#if IPSEC
		case IP_IPSEC_POLICY: {
			error = 0; /* This option is no longer supported */
			break;
		}
#endif /* IPSEC */

#if TRAFFIC_MGT
		case IP_TRAFFIC_MGT_BACKGROUND: {
			unsigned background = (so->so_flags1 &
			    SOF1_TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0;
			return sooptcopyout(sopt, &background,
			           sizeof(background));
		}
#endif /* TRAFFIC_MGT */

		case IP_BOUND_IF:
			/* optval stays 0 when not bound to an interface */
			if (inp->inp_flags & INP_BOUND_IF) {
				optval = inp->inp_boundifp->if_index;
			}
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;

		case IP_NO_IFT_CELLULAR:
			optval = INP_NO_CELLULAR(inp) ? 1 : 0;
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;

		case IP_OUT_IF:
			optval = (inp->inp_last_outifp != NULL) ?
			    inp->inp_last_outifp->if_index : 0;
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return error;
}
2798 
2799 /*
2800  * Set up IP options in pcb for insertion in output packets.
2801  * Store in mbuf with pointer in pcbopt, adding pseudo-option
2802  * with destination address if source routed.
2803  */
static int
ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m)
{
#pragma unused(optname)
	int cnt, optlen;
	u_char *cp;
	u_char opt;

	/* turn off any old options */
	if (*pcbopt) {
		(void) m_free(*pcbopt);
	}
	*pcbopt = 0;
	if (m == (struct mbuf *)0 || m->m_len == 0) {
		/*
		 * Only turning off any previous options.
		 */
		if (m) {
			(void) m_free(m);
		}
		return 0;
	}

	/* user-supplied options must be 32-bit aligned in length */
	if (m->m_len % sizeof(int32_t)) {
		goto bad;
	}

	/*
	 * IP first-hop destination address will be stored before
	 * actual options; move other options back
	 * and clear it when none present.
	 */
	if (m_mtod_upper_bound(m) - m_mtod_end(m) < sizeof(struct in_addr)) {
		goto bad;
	}
	cnt = m->m_len;
	m->m_len += sizeof(struct in_addr);
	cp = mtod(m, u_char *) + sizeof(struct in_addr);
	/* overlapping move: shift the options past the in_addr slot */
	ovbcopy(mtod(m, caddr_t), (caddr_t)cp, (unsigned)cnt);
	bzero(mtod(m, caddr_t), sizeof(struct in_addr));

	/* walk and validate each option; extract the first hop from LSRR/SSRR */
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL) {
			break;
		}
		if (opt == IPOPT_NOP) {
			optlen = 1;
		} else {
			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
				goto bad;
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
				goto bad;
			}
		}
		switch (opt) {
		default:
			break;

		case IPOPT_LSRR:
		case IPOPT_SSRR:
			/*
			 * user process specifies route as:
			 *	->A->B->C->D
			 * D must be our final destination (but we can't
			 * check that since we may not have connected yet).
			 * A is first hop destination, which doesn't appear in
			 * actual IP option, but is stored before the options.
			 */
			if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr)) {
				goto bad;
			}
			if (optlen > UINT8_MAX) {
				goto bad;
			}
			/* remove hop A from the option; it lives in the prefix */
			m->m_len -= sizeof(struct in_addr);
			cnt -= sizeof(struct in_addr);
			optlen -= sizeof(struct in_addr);
			cp[IPOPT_OLEN] = (uint8_t)optlen;
			/*
			 * Move first hop before start of options.
			 */
			bcopy((caddr_t)&cp[IPOPT_OFFSET + 1], mtod(m, caddr_t),
			    sizeof(struct in_addr));
			/*
			 * Then copy rest of options back
			 * to close up the deleted entry.
			 */
			ovbcopy((caddr_t)(&cp[IPOPT_OFFSET + 1] +
			    sizeof(struct in_addr)),
			    (caddr_t)&cp[IPOPT_OFFSET + 1],
			    (unsigned)cnt - (IPOPT_MINOFF - 1));
			break;
		}
	}
	/* enforce the overall option-area size limit (plus first-hop prefix) */
	if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr)) {
		goto bad;
	}
	/* hand ownership of "m" to the pcb */
	*pcbopt = m;
	return 0;

bad:
	(void) m_free(m);
	return EINVAL;
}
2911 
2912 void
ip_moptions_init(void)2913 ip_moptions_init(void)
2914 {
2915 	PE_parse_boot_argn("ifa_debug", &imo_debug, sizeof(imo_debug));
2916 
2917 	vm_size_t imo_size = (imo_debug == 0) ? sizeof(struct ip_moptions) :
2918 	    sizeof(struct ip_moptions_dbg);
2919 
2920 	imo_zone = zone_create(IMO_ZONE_NAME, imo_size, ZC_ZFREE_CLEARMEM);
2921 }
2922 
2923 void
imo_addref(struct ip_moptions * imo,int locked)2924 imo_addref(struct ip_moptions *imo, int locked)
2925 {
2926 	if (!locked) {
2927 		IMO_LOCK(imo);
2928 	} else {
2929 		IMO_LOCK_ASSERT_HELD(imo);
2930 	}
2931 
2932 	if (++imo->imo_refcnt == 0) {
2933 		panic("%s: imo %p wraparound refcnt", __func__, imo);
2934 		/* NOTREACHED */
2935 	} else if (imo->imo_trace != NULL) {
2936 		(*imo->imo_trace)(imo, TRUE);
2937 	}
2938 
2939 	if (!locked) {
2940 		IMO_UNLOCK(imo);
2941 	}
2942 }
2943 
2944 void
imo_remref(struct ip_moptions * imo)2945 imo_remref(struct ip_moptions *imo)
2946 {
2947 	IMO_LOCK(imo);
2948 	if (imo->imo_refcnt == 0) {
2949 		panic("%s: imo %p negative refcnt", __func__, imo);
2950 		/* NOTREACHED */
2951 	} else if (imo->imo_trace != NULL) {
2952 		(*imo->imo_trace)(imo, FALSE);
2953 	}
2954 
2955 	--imo->imo_refcnt;
2956 	if (imo->imo_refcnt > 0) {
2957 		IMO_UNLOCK(imo);
2958 		return;
2959 	}
2960 
2961 	IMO_PURGE_LOCKED(imo);
2962 
2963 	IMO_UNLOCK(imo);
2964 
2965 	kfree_type_counted_by(struct in_multi *, imo->imo_max_memberships, imo->imo_membership);
2966 	kfree_type_counted_by(struct in_mfilter, imo->imo_max_filters, imo->imo_mfilters);
2967 
2968 	lck_mtx_destroy(&imo->imo_lock, &ifa_mtx_grp);
2969 
2970 	if (!(imo->imo_debug & IFD_ALLOC)) {
2971 		panic("%s: imo %p cannot be freed", __func__, imo);
2972 		/* NOTREACHED */
2973 	}
2974 	zfree(imo_zone, imo);
2975 }
2976 
2977 static void
imo_trace(struct ip_moptions * imo,int refhold)2978 imo_trace(struct ip_moptions *imo, int refhold)
2979 {
2980 	struct ip_moptions_dbg *imo_dbg = (struct ip_moptions_dbg *)imo;
2981 	ctrace_t *tr;
2982 	u_int32_t idx;
2983 	u_int16_t *cnt;
2984 
2985 	if (!(imo->imo_debug & IFD_DEBUG)) {
2986 		panic("%s: imo %p has no debug structure", __func__, imo);
2987 		/* NOTREACHED */
2988 	}
2989 	if (refhold) {
2990 		cnt = &imo_dbg->imo_refhold_cnt;
2991 		tr = imo_dbg->imo_refhold;
2992 	} else {
2993 		cnt = &imo_dbg->imo_refrele_cnt;
2994 		tr = imo_dbg->imo_refrele;
2995 	}
2996 
2997 	idx = os_atomic_inc_orig(cnt, relaxed) % IMO_TRACE_HIST_SIZE;
2998 	ctrace_record(&tr[idx]);
2999 }
3000 
3001 struct ip_moptions *
ip_allocmoptions(zalloc_flags_t how)3002 ip_allocmoptions(zalloc_flags_t how)
3003 {
3004 	struct ip_moptions *imo;
3005 
3006 	imo = zalloc_flags(imo_zone, how | Z_ZERO);
3007 	if (imo != NULL) {
3008 		lck_mtx_init(&imo->imo_lock, &ifa_mtx_grp, &ifa_mtx_attr);
3009 		imo->imo_debug |= IFD_ALLOC;
3010 		if (imo_debug != 0) {
3011 			imo->imo_debug |= IFD_DEBUG;
3012 			imo->imo_trace = imo_trace;
3013 		}
3014 		IMO_ADDREF(imo);
3015 	}
3016 
3017 	return imo;
3018 }
3019 
3020 /*
3021  * Routine called from ip_output() to loop back a copy of an IP multicast
3022  * packet to the input queue of a specified interface.  Note that this
3023  * calls the output routine of the loopback "driver", but with an interface
3024  * pointer that might NOT be a loopback interface -- evil, but easier than
3025  * replicating that code here.
3026  */
static void
ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m,
    struct sockaddr_in *dst, int hlen)
{
	struct mbuf *copym;
	struct ip *ip;

	/* Nothing to loop the packet back through without lo0. */
	if (lo_ifp == NULL) {
		return;
	}

	/*
	 * Copy the packet header as it's needed for the checksum
	 * Make sure to deep-copy IP header portion in case the data
	 * is in an mbuf cluster, so that we can safely override the IP
	 * header portion later.
	 */
	copym = m_copym_mode(m, 0, M_COPYALL, M_DONTWAIT, NULL, NULL, M_COPYM_COPY_HDR);
	if (copym != NULL && ((copym->m_flags & M_EXT) || copym->m_len < hlen)) {
		copym = m_pullup(copym, hlen);
	}

	/* Copy (or pullup) failed; silently drop the loopback copy. */
	if (copym == NULL) {
		return;
	}

	/*
	 * We don't bother to fragment if the IP length is greater
	 * than the interface's MTU.  Can this possibly matter?
	 */
	ip = mtod(copym, struct ip *);
	/* Convert header fields to network byte order for the wire. */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	/* Recompute the IP header checksum over the (copied) header. */
	ip->ip_sum = 0;
	ip->ip_sum = ip_cksum_hdr_out(copym, hlen);

	/*
	 * Mark checksum as valid unless receive checksum offload is
	 * disabled; if so, compute checksum in software.  If the
	 * interface itself is lo0, this will be overridden by if_loop.
	 */
	if (hwcksum_rx) {
		copym->m_pkthdr.csum_flags &= ~(CSUM_PARTIAL | CSUM_ZERO_INVERT);
		copym->m_pkthdr.csum_flags |=
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		copym->m_pkthdr.csum_data = 0xffff;
	} else if (copym->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		/*
		 * in_delayed_cksum() expects ip_len in host order;
		 * temporarily flip it back around the call.
		 */
#if BYTE_ORDER != BIG_ENDIAN
		NTOHS(ip->ip_len);
#endif
		in_delayed_cksum(copym);
#if BYTE_ORDER != BIG_ENDIAN
		HTONS(ip->ip_len);
#endif
	}

	/*
	 * Stuff the 'real' ifp into the pkthdr, to be used in matching
	 * in ip_input(); we need the loopback ifp/dl_tag passed as args
	 * to make the loopback driver compliant with the data link
	 * requirements.
	 */
	copym->m_pkthdr.rcvif = origifp;

	/*
	 * Also record the source interface (which owns the source address).
	 * This is basically a stripped down version of ifa_foraddr().
	 */
	if (srcifp == NULL) {
		struct in_ifaddr *ia;

		/* Hash lookup of the interface owning ip_src. */
		lck_rw_lock_shared(&in_ifaddr_rwlock);
		TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_src.s_addr), ia_hash) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_src.s_addr) {
				srcifp = ia->ia_ifp;
				IFA_UNLOCK(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		lck_rw_done(&in_ifaddr_rwlock);
	}
	if (srcifp != NULL) {
		ip_setsrcifaddr_info(copym, srcifp->if_index, NULL);
	}
	ip_setdstifaddr_info(copym, origifp->if_index, NULL);

	/* Hand the copy to the loopback driver, tagged with dst. */
	dlil_output(lo_ifp, PF_INET, copym, NULL, SA(dst), 0, NULL);
}
3119 
3120 /*
3121  * Given a source IP address (and route, if available), determine the best
3122  * interface to send the packet from.  Checking for (and updating) the
3123  * ROF_SRCIF_SELECTED flag in the pcb-supplied route placeholder is done
3124  * without any locks based on the assumption that ip_output() is single-
3125  * threaded per-pcb, i.e. for any given pcb there can only be one thread
3126  * performing output at the IP layer.
3127  *
3128  * This routine is analogous to in6_selectroute() for IPv6.
3129  */
static struct ifaddr *
in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope)
{
	struct ifaddr *ifa = NULL;
	struct in_addr src = ip->ip_src;
	struct in_addr dst = ip->ip_dst;
	struct ifnet *rt_ifp;
	char s_src[MAX_IPv4_STR_LEN], s_dst[MAX_IPv4_STR_LEN];

	/* Caller must have chosen a concrete source address already. */
	VERIFY(src.s_addr != INADDR_ANY);

	if (ip_select_srcif_debug) {
		(void) inet_ntop(AF_INET, &src.s_addr, s_src, sizeof(s_src));
		(void) inet_ntop(AF_INET, &dst.s_addr, s_dst, sizeof(s_dst));
	}

	/*
	 * ro->ro_rt (if any) stays locked through most of this routine;
	 * it is unlocked either when the route is blown away below, or
	 * at the very end.
	 */
	if (ro->ro_rt != NULL) {
		RT_LOCK(ro->ro_rt);
	}

	rt_ifp = (ro->ro_rt != NULL) ? ro->ro_rt->rt_ifp : NULL;

	/*
	 * Given the source IP address, find a suitable source interface
	 * to use for transmission; if the caller has specified a scope,
	 * optimize the search by looking at the addresses only for that
	 * interface.  This is still suboptimal, however, as we need to
	 * traverse the per-interface list.
	 */
	if (ifscope != IFSCOPE_NONE || ro->ro_rt != NULL) {
		unsigned int scope = ifscope;

		/*
		 * If no scope is specified and the route is stale (pointing
		 * to a defunct interface) use the current primary interface;
		 * this happens when switching between interfaces configured
		 * with the same IP address.  Otherwise pick up the scope
		 * information from the route; the ULP may have looked up a
		 * correct route and we just need to verify it here and mark
		 * it with the ROF_SRCIF_SELECTED flag below.
		 */
		if (scope == IFSCOPE_NONE) {
			/* ro->ro_rt != NULL here, so rt_ifp is valid. */
			scope = rt_ifp->if_index;
			if (scope != get_primary_ifscope(AF_INET) &&
			    ROUTE_UNUSABLE(ro)) {
				scope = get_primary_ifscope(AF_INET);
			}
		}

		/* Returns ifa with a reference held, or NULL. */
		ifa = (struct ifaddr *)ifa_foraddr_scoped(src.s_addr, scope);

		if (ifa == NULL && ip->ip_p != IPPROTO_UDP &&
		    ip->ip_p != IPPROTO_TCP && ipforwarding) {
			/*
			 * If forwarding is enabled, and if the packet isn't
			 * TCP or UDP, check if the source address belongs
			 * to one of our own interfaces; if so, demote the
			 * interface scope and do a route lookup right below.
			 */
			ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
			if (ifa != NULL) {
				ifa_remref(ifa);
				ifa = NULL;
				ifscope = IFSCOPE_NONE;
			}
		}

		if (ip_select_srcif_debug && ifa != NULL) {
			if (ro->ro_rt != NULL) {
				printf("%s->%s ifscope %d->%d ifa_if %s "
				    "ro_if %s\n", s_src, s_dst, ifscope,
				    scope, if_name(ifa->ifa_ifp),
				    if_name(rt_ifp));
			} else {
				printf("%s->%s ifscope %d->%d ifa_if %s\n",
				    s_src, s_dst, ifscope, scope,
				    if_name(ifa->ifa_ifp));
			}
		}
	}

	/*
	 * Slow path; search for an interface having the corresponding source
	 * IP address if the scope was not specified by the caller, and:
	 *
	 *   1) There currently isn't any route, or,
	 *   2) The interface used by the route does not own that source
	 *	IP address; in this case, the route will get blown away
	 *	and we'll do a more specific scoped search using the newly
	 *	found interface.
	 */
	if (ifa == NULL && ifscope == IFSCOPE_NONE) {
		ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);

		/*
		 * If we have the IP address, but not the route, we don't
		 * really know whether or not it belongs to the correct
		 * interface (it could be shared across multiple interfaces.)
		 * The only way to find out is to do a route lookup.
		 */
		if (ifa != NULL && ro->ro_rt == NULL) {
			struct rtentry *rt;
			struct sockaddr_in sin;
			struct ifaddr *oifa = NULL;

			/* Build a destination sockaddr for the lookup. */
			SOCKADDR_ZERO(&sin, sizeof(sin));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof(sin);
			sin.sin_addr = dst;

			lck_mtx_lock(rnh_lock);
			if ((rt = rt_lookup(TRUE, SA(&sin), NULL,
			    rt_tables[AF_INET], IFSCOPE_NONE)) != NULL) {
				RT_LOCK(rt);
				/*
				 * If the route uses a different interface,
				 * use that one instead.  The IP address of
				 * the ifaddr that we pick up here is not
				 * relevant.
				 */
				if (ifa->ifa_ifp != rt->rt_ifp) {
					oifa = ifa;
					ifa = rt->rt_ifa;
					ifa_addref(ifa);
					RT_UNLOCK(rt);
				} else {
					RT_UNLOCK(rt);
				}
				rtfree_locked(rt);
			}
			lck_mtx_unlock(rnh_lock);

			if (oifa != NULL) {
				struct ifaddr *iifa;

				/*
				 * See if the interface pointed to by the
				 * route is configured with the source IP
				 * address of the packet.
				 */
				iifa = (struct ifaddr *)ifa_foraddr_scoped(
					src.s_addr, ifa->ifa_ifp->if_index);

				if (iifa != NULL) {
					/*
					 * Found it; drop the original one
					 * as well as the route interface
					 * address, and use this instead.
					 */
					ifa_remref(oifa);
					ifa_remref(ifa);
					ifa = iifa;
				} else if (!ipforwarding ||
				    (rt->rt_flags & RTF_GATEWAY)) {
					/*
					 * NOTE(review): rt's reference was
					 * released via rtfree_locked() above;
					 * this read of rt->rt_flags appears to
					 * rely on the entry remaining valid
					 * (e.g. still held by the radix tree)
					 * -- confirm this cannot be a stale
					 * access.
					 *
					 * This interface doesn't have that
					 * source IP address; drop the route
					 * interface address and just use the
					 * original one, and let the caller
					 * do a scoped route lookup.
					 */
					ifa_remref(ifa);
					ifa = oifa;
				} else {
					/*
					 * Forwarding is enabled and the source
					 * address belongs to one of our own
					 * interfaces which isn't the outgoing
					 * interface, and we have a route, and
					 * the destination is on a network that
					 * is directly attached (onlink); drop
					 * the original one and use the route
					 * interface address instead.
					 */
					ifa_remref(oifa);
				}
			}
		} else if (ifa != NULL && ro->ro_rt != NULL &&
		    !(ro->ro_rt->rt_flags & RTF_GATEWAY) &&
		    ifa->ifa_ifp != ro->ro_rt->rt_ifp && ipforwarding) {
			/*
			 * Forwarding is enabled and the source address belongs
			 * to one of our own interfaces which isn't the same
			 * as the interface used by the known route; drop the
			 * original one and use the route interface address.
			 */
			ifa_remref(ifa);
			ifa = ro->ro_rt->rt_ifa;
			ifa_addref(ifa);
		}

		if (ip_select_srcif_debug && ifa != NULL) {
			printf("%s->%s ifscope %d ifa_if %s\n",
			    s_src, s_dst, ifscope, if_name(ifa->ifa_ifp));
		}
	}

	if (ro->ro_rt != NULL) {
		RT_LOCK_ASSERT_HELD(ro->ro_rt);
	}
	/*
	 * If there is a non-loopback route with the wrong interface, or if
	 * there is no interface configured with such an address, blow it
	 * away.  Except for local/loopback, we look for one with a matching
	 * interface scope/index.
	 */
	if (ro->ro_rt != NULL &&
	    (ifa == NULL || (ifa->ifa_ifp != rt_ifp && rt_ifp != lo_ifp) ||
	    !(ro->ro_rt->rt_flags & RTF_UP))) {
		if (ip_select_srcif_debug) {
			if (ifa != NULL) {
				printf("%s->%s ifscope %d ro_if %s != "
				    "ifa_if %s (cached route cleared)\n",
				    s_src, s_dst, ifscope, if_name(rt_ifp),
				    if_name(ifa->ifa_ifp));
			} else {
				printf("%s->%s ifscope %d ro_if %s "
				    "(no ifa_if found)\n",
				    s_src, s_dst, ifscope, if_name(rt_ifp));
			}
		}

		/* Drop the mismatched cached route. */
		RT_UNLOCK(ro->ro_rt);
		ROUTE_RELEASE(ro);

		/*
		 * If the destination is IPv4 LLA and the route's interface
		 * doesn't match the source interface, then the source IP
		 * address is wrong; it most likely belongs to the primary
		 * interface associated with the IPv4 LL subnet.  Drop the
		 * packet rather than letting it go out and return an error
		 * to the ULP.  This actually applies not only to IPv4 LL
		 * but other shared subnets; for now we explicitly test only
		 * for the former case and save the latter for future.
		 */
		if (IN_LINKLOCAL(ntohl(dst.s_addr)) &&
		    !IN_LINKLOCAL(ntohl(src.s_addr)) && ifa != NULL) {
			ifa_remref(ifa);
			ifa = NULL;
		}
	}

	if (ip_select_srcif_debug && ifa == NULL) {
		printf("%s->%s ifscope %d (neither ro_if/ifa_if found)\n",
		    s_src, s_dst, ifscope);
	}

	/*
	 * If there is a route, mark it accordingly.  If there isn't one,
	 * we'll get here again during the next transmit (possibly with a
	 * route) and the flag will get set at that point.  For IPv4 LLA
	 * destination, mark it only if the route has been fully resolved;
	 * otherwise we want to come back here again when the route points
	 * to the interface over which the ARP reply arrives on.
	 */
	if (ro->ro_rt != NULL && (!IN_LINKLOCAL(ntohl(dst.s_addr)) ||
	    (ro->ro_rt->rt_gateway->sa_family == AF_LINK &&
	    SDL(ro->ro_rt->rt_gateway)->sdl_alen != 0))) {
		if (ifa != NULL) {
			ifa_addref(ifa);        /* for route */
		}
		if (ro->ro_srcia != NULL) {
			ifa_remref(ro->ro_srcia);
		}
		ro->ro_srcia = ifa;
		ro->ro_flags |= ROF_SRCIF_SELECTED;
		RT_GENID_SYNC(ro->ro_rt);
	}

	if (ro->ro_rt != NULL) {
		RT_UNLOCK(ro->ro_rt);
	}

	/* Caller owns the returned reference (may be NULL). */
	return ifa;
}
3405 
3406 /*
3407  * @brief	Given outgoing interface it determines what checksum needs
3408  *      to be computed in software and what needs to be offloaded to the
3409  *      interface.
3410  *
3411  * @param	ifp Pointer to the outgoing interface
3412  * @param	m Pointer to the packet
3413  * @param	hlen IP header length
3414  * @param	ip_len Total packet size i.e. headers + data payload
3415  * @param	sw_csum Pointer to a software checksum flag set
3416  *
3417  * @return	void
3418  */
void
ip_output_checksum(struct ifnet *ifp, struct mbuf *m, int hlen, int ip_len,
    uint32_t *sw_csum)
{
	uint32_t hwcap = ifp->if_hwassist;

	/* The IP header checksum is always requested. */
	m->m_pkthdr.csum_flags |= CSUM_IP;

	if (!hwcksum_tx) {
		/* do all in software; hardware checksum offload is disabled */
		*sw_csum = (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m->m_pkthdr.csum_flags;
	} else {
		/* do in software what the hardware cannot */
		*sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap);
	}

	if (hlen != sizeof(struct ip)) {
		/* IP options present: offload engines can't handle them. */
		*sw_csum |= ((CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m->m_pkthdr.csum_flags);
	} else if ((*sw_csum & CSUM_DELAY_DATA) && (hwcap & CSUM_PARTIAL)) {
		/*
		 * If the explicitly required data csum offload is not supported by hardware,
		 * do it by partial checksum. Here we assume TSO implies support for IP
		 * and data sum.
		 */
		int interface_mtu = ifp->if_mtu;

		if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
			interface_mtu = IN6_LINKMTU(ifp);
			/* Further adjust the size for CLAT46 expansion */
			interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
		}

		/*
		 * Partial checksum offload, if non-IP fragment, and TCP only
		 * (no UDP support, as the hardware may not be able to convert
		 * +0 to -0 (0xffff) per RFC1122 4.1.3.4. unless the interface
		 * supports "invert zero" capability.)
		 */
		if (hwcksum_tx &&
		    ((m->m_pkthdr.csum_flags & CSUM_TCP) ||
		    ((hwcap & CSUM_ZERO_INVERT) &&
		    (m->m_pkthdr.csum_flags & CSUM_ZERO_INVERT))) &&
		    ip_len <= interface_mtu) {
			/* csum_data holds the ULP checksum-stuff offset. */
			uint16_t start = sizeof(struct ip);
			uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff;
			m->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PARTIAL);
			m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
			m->m_pkthdr.csum_tx_start = start;
			/* do IP hdr chksum in software */
			*sw_csum = CSUM_DELAY_IP;
		} else {
			*sw_csum |= (CSUM_DELAY_DATA & m->m_pkthdr.csum_flags);
		}
	}

	/* Compute the transport checksum in software if required. */
	if (*sw_csum & CSUM_DELAY_DATA) {
		in_delayed_cksum(m);
		*sw_csum &= ~CSUM_DELAY_DATA;
	}

	if (hwcksum_tx) {
		uint32_t delay_data = m->m_pkthdr.csum_flags & CSUM_DELAY_DATA;
		uint32_t hw_csum = IF_HWASSIST_CSUM_FLAGS(hwcap);

		/*
		 * Drop off bits that aren't supported by hardware;
		 * also make sure to preserve non-checksum related bits.
		 */
		m->m_pkthdr.csum_flags =
		    ((m->m_pkthdr.csum_flags & (hw_csum | CSUM_DATA_VALID)) |
		    (m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_MASK));

		/*
		 * If hardware supports partial checksum but not delay_data,
		 * add back delay_data.
		 */
		if ((hw_csum & CSUM_PARTIAL) != 0 &&
		    (hw_csum & delay_data) == 0) {
			m->m_pkthdr.csum_flags |= delay_data;
		}
	} else {
		/* drop all bits; hardware checksum offload is disabled */
		m->m_pkthdr.csum_flags = 0;
	}
}
3507 
3508 /*
3509  * GRE protocol output for PPP/PPTP
3510  */
3511 int
ip_gre_output(struct mbuf * m)3512 ip_gre_output(struct mbuf *m)
3513 {
3514 	struct route ro;
3515 	int error;
3516 
3517 	bzero(&ro, sizeof(ro));
3518 
3519 	error = ip_output(m, NULL, &ro, 0, NULL, NULL);
3520 
3521 	ROUTE_RELEASE(&ro);
3522 
3523 	return error;
3524 }
3525 
3526 static int
3527 sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS
3528 {
3529 #pragma unused(arg1, arg2)
3530 	int error, i;
3531 
3532 	i = ip_output_measure;
3533 	error = sysctl_handle_int(oidp, &i, 0, req);
3534 	if (error || req->newptr == USER_ADDR_NULL) {
3535 		goto done;
3536 	}
3537 	/* impose bounds */
3538 	if (i < 0 || i > 1) {
3539 		error = EINVAL;
3540 		goto done;
3541 	}
3542 	if (ip_output_measure != i && i == 1) {
3543 		net_perf_initialize(&net_perf, ip_output_measure_bins);
3544 	}
3545 	ip_output_measure = i;
3546 done:
3547 	return error;
3548 }
3549 
3550 static int
3551 sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS
3552 {
3553 #pragma unused(arg1, arg2)
3554 	int error;
3555 	uint64_t i;
3556 
3557 	i = ip_output_measure_bins;
3558 	error = sysctl_handle_quad(oidp, &i, 0, req);
3559 	if (error || req->newptr == USER_ADDR_NULL) {
3560 		goto done;
3561 	}
3562 	/* validate data */
3563 	if (!net_perf_validate_bins(i)) {
3564 		error = EINVAL;
3565 		goto done;
3566 	}
3567 	ip_output_measure_bins = i;
3568 done:
3569 	return error;
3570 }
3571 
3572 static int
3573 sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS
3574 {
3575 #pragma unused(oidp, arg1, arg2)
3576 	if (req->oldptr == USER_ADDR_NULL) {
3577 		req->oldlen = (size_t)sizeof(struct ipstat);
3578 	}
3579 
3580 	return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
3581 }
3582