xref: /xnu-8792.41.9/bsd/netinet/ip_input.c (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2007 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #define _IP_VHL
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/mbuf.h>
74 #include <sys/malloc.h>
75 #include <sys/domain.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/time.h>
79 #include <sys/kernel.h>
80 #include <sys/syslog.h>
81 #include <sys/sysctl.h>
82 #include <sys/mcache.h>
83 #include <sys/socketvar.h>
84 #include <sys/kdebug.h>
85 #include <mach/mach_time.h>
86 #include <mach/sdt.h>
87 
88 #include <machine/endian.h>
89 #include <dev/random/randomdev.h>
90 
91 #include <kern/queue.h>
92 #include <kern/locks.h>
93 #include <libkern/OSAtomic.h>
94 
95 #include <pexpert/pexpert.h>
96 
97 #include <net/if.h>
98 #include <net/if_var.h>
99 #include <net/if_dl.h>
100 #include <net/route.h>
101 #include <net/kpi_protocol.h>
102 #include <net/ntstat.h>
103 #include <net/dlil.h>
104 #include <net/classq/classq.h>
105 #include <net/net_perf.h>
106 #include <net/init.h>
107 #if PF
108 #include <net/pfvar.h>
109 #endif /* PF */
110 #include <net/if_ports_used.h>
111 
112 #include <netinet/in.h>
113 #include <netinet/in_systm.h>
114 #include <netinet/in_var.h>
115 #include <netinet/in_arp.h>
116 #include <netinet/ip.h>
117 #include <netinet/in_pcb.h>
118 #include <netinet/ip_var.h>
119 #include <netinet/ip_icmp.h>
120 #include <netinet/kpi_ipfilter_var.h>
121 #include <netinet/udp.h>
122 #include <netinet/udp_var.h>
123 #include <netinet/bootp.h>
124 
125 #if DUMMYNET
126 #include <netinet/ip_dummynet.h>
127 #endif /* DUMMYNET */
128 
129 #if IPSEC
130 #include <netinet6/ipsec.h>
131 #include <netkey/key.h>
132 #endif /* IPSEC */
133 
134 #include <os/log.h>
135 
136 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIP, 0)
137 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETIP, 2)
138 #define DBG_FNC_IP_INPUT        NETDBG_CODE(DBG_NETIP, (2 << 8))
139 
140 #if IPSEC
141 extern int ipsec_bypass;
142 #endif /* IPSEC */
143 
144 MBUFQ_HEAD(fq_head);
145 
146 static int frag_timeout_run;            /* frag timer is scheduled to run */
147 static void frag_timeout(void *);
148 static void frag_sched_timeout(void);
149 
150 static struct ipq *ipq_alloc(int);
151 static void ipq_free(struct ipq *);
152 static void ipq_updateparams(void);
153 static void ip_input_second_pass(struct mbuf *, struct ifnet *,
154     int, int, struct ip_fw_in_args *);
155 
156 static LCK_GRP_DECLARE(ipqlock_grp, "ipqlock");
157 static LCK_MTX_DECLARE(ipqlock, &ipqlock_grp);
158 
159 
160 /* Packet reassembly stuff */
161 #define IPREASS_NHASH_LOG2      6
162 #define IPREASS_NHASH           (1 << IPREASS_NHASH_LOG2)
163 #define IPREASS_HMASK           (IPREASS_NHASH - 1)
164 #define IPREASS_HASH(x, y) \
165 	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
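/*
 * A worked example of the hash above, with purely illustrative inputs
 * (x = 0xC0A80102, y = 0x1234):
 *
 *   (x & 0xF)                 = 0x02
 *   ((((x) >> 8) & 0xF) << 4) = 0x10
 *   (0x02 | 0x10) ^ 0x1234    = 0x1226
 *   0x1226 & IPREASS_HMASK    = 0x26   (bucket 38 of IPREASS_NHASH = 64)
 */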
166 
167 /* IP fragment reassembly queues (protected by ipqlock) */
168 static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH]; /* ip reassembly queues */
169 static int maxnipq;                     /* max packets in reass queues */
170 static u_int32_t maxfragsperpacket;     /* max frags/packet in reass queues */
171 static u_int32_t nipq;                  /* # of packets in reass queues */
172 static u_int32_t ipq_limit;             /* ipq allocation limit */
173 static u_int32_t ipq_count;             /* current # of allocated ipq's */
174 
175 static int sysctl_ipforwarding SYSCTL_HANDLER_ARGS;
176 static int sysctl_maxnipq SYSCTL_HANDLER_ARGS;
177 static int sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS;
178 
179 #if (DEBUG || DEVELOPMENT)
180 static int sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS;
181 static int sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS;
182 static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS;
183 #endif /* (DEBUG || DEVELOPMENT) */
184 
185 int ipforwarding = 0;
186 SYSCTL_PROC(_net_inet_ip, IPCTL_FORWARDING, forwarding,
187     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0,
188     sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces");
189 
190 static int ipsendredirects = 1; /* XXX */
191 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect,
192     CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0,
193     "Enable sending IP redirects");
194 
195 int ip_defttl = IPDEFTTL;
196 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW | CTLFLAG_LOCKED,
197     &ip_defttl, 0, "Maximum TTL on IP packets");
198 
199 static int ip_dosourceroute = 0;
200 SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute,
201     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0,
202     "Enable forwarding source routed IP packets");
203 
204 static int ip_acceptsourceroute = 0;
205 SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
206     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0,
207     "Enable accepting source routed IP packets");
208 
209 static int ip_sendsourcequench = 0;
210 SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench,
211     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0,
212     "Enable the transmission of source quench packets");
213 
214 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
215     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq,
216     "I", "Maximum number of IPv4 fragment reassembly queue entries");
217 
218 SYSCTL_UINT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD | CTLFLAG_LOCKED,
219     &nipq, 0, "Current number of IPv4 fragment reassembly queue entries");
220 
221 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragsperpacket,
222     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0,
223     sysctl_maxfragsperpacket, "I",
224     "Maximum number of IPv4 fragments allowed per packet");
225 
226 static uint32_t ip_adj_clear_hwcksum = 0;
227 SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_clear_hwcksum,
228     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0,
229     "Invalidate hwcksum info when adjusting length");
230 
231 static uint32_t ip_adj_partial_sum = 1;
232 SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum,
233     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0,
234     "Perform partial sum adjustment of trailing bytes at IP layer");
235 
236 /*
237  * ip_checkinterface controls the receive side of the models for multihoming
238  * that are discussed in RFC 1122.
239  *
240  * ip_checkinterface values are:
241  *  IP_CHECKINTERFACE_WEAK_ES:
242  *	This corresponds to the Weak End-System model where incoming packets from
243  *	any interface are accepted provided the destination address of the incoming packet
244  *	is assigned to some interface.
245  *
246  *  IP_CHECKINTERFACE_HYBRID_ES:
247  *	The Hybrid End-System model uses the Strong End-System model for tunnel
248  *	interfaces (ipsec and utun) and the Weak End-System model for other
249  *	interface families.  This prevents a rogue middle box from probing for signs of TCP connections
250  *	that use the tunnel interface.
251  *
252  *  IP_CHECKINTERFACE_STRONG_ES:
253  *	The Strong End-System model requires that the packet arrive on an interface that
254  *	is assigned the destination address of the packet.
255  *
256  * Since the routing table and transmit implementation do not implement the Strong ES model,
257  * setting this to a value different from IP_CHECKINTERFACE_WEAK_ES may lead to unexpected results.
258  *
259  * When forwarding is enabled, the system reverts to the Weak ES model as a router
260  * is expected by design to receive packets from several interfaces to the same address.
261  *
262  * XXX - ip_checkinterface currently must be set to IP_CHECKINTERFACE_WEAK_ES if you use ipnat
263  * to translate the destination address to another local interface.
264  *
265  * XXX - ip_checkinterface must be set to IP_CHECKINTERFACE_WEAK_ES if you add IP aliases
266  * to the loopback interface instead of the interface where the
267  * packets for those addresses are received.
268  */
269 #define IP_CHECKINTERFACE_WEAK_ES       0
270 #define IP_CHECKINTERFACE_HYBRID_ES     1
271 #define IP_CHECKINTERFACE_STRONG_ES     2
272 
273 static int ip_checkinterface = IP_CHECKINTERFACE_HYBRID_ES;
274 
275 static int sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS;
276 SYSCTL_PROC(_net_inet_ip, OID_AUTO, check_interface,
277     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
278     0, 0, sysctl_ip_checkinterface, "I", "Verify packet arrives on correct interface");
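/*
 * A minimal userland sketch (not part of this file) of reading the ES
 * model through the sysctl registered above; it assumes only the standard
 * sysctlbyname(3) API and the OID name "net.inet.ip.check_interface".
 */
#if 0   /* illustrative only */
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	/* 0 = weak ES, 1 = hybrid ES, 2 = strong ES (see defines above) */
	if (sysctlbyname("net.inet.ip.check_interface", &val, &len,
	    NULL, 0) == 0) {
		printf("check_interface=%d\n", val);
	}
	return 0;
}
#endif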
279 
280 #if (DEBUG || DEVELOPMENT)
281 #define IP_CHECK_IF_DEBUG 1
282 #else
283 #define IP_CHECK_IF_DEBUG 0
284 #endif /* (DEBUG || DEVELOPMENT) */
285 static int ip_checkinterface_debug = IP_CHECK_IF_DEBUG;
286 SYSCTL_INT(_net_inet_ip, OID_AUTO, checkinterface_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
287     &ip_checkinterface_debug, IP_CHECK_IF_DEBUG, "");
288 
289 static int ip_chaining = 1;
290 SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chaining, CTLFLAG_RW | CTLFLAG_LOCKED,
291     &ip_chaining, 1, "Do receive side ip address based chaining");
292 
293 static int ip_chainsz = 6;
294 SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chainsz, CTLFLAG_RW | CTLFLAG_LOCKED,
295     &ip_chainsz, 1, "IP receive side max chaining");
296 
297 #if (DEBUG || DEVELOPMENT)
298 static int ip_input_measure = 0;
299 SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf,
300     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
301     &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement");
302 
303 static uint64_t ip_input_measure_bins = 0;
304 SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_bins,
305     CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0,
306     sysctl_ip_input_measure_bins, "I",
307     "bins for chaining performance data histogram");
308 
309 static net_perf_t net_perf;
310 SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_data,
311     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
312     0, 0, sysctl_ip_input_getperf, "S,net_perf",
313     "IP input performance data (struct net_perf, net/net_perf.h)");
314 #endif /* (DEBUG || DEVELOPMENT) */
315 
316 #if DIAGNOSTIC
317 static int ipprintfs = 0;
318 #endif
319 
320 struct protosw *ip_protox[IPPROTO_MAX];
321 
322 static LCK_GRP_DECLARE(in_ifaddr_rwlock_grp, "in_ifaddr_rwlock");
323 LCK_RW_DECLARE(in_ifaddr_rwlock, &in_ifaddr_rwlock_grp);
324 
325 /* Protected by in_ifaddr_rwlock */
326 struct in_ifaddrhead in_ifaddrhead;             /* first inet address */
327 struct in_ifaddrhashhead *in_ifaddrhashtbl;     /* inet addr hash table  */
328 
329 #define INADDR_NHASH    61
330 static uint32_t inaddr_nhash;                  /* hash table size */
331 static uint32_t inaddr_hashp;                  /* next largest prime */
332 
333 static int ip_getstat SYSCTL_HANDLER_ARGS;
334 struct ipstat ipstat;
335 SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats,
336     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
337     0, 0, ip_getstat, "S,ipstat",
338     "IP statistics (struct ipstat, netinet/ip_var.h)");
339 
340 #if IPCTL_DEFMTU
341 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW | CTLFLAG_LOCKED,
342     &ip_mtu, 0, "Default MTU");
343 #endif /* IPCTL_DEFMTU */
344 
345 #if IPSTEALTH
346 static int      ipstealth = 0;
347 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED,
348     &ipstealth, 0, "");
349 #endif /* IPSTEALTH */
350 
351 #if DUMMYNET
352 ip_dn_io_t *ip_dn_io_ptr;
353 #endif /* DUMMYNET */
354 
355 SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal,
356     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local");
357 
358 struct ip_linklocal_stat ip_linklocal_stat;
359 SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat,
360     CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat,
361     "Number of link local packets with TTL less than 255");
362 
363 SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in,
364     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input");
365 
366 int ip_linklocal_in_allowbadttl = 1;
367 SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl,
368     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0,
369     "Allow incoming link local packets with TTL less than 255");
370 
371 
372 /*
373  * We need to save the IP options in case a protocol wants to respond
374  * to an incoming packet over the same route if the packet got here
375  * using IP source routing.  This allows connection establishment and
376  * maintenance when the remote end is on a network that is not known
377  * to us.
378  */
379 static int      ip_nhops = 0;
380 static  struct ip_srcrt {
381 	struct  in_addr dst;                    /* final destination */
382 	char    nop;                            /* one NOP to align */
383 	char    srcopt[IPOPT_OFFSET + 1];       /* OPTVAL, OLEN and OFFSET */
384 	struct  in_addr route[MAX_IPOPTLEN / sizeof(struct in_addr)];
385 } ip_srcrt;
386 
387 static void in_ifaddrhashtbl_init(void);
388 static void save_rte(u_char *, struct in_addr);
389 static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *);
390 static void ip_forward(struct mbuf *, int, struct sockaddr_in *);
391 static void frag_freef(struct ipqhead *, struct ipq *);
392 static struct mbuf *ip_reass(struct mbuf *);
393 static void ip_fwd_route_copyout(struct ifnet *, struct route *);
394 static void ip_fwd_route_copyin(struct ifnet *, struct route *);
395 static inline u_short ip_cksum(struct mbuf *, int);
396 
397 /*
398  * On platforms which require strict alignment (currently for anything but
399  * i386 or x86_64), check if the IP header pointer is 32-bit aligned; if not,
400  * copy the contents of the mbuf chain into a new chain, and free the original
401  * one.  Create some head room in the first mbuf of the new chain, in case
402  * it's needed later on.
403  */
404 #if defined(__i386__) || defined(__x86_64__)
405 #define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
406 #else /* !__i386__ && !__x86_64__ */
407 #define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do {                  \
408 	if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) {                     \
409 	        struct mbuf *_n;                                        \
410 	        struct ifnet *__ifp = (_ifp);                           \
411 	        atomic_add_64(&(__ifp)->if_alignerrs, 1);               \
412 	        if (((_m)->m_flags & M_PKTHDR) &&                       \
413 	            (_m)->m_pkthdr.pkt_hdr != NULL)                     \
414 	                (_m)->m_pkthdr.pkt_hdr = NULL;                  \
415 	        _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT);        \
416 	        if (_n == NULL) {                                       \
417 	                atomic_add_32(&ipstat.ips_toosmall, 1);         \
418 	                m_freem(_m);                                    \
419 	                (_m) = NULL;                                    \
420 	                _action;                                        \
421 	        } else {                                                \
422 	                VERIFY(_n != (_m));                             \
423 	                (_m) = _n;                                      \
424 	        }                                                       \
425 	}                                                               \
426 } while (0)
427 #endif /* !__i386__ && !__x86_64__ */
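/*
 * For reference, IP_HDR_ALIGNED_P() is defined in the IP headers (not
 * shown here); on strict-alignment targets it presumably amounts to a
 * 32-bit alignment test on the header pointer, roughly as sketched below.
 */
#if 0   /* illustrative only */
#include <stdint.h>

#define IP_HDR_ALIGNED_P_SKETCH(_p) \
	((((uintptr_t)(_p)) & (sizeof(uint32_t) - 1)) == 0)
#endif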
428 
429 
430 typedef enum ip_check_if_result {
431 	IP_CHECK_IF_NONE = 0,
432 	IP_CHECK_IF_OURS = 1,
433 	IP_CHECK_IF_DROP = 2,
434 	IP_CHECK_IF_FORWARD = 3
435 } ip_check_if_result_t;
436 
437 static ip_check_if_result_t ip_input_check_interface(struct mbuf **, struct ip *, struct ifnet *);
438 
439 /*
440  * GRE input handler function, settable via ip_gre_register_input() for PPTP.
441  */
442 static gre_input_func_t gre_input_func;
443 
444 static void
445 ip_init_delayed(void)
446 {
447 	struct ifreq ifr;
448 	int error;
449 	struct sockaddr_in *sin;
450 
451 	bzero(&ifr, sizeof(ifr));
452 	strlcpy(ifr.ifr_name, "lo0", sizeof(ifr.ifr_name));
453 	sin = (struct sockaddr_in *)(void *)&ifr.ifr_addr;
454 	sin->sin_len = sizeof(struct sockaddr_in);
455 	sin->sin_family = AF_INET;
456 	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
457 	error = in_control(NULL, SIOCSIFADDR, (caddr_t)&ifr, lo_ifp, kernproc);
458 	if (error) {
459 		printf("%s: failed to initialise lo0's address, error=%d\n",
460 		    __func__, error);
461 	}
462 }
463 
464 /*
465  * IP initialization: fill in IP protocol switch table.
466  * All protocols not implemented in kernel go to raw IP protocol handler.
467  */
468 void
469 ip_init(struct protosw *pp, struct domain *dp)
470 {
471 	static int ip_initialized = 0;
472 	struct protosw *pr;
473 	struct timeval tv;
474 	int i;
475 
476 	domain_proto_mtx_lock_assert_held();
477 	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
478 
479 	/* ipq_alloc() uses mbufs for IP fragment queue structures */
480 	_CASSERT(sizeof(struct ipq) <= _MLEN);
481 
482 	/*
483 	 * Some ioctls (e.g. SIOCAIFADDR) use ifaliasreq struct, which is
484 	 * interchangeable with in_aliasreq; they must have the same size.
485 	 */
486 	_CASSERT(sizeof(struct ifaliasreq) == sizeof(struct in_aliasreq));
487 
488 	if (ip_initialized) {
489 		return;
490 	}
491 	ip_initialized = 1;
492 
493 	in_ifaddr_init();
494 
495 	TAILQ_INIT(&in_ifaddrhead);
496 	in_ifaddrhashtbl_init();
497 
498 	ip_moptions_init();
499 
500 	pr = pffindproto_locked(PF_INET, IPPROTO_RAW, SOCK_RAW);
501 	if (pr == NULL) {
502 		panic("%s: Unable to find [PF_INET,IPPROTO_RAW,SOCK_RAW]",
503 		    __func__);
504 		/* NOTREACHED */
505 	}
506 
507 	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
508 	for (i = 0; i < IPPROTO_MAX; i++) {
509 		ip_protox[i] = pr;
510 	}
511 	/*
512 	 * Cycle through IP protocols and put them into the appropriate place
513 	 * in ip_protox[], skipping protocols IPPROTO_{IP,RAW}.
514 	 */
515 	VERIFY(dp == inetdomain && dp->dom_family == PF_INET);
516 	TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
517 		VERIFY(pr->pr_domain == dp);
518 		if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
519 			/* Be careful to only index valid IP protocols. */
520 			if (pr->pr_protocol < IPPROTO_MAX) {
521 				ip_protox[pr->pr_protocol] = pr;
522 			}
523 		}
524 	}
525 
526 	lck_mtx_lock(&ipqlock);
527 	/* Initialize IP reassembly queue. */
528 	for (i = 0; i < IPREASS_NHASH; i++) {
529 		TAILQ_INIT(&ipq[i]);
530 	}
531 
532 	maxnipq = nmbclusters / 32;
533 	maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */
534 	ipq_updateparams();
535 	lck_mtx_unlock(&ipqlock);
536 
537 	getmicrotime(&tv);
538 	ip_id = (u_short)(RandomULong() ^ tv.tv_usec);
539 
540 	PE_parse_boot_argn("ip_checkinterface", &i, sizeof(i));
541 	switch (i) {
542 	case IP_CHECKINTERFACE_WEAK_ES:
543 	case IP_CHECKINTERFACE_HYBRID_ES:
544 	case IP_CHECKINTERFACE_STRONG_ES:
545 		ip_checkinterface = i;
546 		break;
547 	default:
548 		break;
549 	}
550 
551 	arp_init();
552 	net_init_add(ip_init_delayed);
553 }
554 
555 /*
556  * Initialize IPv4 source address hash table.
557  */
558 static void
559 in_ifaddrhashtbl_init(void)
560 {
561 	int i, k, p;
562 
563 	if (in_ifaddrhashtbl != NULL) {
564 		return;
565 	}
566 
567 	PE_parse_boot_argn("inaddr_nhash", &inaddr_nhash,
568 	    sizeof(inaddr_nhash));
569 	if (inaddr_nhash == 0) {
570 		inaddr_nhash = INADDR_NHASH;
571 	}
572 
573 	in_ifaddrhashtbl = zalloc_permanent(
574 		inaddr_nhash * sizeof(*in_ifaddrhashtbl),
575 		ZALIGN_PTR);
576 
577 	/*
578 	 * Generate the smallest prime greater than inaddr_nhash.
579 	 */
580 	k = (inaddr_nhash % 2 == 0) ? inaddr_nhash + 1 : inaddr_nhash + 2;
581 	for (;;) {
582 		p = 1;
583 		for (i = 3; i * i <= k; i += 2) {
584 			if (k % i == 0) {
585 				p = 0;
586 			}
587 		}
588 		if (p == 1) {
589 			break;
590 		}
591 		k += 2;
592 	}
593 	inaddr_hashp = k;
594 }
595 
596 uint32_t
597 inaddr_hashval(uint32_t key)
598 {
599 	/*
600 	 * The hash index is the computed prime times the key modulo
601 	 * the hash size, as documented in "Introduction to Algorithms"
602 	 * (Cormen, Leiserson, Rivest).
603 	 */
604 	if (inaddr_nhash > 1) {
605 		return (key * inaddr_hashp) % inaddr_nhash;
606 	} else {
607 		return 0;
608 	}
609 }
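/*
 * A self-contained sketch of the same scheme as in_ifaddrhashtbl_init()
 * and inaddr_hashval() above: find the smallest prime p greater than the
 * table size n, then hash as (key * p) % n.  With the default n = 61 this
 * yields p = 67.  Illustrative userland code, not compiled here.
 */
#if 0   /* illustrative only */
#include <stdint.h>

static uint32_t
next_prime_above(uint32_t n)
{
	uint32_t k = (n % 2 == 0) ? n + 1 : n + 2;

	for (;;) {
		int prime = 1;
		uint32_t i;

		for (i = 3; i * i <= k; i += 2) {
			if (k % i == 0) {
				prime = 0;
			}
		}
		if (prime) {
			return k;
		}
		k += 2;
	}
}

static uint32_t
hashval(uint32_t key, uint32_t n)
{
	return (n > 1) ? (key * next_prime_above(n)) % n : 0;
}
#endif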
610 
611 struct in_ifaddrhashhead *
612 inaddr_hashlookup(uint32_t key)
613 {
614 	return &in_ifaddrhashtbl[inaddr_hashval(key)];
615 }
616 
617 __private_extern__ void
618 ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto,
619     ipfilter_t inject_ipfref)
620 {
621 	struct ipfilter *filter;
622 	int seen = (inject_ipfref == NULL);
623 	int     changed_header = 0;
624 	struct ip *ip;
625 	void (*pr_input)(struct mbuf *, int len);
626 
627 	if (!TAILQ_EMPTY(&ipv4_filters)) {
628 		ipf_ref();
629 		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
630 			if (seen == 0) {
631 				if ((struct ipfilter *)inject_ipfref == filter) {
632 					seen = 1;
633 				}
634 			} else if (filter->ipf_filter.ipf_input) {
635 				errno_t result;
636 
637 				if (changed_header == 0) {
638 					/*
639 					 * Perform IP header alignment fixup,
640 					 * if needed, before passing packet
641 					 * into filter(s).
642 					 */
643 					IP_HDR_ALIGNMENT_FIXUP(m,
644 					    m->m_pkthdr.rcvif, ipf_unref());
645 
646 					/* ipf_unref() already called */
647 					if (m == NULL) {
648 						return;
649 					}
650 
651 					changed_header = 1;
652 					ip = mtod(m, struct ip *);
653 					ip->ip_len = htons(ip->ip_len + (uint16_t)hlen);
654 					ip->ip_off = htons(ip->ip_off);
655 					ip->ip_sum = 0;
656 					ip->ip_sum = ip_cksum_hdr_in(m, hlen);
657 				}
658 				result = filter->ipf_filter.ipf_input(
659 					filter->ipf_filter.cookie, (mbuf_t *)&m,
660 					hlen, proto);
661 				if (result == EJUSTRETURN) {
662 					ipf_unref();
663 					return;
664 				}
665 				if (result != 0) {
666 					ipf_unref();
667 					m_freem(m);
668 					return;
669 				}
670 			}
671 		}
672 		ipf_unref();
673 	}
674 
675 	/* Perform IP header alignment fixup (post-filters), if needed */
676 	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );
677 
678 	ip = mtod(m, struct ip *);
679 
680 	if (changed_header) {
681 		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
682 		ip->ip_off = ntohs(ip->ip_off);
683 	}
684 
685 	/*
686 	 * If there isn't a specific lock for the protocol
687 	 * we're about to call, use the generic lock for AF_INET;
688 	 * otherwise let the protocol deal with its own locking.
689 	 */
690 	if ((pr_input = ip_protox[ip->ip_p]->pr_input) == NULL) {
691 		m_freem(m);
692 	} else if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
693 		lck_mtx_lock(inet_domain_mutex);
694 		pr_input(m, hlen);
695 		lck_mtx_unlock(inet_domain_mutex);
696 	} else {
697 		pr_input(m, hlen);
698 	}
699 }
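/*
 * A minimal sketch of how a filter ends up on the ipv4_filters list
 * walked above, using the ipfilter KPI from <netinet/kpi_ipfilter.h>.
 * The filter name and callback below are hypothetical.  Returning
 * EJUSTRETURN from ipf_input tells ip_proto_dispatch_in() the filter
 * consumed the packet; any other non-zero value causes it to be freed.
 */
#if 0   /* illustrative only */
#include <netinet/kpi_ipfilter.h>

static ipfilter_t my_filter_ref;

static errno_t
my_ipf_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
{
	/* inspect *data here; 0 = pass the packet along unchanged */
	return 0;
}

static void
my_filter_attach(void)
{
	struct ipf_filter f = {
		.cookie    = NULL,
		.name      = "com.example.ipfilter",
		.ipf_input = my_ipf_input,
	};
	(void) ipf_addv4(&f, &my_filter_ref);
}
#endif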
700 
701 struct pktchain_elm {
702 	struct mbuf     *pkte_head;
703 	struct mbuf     *pkte_tail;
704 	struct in_addr  pkte_saddr;
705 	struct in_addr  pkte_daddr;
706 	uint16_t        pkte_npkts;
707 	uint16_t        pkte_proto;
708 	uint32_t        pkte_nbytes;
709 };
710 
711 typedef struct pktchain_elm pktchain_elm_t;
712 
713 /* Store up to PKTTBL_SZ unique flows on the stack */
714 #define PKTTBL_SZ       7
715 
716 static struct mbuf *
717 ip_chain_insert(struct mbuf *packet, pktchain_elm_t *tbl)
718 {
719 	struct ip*      ip;
720 	int             pkttbl_idx = 0;
721 
722 	ip = mtod(packet, struct ip*);
723 
724 	/* reusing the hash function from inaddr_hashval */
725 	pkttbl_idx = inaddr_hashval(ntohl(ip->ip_src.s_addr)) % PKTTBL_SZ;
726 	if (tbl[pkttbl_idx].pkte_head == NULL) {
727 		tbl[pkttbl_idx].pkte_head = packet;
728 		tbl[pkttbl_idx].pkte_saddr.s_addr = ip->ip_src.s_addr;
729 		tbl[pkttbl_idx].pkte_daddr.s_addr = ip->ip_dst.s_addr;
730 		tbl[pkttbl_idx].pkte_proto = ip->ip_p;
731 	} else {
732 		if ((ip->ip_dst.s_addr == tbl[pkttbl_idx].pkte_daddr.s_addr) &&
733 		    (ip->ip_src.s_addr == tbl[pkttbl_idx].pkte_saddr.s_addr) &&
734 		    (ip->ip_p == tbl[pkttbl_idx].pkte_proto)) {
735 		} else {
736 			return packet;
737 		}
738 	}
739 	if (tbl[pkttbl_idx].pkte_tail != NULL) {
740 		mbuf_setnextpkt(tbl[pkttbl_idx].pkte_tail, packet);
741 	}
742 
743 	tbl[pkttbl_idx].pkte_tail = packet;
744 	tbl[pkttbl_idx].pkte_npkts += 1;
745 	tbl[pkttbl_idx].pkte_nbytes += packet->m_pkthdr.len;
746 	return NULL;
747 }
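/*
 * Example of the bucketing above (illustrative addresses): the first
 * packet from 10.0.0.1 to 10.0.0.2 claims bucket inaddr_hashval(src) %
 * PKTTBL_SZ and records its (src, dst, proto) triple.  Subsequent
 * packets append to that bucket's tail only on an exact triple match;
 * on any mismatch the packet is handed back to the caller, which
 * flushes the partially built chains before restarting.
 */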
748 
749 /* args is a dummy variable here for backward compatibility */
750 static void
751 ip_input_second_pass_loop_tbl(pktchain_elm_t *tbl, struct ip_fw_in_args *args)
752 {
753 	int i = 0;
754 
755 	for (i = 0; i < PKTTBL_SZ; i++) {
756 		if (tbl[i].pkte_head != NULL) {
757 			struct mbuf *m = tbl[i].pkte_head;
758 			ip_input_second_pass(m, m->m_pkthdr.rcvif,
759 			    tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args);
760 
761 			if (tbl[i].pkte_npkts > 2) {
762 				ipstat.ips_rxc_chainsz_gt2++;
763 			}
764 			if (tbl[i].pkte_npkts > 4) {
765 				ipstat.ips_rxc_chainsz_gt4++;
766 			}
767 #if (DEBUG || DEVELOPMENT)
768 			if (ip_input_measure) {
769 				net_perf_histogram(&net_perf, tbl[i].pkte_npkts);
770 			}
771 #endif /* (DEBUG || DEVELOPMENT) */
772 			tbl[i].pkte_head = tbl[i].pkte_tail = NULL;
773 			tbl[i].pkte_npkts = 0;
774 			tbl[i].pkte_nbytes = 0;
775 			/* no need to initialize address and protocol in tbl */
776 		}
777 	}
778 }
779 
780 static void
781 ip_input_cpout_args(struct ip_fw_in_args *args, struct ip_fw_args *args1,
782     boolean_t *done_init)
783 {
784 	if (*done_init == FALSE) {
785 		bzero(args1, sizeof(struct ip_fw_args));
786 		*done_init = TRUE;
787 	}
788 	args1->fwa_pf_rule = args->fwai_pf_rule;
789 }
790 
791 static void
792 ip_input_cpin_args(struct ip_fw_args *args1, struct ip_fw_in_args *args)
793 {
794 	args->fwai_pf_rule = args1->fwa_pf_rule;
795 }
796 
797 typedef enum {
798 	IPINPUT_DOCHAIN = 0,
799 	IPINPUT_DONTCHAIN,
800 	IPINPUT_FREED,
801 	IPINPUT_DONE
802 } ipinput_chain_ret_t;
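/*
 * Meaning of the return values as used by ip_input_first_pass() below:
 * IPINPUT_DOCHAIN means the packet is eligible for per-flow chaining,
 * IPINPUT_DONTCHAIN means it must be delivered individually (e.g.
 * fragments, BOOTP replies, PR_LASTHDR protocols), IPINPUT_FREED means
 * it was dropped and freed, and IPINPUT_DONE means it was already
 * dispatched (e.g. a packet re-injected by a filter).
 */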
803 
804 static void
805 ip_input_update_nstat(struct ifnet *ifp, struct in_addr src_ip,
806     u_int32_t packets, u_int32_t bytes)
807 {
808 	if (nstat_collect) {
809 		struct rtentry *rt = ifnet_cached_rtlookup_inet(ifp,
810 		    src_ip);
811 		if (rt != NULL) {
812 			nstat_route_rx(rt, packets, bytes, 0);
813 			rtfree(rt);
814 		}
815 	}
816 }
817 
818 static void
819 ip_input_dispatch_chain(struct mbuf *m)
820 {
821 	struct mbuf *tmp_mbuf = m;
822 	struct mbuf *nxt_mbuf = NULL;
823 	struct ip *ip = NULL;
824 	unsigned int hlen;
825 
826 	ip = mtod(tmp_mbuf, struct ip *);
827 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
828 	while (tmp_mbuf != NULL) {
829 		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
830 		mbuf_setnextpkt(tmp_mbuf, NULL);
831 		ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0);
832 		tmp_mbuf = nxt_mbuf;
833 		if (tmp_mbuf) {
834 			ip = mtod(tmp_mbuf, struct ip *);
835 			/* first mbuf of chain already has adjusted ip_len */
836 			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
837 			ip->ip_len -= hlen;
838 		}
839 	}
840 }
841 
842 static void
843 ip_input_setdst_chain(struct mbuf *m, uint16_t ifindex, struct in_ifaddr *ia)
844 {
845 	struct mbuf *tmp_mbuf = m;
846 
847 	while (tmp_mbuf != NULL) {
848 		ip_setdstifaddr_info(tmp_mbuf, ifindex, ia);
849 		tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
850 	}
851 }
852 
853 static void
854 ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp)
855 {
856 	boolean_t adjust = TRUE;
857 
858 	ASSERT(m_pktlen(m) > ip->ip_len);
859 
860 	/*
861 	 * Invalidate hardware checksum info if ip_adj_clear_hwcksum
862 	 * is set; useful to handle buggy drivers.  Note that this
863 	 * should not be enabled by default, as we may get here due
864 	 * to link-layer padding.
865 	 */
866 	if (ip_adj_clear_hwcksum &&
867 	    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
868 	    !(inifp->if_flags & IFF_LOOPBACK) &&
869 	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
870 		m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
871 		m->m_pkthdr.csum_data = 0;
872 		ipstat.ips_adj_hwcsum_clr++;
873 	}
874 
875 	/*
876 	 * If partial checksum information is available, subtract
877 	 * out the partial sum of postpended extraneous bytes, and
878 	 * update the checksum metadata accordingly.  By doing it
879 	 * here, the upper layer transport only needs to adjust any
880 	 * prepended extraneous bytes (else it will do both.)
881 	 */
882 	if (ip_adj_partial_sum &&
883 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
884 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
885 		m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
886 		    m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
887 		    (ip->ip_len - m->m_pkthdr.csum_rx_start),
888 		    m->m_pkthdr.csum_rx_val);
889 	} else if ((m->m_pkthdr.csum_flags &
890 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
891 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
892 		/*
893 		 * If packet has partial checksum info and we decided not
894 		 * to subtract the partial sum of postpended extraneous
895 		 * bytes here (not the default case), leave that work to
896 		 * be handled by the other layers.  For now, only TCP, UDP
897 		 * layers are capable of dealing with this.  For all other
898 		 * protocols (including fragments), trim and ditch the
899 		 * partial sum as those layers might not implement partial
900 		 * checksumming (or adjustment) at all.
901 		 */
902 		if ((ip->ip_off & (IP_MF | IP_OFFMASK)) == 0 &&
903 		    (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_UDP)) {
904 			adjust = FALSE;
905 		} else {
906 			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
907 			m->m_pkthdr.csum_data = 0;
908 			ipstat.ips_adj_hwcsum_clr++;
909 		}
910 	}
911 
912 	if (adjust) {
913 		ipstat.ips_adj++;
914 		if (m->m_len == m->m_pkthdr.len) {
915 			m->m_len = ip->ip_len;
916 			m->m_pkthdr.len = ip->ip_len;
917 		} else {
918 			m_adj(m, ip->ip_len - m->m_pkthdr.len);
919 		}
920 	}
921 }
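/*
 * Illustrative example: a driver hands up a 46-byte frame whose IP total
 * length is only 40, so 6 bytes of link-layer padding were folded into
 * the partial checksum by the hardware.  The m_adj_sum16() call above
 * subtracts the sum over those trailing bytes so the transport layer
 * sees a checksum covering only ip_len bytes.
 */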
922 
923 /*
924  * First pass does all essential packet validation and places the packet on a
925  * per-flow queue for operations that have the same outcome for all packets of a flow.
926  */
927 static ipinput_chain_ret_t
928 ip_input_first_pass(struct mbuf *m, struct ip_fw_in_args *args, struct mbuf **modm)
929 {
930 	struct ip       *ip;
931 	struct ifnet    *inifp;
932 	unsigned int    hlen;
933 	int             retval = IPINPUT_DOCHAIN;
934 	int             len = 0;
935 	struct in_addr  src_ip;
936 #if DUMMYNET
937 	struct m_tag            *copy;
938 	struct m_tag            *p;
939 	boolean_t               delete = FALSE;
940 	struct ip_fw_args       args1;
941 	boolean_t               init = FALSE;
942 #endif /* DUMMYNET */
943 	ipfilter_t inject_filter_ref = NULL;
944 
945 	/* Check if the mbuf is still valid after interface filter processing */
946 	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
947 	inifp = mbuf_pkthdr_rcvif(m);
948 	VERIFY(inifp != NULL);
949 
950 	/* Perform IP header alignment fixup, if needed */
951 	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
952 
953 	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
954 
955 #if DUMMYNET
956 	/*
957 	 * Don't bother searching for tag(s) if there's none.
958 	 */
959 	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
960 		goto ipfw_tags_done;
961 	}
962 
963 	/* Grab info from mtags prepended to the chain */
964 	p = m_tag_first(m);
965 	while (p) {
966 		if (p->m_tag_id == KERNEL_MODULE_TAG_ID) {
967 			if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) {
968 				struct dn_pkt_tag *dn_tag;
969 
970 				dn_tag = (struct dn_pkt_tag *)(p + 1);
971 				args->fwai_pf_rule = dn_tag->dn_pf_rule;
972 				delete = TRUE;
973 			}
974 
975 			if (delete) {
976 				copy = p;
977 				p = m_tag_next(m, p);
978 				m_tag_delete(m, copy);
979 			} else {
980 				p = m_tag_next(m, p);
981 			}
982 		} else {
983 			p = m_tag_next(m, p);
984 		}
985 	}
986 
987 #if DIAGNOSTIC
988 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
989 		panic("ip_input no HDR");
990 	}
991 #endif
992 
993 	if (args->fwai_pf_rule) {
994 		/* dummynet already filtered us */
995 		ip = mtod(m, struct ip *);
996 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
997 		inject_filter_ref = ipf_get_inject_filter(m);
998 		if (args->fwai_pf_rule) {
999 			goto check_with_pf;
1000 		}
1001 	}
1002 ipfw_tags_done:
1003 #endif /* DUMMYNET */
1004 
1005 	/*
1006 	 * No need to process packet twice if we've already seen it.
1007 	 */
1008 	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
1009 		inject_filter_ref = ipf_get_inject_filter(m);
1010 	}
1011 	if (inject_filter_ref != NULL) {
1012 		ip = mtod(m, struct ip *);
1013 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1014 
1015 		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1016 		    struct ip *, ip, struct ifnet *, inifp,
1017 		    struct ip *, ip, struct ip6_hdr *, NULL);
1018 
1019 		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
1020 		ip->ip_off = ntohs(ip->ip_off);
1021 		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
1022 		return IPINPUT_DONE;
1023 	}
1024 
1025 	if (__improbable(m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
1026 		if_ports_used_match_mbuf(inifp, PF_INET, m);
1027 	}
1028 
1029 	if (m->m_pkthdr.len < sizeof(struct ip)) {
1030 		OSAddAtomic(1, &ipstat.ips_total);
1031 		OSAddAtomic(1, &ipstat.ips_tooshort);
1032 		m_freem(m);
1033 		return IPINPUT_FREED;
1034 	}
1035 
1036 	if (m->m_len < sizeof(struct ip) &&
1037 	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
1038 		OSAddAtomic(1, &ipstat.ips_total);
1039 		OSAddAtomic(1, &ipstat.ips_toosmall);
1040 		return IPINPUT_FREED;
1041 	}
1042 
1043 	ip = mtod(m, struct ip *);
1044 	*modm = m;
1045 
1046 	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
1047 	    ip->ip_p, ip->ip_off, ip->ip_len);
1048 
1049 	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
1050 		OSAddAtomic(1, &ipstat.ips_total);
1051 		OSAddAtomic(1, &ipstat.ips_badvers);
1052 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1053 		m_freem(m);
1054 		return IPINPUT_FREED;
1055 	}
1056 
1057 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1058 	if (hlen < sizeof(struct ip)) {
1059 		OSAddAtomic(1, &ipstat.ips_total);
1060 		OSAddAtomic(1, &ipstat.ips_badhlen);
1061 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1062 		m_freem(m);
1063 		return IPINPUT_FREED;
1064 	}
1065 
1066 	if (hlen > m->m_len) {
1067 		if ((m = m_pullup(m, hlen)) == NULL) {
1068 			OSAddAtomic(1, &ipstat.ips_total);
1069 			OSAddAtomic(1, &ipstat.ips_badhlen);
1070 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1071 			return IPINPUT_FREED;
1072 		}
1073 		ip = mtod(m, struct ip *);
1074 		*modm = m;
1075 	}
1076 
1077 	/* 127/8 must not appear on wire - RFC1122 */
1078 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1079 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
1080 		/*
1081 		 * Allow for the following exceptions:
1082 		 *
1083 		 *   1. If the packet was sent to loopback (i.e. rcvif
1084 		 *      would have been set earlier at output time.)
1085 		 *
1086 		 *   2. If the packet was sent out on loopback from a local
1087 		 *      source address which belongs to a non-loopback
1088 		 *      interface (i.e. rcvif may not necessarily be a
1089 		 *      loopback interface, hence the test for PKTF_LOOP.)
1090 		 *      Unlike IPv6, there is no interface scope ID, and
1091 		 *      therefore we don't care so much about PKTF_IFINFO.
1092 		 */
1093 		if (!(inifp->if_flags & IFF_LOOPBACK) &&
1094 		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1095 			OSAddAtomic(1, &ipstat.ips_total);
1096 			OSAddAtomic(1, &ipstat.ips_badaddr);
1097 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1098 			m_freem(m);
1099 			return IPINPUT_FREED;
1100 		}
1101 	}
1102 
1103 	/* IPv4 Link-Local Addresses as defined in RFC3927 */
1104 	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
1105 	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
1106 		ip_linklocal_stat.iplls_in_total++;
1107 		if (ip->ip_ttl != MAXTTL) {
1108 			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
1109 			/* Silently drop link local traffic with bad TTL */
1110 			if (!ip_linklocal_in_allowbadttl) {
1111 				OSAddAtomic(1, &ipstat.ips_total);
1112 				KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1113 				m_freem(m);
1114 				return IPINPUT_FREED;
1115 			}
1116 		}
1117 	}
1118 
1119 	if (ip_cksum(m, hlen)) {
1120 		OSAddAtomic(1, &ipstat.ips_total);
1121 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1122 		m_freem(m);
1123 		return IPINPUT_FREED;
1124 	}
1125 
1126 	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1127 	    struct ip *, ip, struct ifnet *, inifp,
1128 	    struct ip *, ip, struct ip6_hdr *, NULL);
1129 
1130 	/*
1131 	 * Convert fields to host representation.
1132 	 */
1133 #if BYTE_ORDER != BIG_ENDIAN
1134 	NTOHS(ip->ip_len);
1135 #endif
1136 
1137 	if (ip->ip_len < hlen) {
1138 		OSAddAtomic(1, &ipstat.ips_total);
1139 		OSAddAtomic(1, &ipstat.ips_badlen);
1140 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1141 		m_freem(m);
1142 		return IPINPUT_FREED;
1143 	}
1144 
1145 #if BYTE_ORDER != BIG_ENDIAN
1146 	NTOHS(ip->ip_off);
1147 #endif
1148 
1149 	/*
1150 	 * Check that the amount of data in the buffers
1151  * is at least as much as the IP header would have us expect.
1152 	 * Trim mbufs if longer than we expect.
1153 	 * Drop packet if shorter than we expect.
1154 	 */
1155 	if (m->m_pkthdr.len < ip->ip_len) {
1156 		OSAddAtomic(1, &ipstat.ips_total);
1157 		OSAddAtomic(1, &ipstat.ips_tooshort);
1158 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1159 		m_freem(m);
1160 		return IPINPUT_FREED;
1161 	}
1162 
1163 	if (m->m_pkthdr.len > ip->ip_len) {
1164 		ip_input_adjust(m, ip, inifp);
1165 	}
1166 
1167 	/* for netstat route statistics */
1168 	src_ip = ip->ip_src;
1169 	len = m->m_pkthdr.len;
1170 
1171 #if DUMMYNET
1172 check_with_pf:
1173 #endif /* DUMMYNET */
1174 #if PF
1175 	/* Invoke inbound packet filter */
1176 	if (PF_IS_ENABLED) {
1177 		int error;
1178 		ip_input_cpout_args(args, &args1, &init);
1179 		ip = mtod(m, struct ip *);
1180 		src_ip = ip->ip_src;
1181 
1182 #if DUMMYNET
1183 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args1);
1184 #else
1185 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
1186 #endif /* DUMMYNET */
1187 		if (error != 0 || m == NULL) {
1188 			if (m != NULL) {
1189 				panic("%s: unexpected packet %p",
1190 				    __func__, m);
1191 				/* NOTREACHED */
1192 			}
1193 			/* Already freed by callee */
1194 			ip_input_update_nstat(inifp, src_ip, 1, len);
1195 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1196 			OSAddAtomic(1, &ipstat.ips_total);
1197 			return IPINPUT_FREED;
1198 		}
1199 		ip = mtod(m, struct ip *);
1200 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1201 		*modm = m;
1202 		ip_input_cpin_args(&args1, args);
1203 	}
1204 #endif /* PF */
1205 
1206 #if IPSEC
1207 	if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) {
1208 		retval = IPINPUT_DONTCHAIN; /* XXX scope for chaining here? */
1209 		goto pass;
1210 	}
1211 #endif
1212 
1213 #if IPSEC
1214 pass:
1215 #endif
1216 	/*
1217 	 * Process options and, if not destined for us,
1218 	 * ship it on.  ip_dooptions returns 1 when an
1219 	 * error was detected (causing an icmp message
1220 	 * to be sent and the original packet to be freed).
1221 	 */
1222 	ip_nhops = 0;           /* for source routed packets */
1223 	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
1224 		src_ip = ip->ip_src;
1225 		ip_input_update_nstat(inifp, src_ip, 1, len);
1226 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1227 		OSAddAtomic(1, &ipstat.ips_total);
1228 		return IPINPUT_FREED;
1229 	}
1230 
1231 	/*
1232 	 * Don't chain fragmented packets
1233 	 */
1234 	if (ip->ip_off & ~(IP_DF | IP_RF)) {
1235 		return IPINPUT_DONTCHAIN;
1236 	}
1237 
1238 	/* Allow DHCP/BootP responses through */
1239 	if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
1240 	    hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) {
1241 		struct udpiphdr *ui;
1242 
1243 		if (m->m_len < sizeof(struct udpiphdr) &&
1244 		    (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
1245 			OSAddAtomic(1, &udpstat.udps_hdrops);
1246 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1247 			OSAddAtomic(1, &ipstat.ips_total);
1248 			return IPINPUT_FREED;
1249 		}
1250 		*modm = m;
1251 		ui = mtod(m, struct udpiphdr *);
1252 		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
1253 			ip_setdstifaddr_info(m, inifp->if_index, NULL);
1254 			return IPINPUT_DONTCHAIN;
1255 		}
1256 	}
1257 
1258 	/* Avoid chaining raw sockets as ipsec checks occur later for them */
1259 	if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) {
1260 		return IPINPUT_DONTCHAIN;
1261 	}
1262 
1263 	return retval;
1264 #if !defined(__i386__) && !defined(__x86_64__)
1265 bad:
1266 	m_freem(m);
1267 	return IPINPUT_FREED;
1268 #endif
1269 }
1270 
1271 /*
1272  * Because the call to m_pullup() may free the mbuf, the function frees the mbuf packet
1273  * chain before it returns IP_CHECK_IF_DROP.
1274  */
1275 static ip_check_if_result_t
1276 ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp)
1277 {
1278 	struct mbuf *m = *mp;
1279 	struct in_ifaddr *ia = NULL;
1280 	struct in_ifaddr *best_ia = NULL;
1281 	struct ifnet *match_ifp = NULL;
1282 	ip_check_if_result_t result = IP_CHECK_IF_NONE;
1283 
1284 	/*
1285 	 * Host broadcast and all network broadcast addresses are always a match
1286 	 */
1287 	if (ip->ip_dst.s_addr == (u_int32_t)INADDR_BROADCAST ||
1288 	    ip->ip_dst.s_addr == INADDR_ANY) {
1289 		ip_input_setdst_chain(m, inifp->if_index, NULL);
1290 		return IP_CHECK_IF_OURS;
1291 	}
1292 
1293 	/*
1294 	 * Check for a match in the hash bucket.
1295 	 */
1296 	lck_rw_lock_shared(&in_ifaddr_rwlock);
1297 	TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
1298 		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) {
1299 			best_ia = ia;
1300 			match_ifp = best_ia->ia_ifp;
1301 
1302 			if (ia->ia_ifp == inifp || (inifp->if_flags & IFF_LOOPBACK) ||
1303 			    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1304 				/*
1305 				 * A locally originated packet or packet from the loopback
1306 				 * interface is always an exact interface address match
1307 				 */
1308 				match_ifp = inifp;
1309 				break;
1310 			}
1311 			/*
1312 			 * Continue the loop in case there's an exact match with another
1313 			 * interface.
1314 			 */
1315 		}
1316 	}
1317 	if (best_ia != NULL) {
1318 		if (match_ifp != inifp && ipforwarding == 0 &&
1319 		    ((ip_checkinterface == IP_CHECKINTERFACE_HYBRID_ES &&
1320 		    (match_ifp->if_family == IFNET_FAMILY_IPSEC ||
1321 		    match_ifp->if_family == IFNET_FAMILY_UTUN)) ||
1322 		    ip_checkinterface == IP_CHECKINTERFACE_STRONG_ES)) {
1323 			/*
1324 			 * Drop when interface address check is strict and forwarding
1325 			 * is disabled
1326 			 */
1327 			result = IP_CHECK_IF_DROP;
1328 		} else {
1329 			result = IP_CHECK_IF_OURS;
1330 			ip_input_setdst_chain(m, 0, best_ia);
1331 		}
1332 	}
1333 	lck_rw_done(&in_ifaddr_rwlock);
1334 
1335 	if (result == IP_CHECK_IF_NONE && (inifp->if_flags & IFF_BROADCAST)) {
1336 		/*
1337 		 * Check for broadcast addresses.
1338 		 *
1339 		 * Only accept broadcast packets that arrive via the matching
1340 		 * interface.  Reception of forwarded directed broadcasts is
1341 		 * handled via ip_forward() and ether_frameout(); for SIMPLEX
1342 		 * interfaces, ether_frameout() loops the packet back into the stack.
1343 		 */
1344 		struct ifaddr *ifa;
1345 
1346 		ifnet_lock_shared(inifp);
1347 		TAILQ_FOREACH(ifa, &inifp->if_addrhead, ifa_link) {
1348 			if (ifa->ifa_addr->sa_family != AF_INET) {
1349 				continue;
1350 			}
1351 			ia = ifatoia(ifa);
1352 			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == ip->ip_dst.s_addr ||
1353 			    ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
1354 				ip_input_setdst_chain(m, 0, ia);
1355 				result = IP_CHECK_IF_OURS;
1356 				match_ifp = inifp;
1357 				break;
1358 			}
1359 		}
1360 		ifnet_lock_done(inifp);
1361 	}
1362 
1363 	/* Allow DHCP/BootP responses through */
1364 	if (result == IP_CHECK_IF_NONE && (inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
1365 	    ip->ip_p == IPPROTO_UDP && (IP_VHL_HL(ip->ip_vhl) << 2) == sizeof(struct ip)) {
1366 		struct udpiphdr *ui;
1367 
1368 		if (m->m_len < sizeof(struct udpiphdr)) {
1369 			if ((m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
1370 				OSAddAtomic(1, &udpstat.udps_hdrops);
1371 				*mp = NULL;
1372 				return IP_CHECK_IF_DROP;
1373 			}
1374 			/*
1375 			 * m_pullup can return a different mbuf
1376 			 */
1377 			*mp = m;
1378 			ip = mtod(m, struct ip *);
1379 		}
1380 		ui = mtod(m, struct udpiphdr *);
1381 		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
1382 			ip_input_setdst_chain(m, inifp->if_index, NULL);
1383 			result = IP_CHECK_IF_OURS;
1384 			match_ifp = inifp;
1385 		}
1386 	}
1387 
1388 	if (result == IP_CHECK_IF_NONE) {
1389 		if (ipforwarding == 0) {
1390 			result = IP_CHECK_IF_DROP;
1391 		} else {
1392 			result = IP_CHECK_IF_FORWARD;
1393 			ip_input_setdst_chain(m, inifp->if_index, NULL);
1394 		}
1395 	}
1396 
1397 	if (result == IP_CHECK_IF_OURS && match_ifp != inifp) {
1398 		ipstat.ips_rcv_if_weak_match++;
1399 
1400 		/*  Logging is too noisy when forwarding is enabled */
1401 		if (ip_checkinterface_debug != 0 && ipforwarding == 0) {
1402 			char src_str[MAX_IPv4_STR_LEN];
1403 			char dst_str[MAX_IPv4_STR_LEN];
1404 
1405 			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
1406 			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
1407 			os_log_info(OS_LOG_DEFAULT,
1408 			    "%s: weak ES interface match to %s for packet from %s to %s proto %u received via %s",
1409 			    __func__, best_ia->ia_ifp->if_xname, src_str, dst_str, ip->ip_p, inifp->if_xname);
1410 		}
1411 	} else if (result == IP_CHECK_IF_DROP) {
1412 		if (ip_checkinterface_debug > 0) {
1413 			char src_str[MAX_IPv4_STR_LEN];
1414 			char dst_str[MAX_IPv4_STR_LEN];
1415 
1416 			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
1417 			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
1418 			os_log(OS_LOG_DEFAULT,
1419 			    "%s: no interface match for packet from %s to %s proto %u received via %s",
1420 			    __func__, src_str, dst_str, ip->ip_p, inifp->if_xname);
1421 		}
1422 		struct mbuf *tmp_mbuf = m;
1423 		while (tmp_mbuf != NULL) {
1424 			ipstat.ips_rcv_if_no_match++;
1425 			tmp_mbuf = tmp_mbuf->m_nextpkt;
1426 		}
1427 		m_freem_list(m);
1428 		*mp = NULL;
1429 	}
1430 
1431 	return result;
1432 }
1433 
1434 static void
1435 ip_input_second_pass(struct mbuf *m, struct ifnet *inifp,
1436     int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args)
1437 {
1438 	struct mbuf             *tmp_mbuf = NULL;
1439 	unsigned int            hlen;
1440 
1441 #pragma unused (args)
1442 
1443 	struct ip *ip = mtod(m, struct ip *);
1444 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1445 
1446 	OSAddAtomic(npkts_in_chain, &ipstat.ips_total);
1447 
1448 	/*
1449 	 * Naively assume we can attribute inbound data to the route we would
1450 	 * use to send to this destination. Asymmetric routing breaks this
1451 	 * assumption, but it still allows us to account for traffic from
1452 	 * a remote node in the routing table.
1453  * This has a very significant performance impact, so we bypass it
1454  * if nstat_collect is disabled.  We may also bypass it in the future
1455  * when the protocol is TCP, because TCP will have a route that
1456  * we can use to attribute the data to.  That does mean we would not
1457  * account for forwarded TCP traffic.
1458 	 */
1459 	ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain,
1460 	    bytes_in_chain);
1461 
1462 	/*
1463 	 * Check our list of addresses, to see if the packet is for us.
1464 	 * If we don't have any addresses, assume any unicast packet
1465 	 * we receive might be for us (and let the upper layers deal
1466 	 * with it).
1467 	 */
1468 	tmp_mbuf = m;
1469 	if (TAILQ_EMPTY(&in_ifaddrhead)) {
1470 		while (tmp_mbuf != NULL) {
1471 			if (!(tmp_mbuf->m_flags & (M_MCAST | M_BCAST))) {
1472 				ip_setdstifaddr_info(tmp_mbuf, inifp->if_index,
1473 				    NULL);
1474 			}
1475 			tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
1476 		}
1477 		goto ours;
1478 	}
1479 
1480 	/*
1481 	 * Enable a consistency check between the destination address
1482 	 * and the arrival interface for a unicast packet (the RFC 1122
1483 	 * strong ES model) if IP forwarding is disabled and the packet
1484 	 * is not locally generated
1485 	 *
1486 	 * XXX - Checking also should be disabled if the destination
1487 	 * address is ipnat'ed to a different interface.
1488 	 *
1489 	 * XXX - Checking is incompatible with IP aliases added
1490 	 * to the loopback interface instead of the interface where
1491 	 * the packets are received.
1492 	 */
1493 	if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
1494 		ip_check_if_result_t ip_check_if_result = IP_CHECK_IF_NONE;
1495 
1496 		ip_check_if_result = ip_input_check_interface(&m, ip, inifp);
1497 		ASSERT(ip_check_if_result != IP_CHECK_IF_NONE);
1498 		if (ip_check_if_result == IP_CHECK_IF_OURS) {
1499 			goto ours;
1500 		} else if (ip_check_if_result == IP_CHECK_IF_DROP) {
1501 			return;
1502 		}
1503 	} else {
1504 		struct in_multi *inm;
1505 		/*
1506 		 * See if we belong to the destination multicast group on the
1507 		 * arrival interface.
1508 		 */
1509 		in_multihead_lock_shared();
1510 		IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
1511 		in_multihead_lock_done();
1512 		if (inm == NULL) {
1513 			OSAddAtomic(npkts_in_chain, &ipstat.ips_notmember);
1514 			m_freem_list(m);
1515 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1516 			return;
1517 		}
1518 		ip_input_setdst_chain(m, inifp->if_index, NULL);
1519 		INM_REMREF(inm);
1520 		goto ours;
1521 	}
1522 
1523 	tmp_mbuf = m;
1524 	struct mbuf *nxt_mbuf = NULL;
1525 	while (tmp_mbuf != NULL) {
1526 		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
1527 		/*
1528 		 * Not for us; forward if possible and desirable.
1529 		 */
1530 		mbuf_setnextpkt(tmp_mbuf, NULL);
1531 		if (ipforwarding == 0) {
1532 			OSAddAtomic(1, &ipstat.ips_cantforward);
1533 			m_freem(tmp_mbuf);
1534 		} else {
1535 			ip_forward(tmp_mbuf, 0, NULL);
1536 		}
1537 		tmp_mbuf = nxt_mbuf;
1538 	}
1539 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1540 	return;
1541 ours:
1542 	ip = mtod(m, struct ip *); /* in case it changed */
1543 	/*
1544 	 * If offset is set, must reassemble.
1545 	 */
1546 	if (ip->ip_off & ~(IP_DF | IP_RF)) {
1547 		VERIFY(npkts_in_chain == 1);
1548 		m = ip_reass(m);
1549 		if (m == NULL) {
1550 			return;
1551 		}
1552 		ip = mtod(m, struct ip *);
1553 		/* Get the header length of the reassembled packet */
1554 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1555 	}
1556 
1557 	/*
1558 	 * Further protocols expect the packet length to be w/o the
1559 	 * IP header.
1560 	 */
1561 	ip->ip_len -= hlen;
1562 
1563 #if IPSEC
1564 	/*
1565 	 * enforce IPsec policy checking if we are seeing last header.
1566 	 * note that we do not visit this with protocols with pcb layer
1567 	 * code - like udp/tcp/raw ip.
1568 	 */
1569 	if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
1570 		VERIFY(npkts_in_chain == 1);
1571 		if (ipsec4_in_reject(m, NULL)) {
1572 			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
1573 			goto bad;
1574 		}
1575 	}
1576 #endif /* IPSEC */
1577 
1578 	/*
1579 	 * Switch out to protocol's input routine.
1580 	 */
1581 	OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered);
1582 
1583 	ip_input_dispatch_chain(m);
1584 
1585 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1586 	return;
1587 bad:
1588 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1589 	m_freem(m);
1590 }
1591 
1592 void
1593 ip_input_process_list(struct mbuf *packet_list)
1594 {
1595 	pktchain_elm_t  pktchain_tbl[PKTTBL_SZ];
1596 
1597 	struct mbuf     *packet = NULL;
1598 	struct mbuf     *modm = NULL; /* modified mbuf */
1599 	int             retval = 0;
1600 #if (DEBUG || DEVELOPMENT)
1601 	struct timeval start_tv;
1602 #endif /* (DEBUG || DEVELOPMENT) */
1603 	int     num_pkts = 0;
1604 	int chain = 0;
1605 	struct ip_fw_in_args       args;
1606 
1607 	if (ip_chaining == 0) {
1608 		struct mbuf *m = packet_list;
1609 #if (DEBUG || DEVELOPMENT)
1610 		if (ip_input_measure) {
1611 			net_perf_start_time(&net_perf, &start_tv);
1612 		}
1613 #endif /* (DEBUG || DEVELOPMENT) */
1614 
1615 		while (m) {
1616 			packet_list = mbuf_nextpkt(m);
1617 			mbuf_setnextpkt(m, NULL);
1618 			ip_input(m);
1619 			m = packet_list;
1620 			num_pkts++;
1621 		}
1622 #if (DEBUG || DEVELOPMENT)
1623 		if (ip_input_measure) {
1624 			net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1625 		}
1626 #endif /* (DEBUG || DEVELOPMENT) */
1627 		return;
1628 	}
1629 #if (DEBUG || DEVELOPMENT)
1630 	if (ip_input_measure) {
1631 		net_perf_start_time(&net_perf, &start_tv);
1632 	}
1633 #endif /* (DEBUG || DEVELOPMENT) */
1634 
1635 	bzero(&pktchain_tbl, sizeof(pktchain_tbl));
1636 restart_list_process:
1637 	chain = 0;
1638 	for (packet = packet_list; packet; packet = packet_list) {
1639 		m_add_crumb(packet, PKT_CRUMB_IP_INPUT);
1640 
1641 		packet_list = mbuf_nextpkt(packet);
1642 		mbuf_setnextpkt(packet, NULL);
1643 
1644 		num_pkts++;
1645 		modm = NULL;
1646 		bzero(&args, sizeof(args));
1647 
1648 		retval = ip_input_first_pass(packet, &args, &modm);
1649 
1650 		if (retval == IPINPUT_DOCHAIN) {
1651 			if (modm) {
1652 				packet = modm;
1653 			}
1654 			packet = ip_chain_insert(packet, &pktchain_tbl[0]);
1655 			if (packet == NULL) {
1656 				ipstat.ips_rxc_chained++;
1657 				chain++;
1658 				if (chain > ip_chainsz) {
1659 					break;
1660 				}
1661 			} else {
1662 				ipstat.ips_rxc_collisions++;
1663 				break;
1664 			}
1665 		} else if (retval == IPINPUT_DONTCHAIN) {
1666 			/* in order to preserve order, exit from chaining */
1667 			if (modm) {
1668 				packet = modm;
1669 			}
1670 			ipstat.ips_rxc_notchain++;
1671 			break;
1672 		} else {
1673 			/* packet was freed or delivered, do nothing. */
1674 		}
1675 	}
1676 
1677 	/* do second pass here for pktchain_tbl */
1678 	if (chain) {
1679 		ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args);
1680 	}
1681 
1682 	if (packet) {
1683 		/*
1684 	 * The equivalent update in the chaining case is performed in
1685 	 * ip_input_second_pass_loop_tbl().
1686 		 */
1687 #if (DEBUG || DEVELOPMENT)
1688 		if (ip_input_measure) {
1689 			net_perf_histogram(&net_perf, 1);
1690 		}
1691 #endif /* (DEBUG || DEVELOPMENT) */
1692 		ip_input_second_pass(packet, packet->m_pkthdr.rcvif,
1693 		    1, packet->m_pkthdr.len, &args);
1694 	}
1695 
1696 	if (packet_list) {
1697 		goto restart_list_process;
1698 	}
1699 
1700 #if (DEBUG || DEVELOPMENT)
1701 	if (ip_input_measure) {
1702 		net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1703 	}
1704 #endif /* (DEBUG || DEVELOPMENT) */
1705 }
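
/*
 * A simplified sketch, not from the original source, of the two-pass
 * chaining idea above: on the first pass, packets that share a flow
 * are linked into one chain so the second pass can amortize per-flow
 * work across the whole chain.  The bucket layout and flow key below
 * are hypothetical stand-ins for pktchain_elm_t and its hash.
 */
struct chain_bucket_sketch {
	struct mbuf     *head;  /* first packet of the chain */
	struct mbuf     *tail;  /* last packet, for O(1) append */
	uint32_t        key;    /* hypothetical flow key */
};

static struct mbuf *
chain_insert_sketch(struct chain_bucket_sketch *b, struct mbuf *m,
    uint32_t key)
{
	if (b->head == NULL) {
		b->head = b->tail = m;          /* start a new chain */
		b->key = key;
		return NULL;                    /* consumed */
	}
	if (b->key == key) {
		b->tail->m_nextpkt = m;         /* extend matching chain */
		b->tail = m;
		return NULL;                    /* consumed */
	}
	return m;       /* collision: caller dispatches and restarts */
}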
1706 /*
1707  * Ip input routine.  Checksum and byte swap header.  If fragmented
1708  * try to reassemble.  Process options.  Pass to next level.
1709  */
1710 void
1711 ip_input(struct mbuf *m)
1712 {
1713 	struct ip *ip;
1714 	unsigned int hlen;
1715 	u_short sum = 0;
1716 #if DUMMYNET
1717 	struct ip_fw_args args;
1718 	struct m_tag    *tag;
1719 #endif
1720 	ipfilter_t inject_filter_ref = NULL;
1721 	struct ifnet *inifp;
1722 
1723 	/* Check if the mbuf is still valid after interface filter processing */
1724 	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
1725 	inifp = m->m_pkthdr.rcvif;
1726 	VERIFY(inifp != NULL);
1727 
1728 	m_add_crumb(m, PKT_CRUMB_IP_INPUT);
1729 
1730 	ipstat.ips_rxc_notlist++;
1731 
1732 	/* Perform IP header alignment fixup, if needed */
1733 	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
1734 
1735 	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
1736 
1737 #if DUMMYNET
1738 	bzero(&args, sizeof(struct ip_fw_args));
1739 
1740 	/*
1741 	 * Don't bother searching for tag(s) if there's none.
1742 	 */
1743 	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
1744 		goto ipfw_tags_done;
1745 	}
1746 
1747 	/* Grab info from mtags prepended to the chain */
1748 	if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
1749 	    KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
1750 		struct dn_pkt_tag *dn_tag;
1751 
1752 		dn_tag = (struct dn_pkt_tag *)(tag + 1);
1753 		args.fwa_pf_rule = dn_tag->dn_pf_rule;
1754 
1755 		m_tag_delete(m, tag);
1756 	}
1757 
1758 #if DIAGNOSTIC
1759 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1760 		panic("ip_input no HDR");
1761 	}
1762 #endif
1763 
1764 	if (args.fwa_pf_rule) {
1765 		/* dummynet already filtered us */
1766 		ip = mtod(m, struct ip *);
1767 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1768 		inject_filter_ref = ipf_get_inject_filter(m);
1769 		if (args.fwa_pf_rule) {
1770 			goto check_with_pf;
1771 		}
1772 	}
1773 ipfw_tags_done:
1774 #endif /* DUMMYNET */
1775 
1776 	/*
1777 	 * No need to process packet twice if we've already seen it.
1778 	 */
1779 	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
1780 		inject_filter_ref = ipf_get_inject_filter(m);
1781 	}
1782 	if (inject_filter_ref != NULL) {
1783 		ip = mtod(m, struct ip *);
1784 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1785 
1786 		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1787 		    struct ip *, ip, struct ifnet *, inifp,
1788 		    struct ip *, ip, struct ip6_hdr *, NULL);
1789 
1790 		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
1791 		ip->ip_off = ntohs(ip->ip_off);
1792 		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
1793 		return;
1794 	}
1795 
1796 	if (__improbable(m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
1797 		if_ports_used_match_mbuf(inifp, PF_INET, m);
1798 	}
1799 
1800 	OSAddAtomic(1, &ipstat.ips_total);
1801 	if (m->m_pkthdr.len < sizeof(struct ip)) {
1802 		goto tooshort;
1803 	}
1804 
1805 	if (m->m_len < sizeof(struct ip) &&
1806 	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
1807 		OSAddAtomic(1, &ipstat.ips_toosmall);
1808 		return;
1809 	}
1810 	ip = mtod(m, struct ip *);
1811 
1812 	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
1813 	    ip->ip_p, ip->ip_off, ip->ip_len);
1814 
1815 	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
1816 		OSAddAtomic(1, &ipstat.ips_badvers);
1817 		goto bad;
1818 	}
1819 
1820 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1821 	if (hlen < sizeof(struct ip)) {         /* minimum header length */
1822 		OSAddAtomic(1, &ipstat.ips_badhlen);
1823 		goto bad;
1824 	}
1825 	if (hlen > m->m_len) {
1826 		if ((m = m_pullup(m, hlen)) == NULL) {
1827 			OSAddAtomic(1, &ipstat.ips_badhlen);
1828 			return;
1829 		}
1830 		ip = mtod(m, struct ip *);
1831 	}
1832 
1833 	/* 127/8 must not appear on wire - RFC1122 */
1834 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1835 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
1836 		/*
1837 		 * Allow for the following exceptions:
1838 		 *
1839 		 *   1. If the packet was sent to loopback (i.e. rcvif
1840 		 *	would have been set earlier at output time.)
1841 		 *
1842 		 *   2. If the packet was sent out on loopback from a local
1843 		 *	source address which belongs to a non-loopback
1844 		 *	interface (i.e. rcvif may not necessarily be a
1845 		 *	loopback interface, hence the test for PKTF_LOOP.)
1846 		 *	Unlike IPv6, there is no interface scope ID, and
1847 		 *	therefore we don't care so much about PKTF_IFINFO.
1848 		 */
1849 		if (!(inifp->if_flags & IFF_LOOPBACK) &&
1850 		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1851 			OSAddAtomic(1, &ipstat.ips_badaddr);
1852 			goto bad;
1853 		}
1854 	}
1855 
1856 	/* IPv4 Link-Local Addresses as defined in RFC3927 */
1857 	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
1858 	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
1859 		ip_linklocal_stat.iplls_in_total++;
1860 		if (ip->ip_ttl != MAXTTL) {
1861 			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
1862 			/* Silently drop link local traffic with bad TTL */
1863 			if (!ip_linklocal_in_allowbadttl) {
1864 				goto bad;
1865 			}
1866 		}
1867 	}
1868 
1869 	sum = ip_cksum(m, hlen);
1870 	if (sum) {
1871 		goto bad;
1872 	}
1873 
1874 	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1875 	    struct ip *, ip, struct ifnet *, inifp,
1876 	    struct ip *, ip, struct ip6_hdr *, NULL);
1877 
1878 	/*
1879 	 * Naively assume we can attribute inbound data to the route we would
1880 	 * use to send to this destination. Asymmetric routing breaks this
1881 	 * assumption, but it still allows us to account for traffic from
1882 	 * a remote node in the routing table.
1883 	 * This has a very significant performance impact, so we bypass
1884 	 * it when nstat_collect is disabled.  In the future we may also
1885 	 * bypass it for TCP, since TCP will have a route that we can use
1886 	 * to attribute the data to.  That does mean we would not account
1887 	 * for forwarded TCP traffic.
1888 	 */
1889 	if (nstat_collect) {
1890 		struct rtentry *rt =
1891 		    ifnet_cached_rtlookup_inet(inifp, ip->ip_src);
1892 		if (rt != NULL) {
1893 			nstat_route_rx(rt, 1, m->m_pkthdr.len, 0);
1894 			rtfree(rt);
1895 		}
1896 	}
1897 
1898 	/*
1899 	 * Convert fields to host representation.
1900 	 */
1901 #if BYTE_ORDER != BIG_ENDIAN
1902 	NTOHS(ip->ip_len);
1903 #endif
1904 
1905 	if (ip->ip_len < hlen) {
1906 		OSAddAtomic(1, &ipstat.ips_badlen);
1907 		goto bad;
1908 	}
1909 
1910 #if BYTE_ORDER != BIG_ENDIAN
1911 	NTOHS(ip->ip_off);
1912 #endif
1913 	/*
1914 	 * Check that the amount of data in the buffers
1915 	 * is at least as much as the IP header would have us expect.
1916 	 * Trim mbufs if longer than we expect.
1917 	 * Drop packet if shorter than we expect.
1918 	 */
1919 	if (m->m_pkthdr.len < ip->ip_len) {
1920 tooshort:
1921 		OSAddAtomic(1, &ipstat.ips_tooshort);
1922 		goto bad;
1923 	}
1924 	if (m->m_pkthdr.len > ip->ip_len) {
1925 		ip_input_adjust(m, ip, inifp);
1926 	}
1927 
1928 #if DUMMYNET
1929 check_with_pf:
1930 #endif
1931 #if PF
1932 	/* Invoke inbound packet filter */
1933 	if (PF_IS_ENABLED) {
1934 		int error;
1935 #if DUMMYNET
1936 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args);
1937 #else
1938 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
1939 #endif /* DUMMYNET */
1940 		if (error != 0 || m == NULL) {
1941 			if (m != NULL) {
1942 				panic("%s: unexpected packet %p",
1943 				    __func__, m);
1944 				/* NOTREACHED */
1945 			}
1946 			/* Already freed by callee */
1947 			return;
1948 		}
1949 		ip = mtod(m, struct ip *);
1950 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1951 	}
1952 #endif /* PF */
1953 
1954 #if IPSEC
1955 	if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) {
1956 		goto pass;
1957 	}
1958 #endif
1959 
1960 pass:
1961 	/*
1962 	 * Process options and, if not destined for us,
1963 	 * ship it on.  ip_dooptions returns 1 when an
1964 	 * error was detected (causing an icmp message
1965 	 * to be sent and the original packet to be freed).
1966 	 */
1967 	ip_nhops = 0;           /* for source routed packets */
1968 	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
1969 		return;
1970 	}
1971 
1972 	/*
1973 	 * Check our list of addresses, to see if the packet is for us.
1974 	 * If we don't have any addresses, assume any unicast packet
1975 	 * we receive might be for us (and let the upper layers deal
1976 	 * with it).
1977 	 */
1978 	if (TAILQ_EMPTY(&in_ifaddrhead) && !(m->m_flags & (M_MCAST | M_BCAST))) {
1979 		ip_setdstifaddr_info(m, inifp->if_index, NULL);
1980 		goto ours;
1981 	}
1982 
1983 	/*
1984 	 * Enable a consistency check between the destination address
1985 	 * and the arrival interface for a unicast packet (the RFC 1122
1986 	 * strong ES model) if IP forwarding is disabled and the packet
1987 	 * is not locally generated and the packet is not subject to
1988 	 * 'ipfw fwd'.
1989 	 *
1990 	 * XXX - Checking also should be disabled if the destination
1991 	 * address is ipnat'ed to a different interface.
1992 	 *
1993 	 * XXX - Checking is incompatible with IP aliases added
1994 	 * to the loopback interface instead of the interface where
1995 	 * the packets are received.
1996 	 */
1997 	if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
1998 		ip_check_if_result_t check_if_result = IP_CHECK_IF_NONE;
1999 
2000 		check_if_result = ip_input_check_interface(&m, ip, inifp);
2001 		ASSERT(check_if_result != IP_CHECK_IF_NONE);
2002 		if (check_if_result == IP_CHECK_IF_OURS) {
2003 			goto ours;
2004 		} else if (check_if_result == IP_CHECK_IF_DROP) {
2005 			return;
2006 		}
2007 	} else {
2008 		struct in_multi *inm;
2009 		/*
2010 		 * See if we belong to the destination multicast group on the
2011 		 * arrival interface.
2012 		 */
2013 		in_multihead_lock_shared();
2014 		IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
2015 		in_multihead_lock_done();
2016 		if (inm == NULL) {
2017 			OSAddAtomic(1, &ipstat.ips_notmember);
2018 			m_freem(m);
2019 			return;
2020 		}
2021 		ip_setdstifaddr_info(m, inifp->if_index, NULL);
2022 		INM_REMREF(inm);
2023 		goto ours;
2024 	}
2025 
2026 	/*
2027 	 * Not for us; forward if possible and desirable.
2028 	 */
2029 	if (ipforwarding == 0) {
2030 		OSAddAtomic(1, &ipstat.ips_cantforward);
2031 		m_freem(m);
2032 	} else {
2033 		ip_forward(m, 0, NULL);
2034 	}
2035 	return;
2036 
2037 ours:
2038 	/*
2039 	 * If offset or IP_MF are set, must reassemble.
2040 	 */
2041 	if (ip->ip_off & ~(IP_DF | IP_RF)) {
2042 		m = ip_reass(m);
2043 		if (m == NULL) {
2044 			return;
2045 		}
2046 		ip = mtod(m, struct ip *);
2047 		/* Get the header length of the reassembled packet */
2048 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2049 	}
2050 
2051 	/*
2052 	 * Further protocols expect the packet length to be w/o the
2053 	 * IP header.
2054 	 */
2055 	ip->ip_len -= hlen;
2056 
2057 
2058 #if IPSEC
2059 	/*
2060 	 * enforce IPsec policy checking if we are seeing last header.
2061 	 * note that we do not visit this with protocols with pcb layer
2062 	 * code - like udp/tcp/raw ip.
2063 	 */
2064 	if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
2065 		if (ipsec4_in_reject(m, NULL)) {
2066 			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
2067 			goto bad;
2068 		}
2069 	}
2070 #endif /* IPSEC */
2071 
2072 	/*
2073 	 * Switch out to protocol's input routine.
2074 	 */
2075 	OSAddAtomic(1, &ipstat.ips_delivered);
2076 
2077 	ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
2078 	return;
2079 
2080 bad:
2081 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
2082 	m_freem(m);
2083 }
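
/*
 * Worked example, not part of the original file, of the IP_VHL
 * decoding used throughout this routine: the version/header-length
 * byte packs the IP version in the high nibble and the header length
 * in 32-bit words in the low nibble, so the byte count is the low
 * nibble shifted left by 2.
 */
static inline unsigned int
ip_vhl_hlen_sketch(uint8_t vhl)
{
	/* e.g. vhl == 0x45: version 4, (0x5 << 2) == 20-byte header */
	return (unsigned int)(vhl & 0x0f) << 2;
}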
2084 
2085 static void
2086 ipq_updateparams(void)
2087 {
2088 	LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2089 	/*
2090 	 * -1 for unlimited allocation.
2091 	 */
2092 	if (maxnipq < 0) {
2093 		ipq_limit = 0;
2094 	}
2095 	/*
2096 	 * Positive number for specific bound.
2097 	 */
2098 	if (maxnipq > 0) {
2099 		ipq_limit = maxnipq;
2100 	}
2101 	/*
2102 	 * Zero specifies no further fragment queue allocation -- set the
2103 	 * bound very low, but rely on implementation elsewhere to actually
2104 	 * prevent allocation and reclaim current queues.
2105 	 */
2106 	if (maxnipq == 0) {
2107 		ipq_limit = 1;
2108 	}
2109 	/*
2110 	 * Arm the purge timer if not already and if there's work to do
2111 	 */
2112 	frag_sched_timeout();
2113 }
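
/*
 * Summary annotation, not original source: the maxnipq -> ipq_limit
 * mapping implemented above.
 *
 *	maxnipq < 0   ->  ipq_limit = 0        (0 is "unlimited" to ipq_alloc)
 *	maxnipq > 0   ->  ipq_limit = maxnipq  (explicit bound)
 *	maxnipq == 0  ->  ipq_limit = 1        (allocation effectively off)
 */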
2114 
2115 static int
2116 sysctl_maxnipq SYSCTL_HANDLER_ARGS
2117 {
2118 #pragma unused(arg1, arg2)
2119 	int error, i;
2120 
2121 	lck_mtx_lock(&ipqlock);
2122 	i = maxnipq;
2123 	error = sysctl_handle_int(oidp, &i, 0, req);
2124 	if (error || req->newptr == USER_ADDR_NULL) {
2125 		goto done;
2126 	}
2127 	/* impose bounds */
2128 	if (i < -1 || i > (nmbclusters / 4)) {
2129 		error = EINVAL;
2130 		goto done;
2131 	}
2132 	maxnipq = i;
2133 	ipq_updateparams();
2134 done:
2135 	lck_mtx_unlock(&ipqlock);
2136 	return error;
2137 }
2138 
2139 static int
2140 sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS
2141 {
2142 #pragma unused(arg1, arg2)
2143 	int error, i;
2144 
2145 	lck_mtx_lock(&ipqlock);
2146 	i = maxfragsperpacket;
2147 	error = sysctl_handle_int(oidp, &i, 0, req);
2148 	if (error || req->newptr == USER_ADDR_NULL) {
2149 		goto done;
2150 	}
2151 	maxfragsperpacket = i;
2152 	ipq_updateparams();     /* see if we need to arm timer */
2153 done:
2154 	lck_mtx_unlock(&ipqlock);
2155 	return error;
2156 }
2157 
2158 /*
2159  * Take incoming datagram fragment and try to reassemble it into
2160  * whole datagram.  If a reassembly queue for this datagram already
2161  * exists (fp), the fragment is added to it; otherwise one is created.
2162  *
2163  * The IP header is *NOT* adjusted out of iplen (but in host byte order).
2164  */
2165 static struct mbuf *
2166 ip_reass(struct mbuf *m)
2167 {
2168 	struct ip *ip;
2169 	struct mbuf *p, *q, *nq, *t;
2170 	struct ipq *fp = NULL;
2171 	struct ipqhead *head;
2172 	int i, hlen, next;
2173 	u_int8_t ecn, ecn0;
2174 	uint32_t csum, csum_flags;
2175 	uint16_t hash;
2176 	struct fq_head dfq;
2177 
2178 	MBUFQ_INIT(&dfq);       /* for deferred frees */
2179 
2180 	/* If maxnipq or maxfragsperpacket is 0, never accept fragments. */
2181 	if (maxnipq == 0 || maxfragsperpacket == 0) {
2182 		ipstat.ips_fragments++;
2183 		ipstat.ips_fragdropped++;
2184 		m_freem(m);
2185 		if (nipq > 0) {
2186 			lck_mtx_lock(&ipqlock);
2187 			frag_sched_timeout();   /* purge stale fragments */
2188 			lck_mtx_unlock(&ipqlock);
2189 		}
2190 		return NULL;
2191 	}
2192 
2193 	ip = mtod(m, struct ip *);
2194 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2195 
2196 	lck_mtx_lock(&ipqlock);
2197 
2198 	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
2199 	head = &ipq[hash];
2200 
2201 	/*
2202 	 * Look for queue of fragments
2203 	 * of this datagram.
2204 	 */
2205 	TAILQ_FOREACH(fp, head, ipq_list) {
2206 		if (ip->ip_id == fp->ipq_id &&
2207 		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
2208 		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
2209 		    ip->ip_p == fp->ipq_p) {
2210 			goto found;
2211 		}
2212 	}
2213 
2214 	fp = NULL;
2215 
2216 	/*
2217 	 * Attempt to trim the number of allocated fragment queues if it
2218 	 * exceeds the administrative limit.
2219 	 */
2220 	if ((nipq > (unsigned)maxnipq) && (maxnipq > 0)) {
2221 		/*
2222 		 * drop something from the tail of the current queue
2223 		 * before proceeding further
2224 		 */
2225 		struct ipq *fq = TAILQ_LAST(head, ipqhead);
2226 		if (fq == NULL) {   /* gak */
2227 			for (i = 0; i < IPREASS_NHASH; i++) {
2228 				struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead);
2229 				if (r) {
2230 					ipstat.ips_fragtimeout += r->ipq_nfrags;
2231 					frag_freef(&ipq[i], r);
2232 					break;
2233 				}
2234 			}
2235 		} else {
2236 			ipstat.ips_fragtimeout += fq->ipq_nfrags;
2237 			frag_freef(head, fq);
2238 		}
2239 	}
2240 
2241 found:
2242 	/*
2243 	 * Leverage partial checksum offload for IP fragments.  Narrow down
2244 	 * the scope to cover only UDP without IP options, as that is the
2245 	 * most common case.
2246 	 *
2247 	 * Perform 1's complement adjustment of octets that got included/
2248 	 * excluded in the hardware-calculated checksum value.  Ignore cases
2249 	 * where the value includes the entire IPv4 header span, as the sum
2250 	 * for those octets would already be 0 by the time we get here; IP
2251 	 * has already performed its header checksum validation.  Also take
2252 	 * care of any trailing bytes and subtract out their partial sum.
2253 	 */
2254 	if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) &&
2255 	    (m->m_pkthdr.csum_flags &
2256 	    (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
2257 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
2258 		uint32_t start = m->m_pkthdr.csum_rx_start;
2259 		int32_t trailer = (m_pktlen(m) - ip->ip_len);
2260 		uint32_t swbytes = (uint32_t)trailer;
2261 
2262 		csum = m->m_pkthdr.csum_rx_val;
2263 
2264 		ASSERT(trailer >= 0);
2265 		if ((start != 0 && start != hlen) || trailer != 0) {
2266 			uint32_t datalen = ip->ip_len - hlen;
2267 
2268 #if BYTE_ORDER != BIG_ENDIAN
2269 			if (start < hlen) {
2270 				HTONS(ip->ip_len);
2271 				HTONS(ip->ip_off);
2272 			}
2273 #endif /* BYTE_ORDER != BIG_ENDIAN */
2274 			/* callee folds in sum */
2275 			csum = m_adj_sum16(m, start, hlen, datalen, csum);
2276 			if (hlen > start) {
2277 				swbytes += (hlen - start);
2278 			} else {
2279 				swbytes += (start - hlen);
2280 			}
2281 #if BYTE_ORDER != BIG_ENDIAN
2282 			if (start < hlen) {
2283 				NTOHS(ip->ip_off);
2284 				NTOHS(ip->ip_len);
2285 			}
2286 #endif /* BYTE_ORDER != BIG_ENDIAN */
2287 		}
2288 		csum_flags = m->m_pkthdr.csum_flags;
2289 
2290 		if (swbytes != 0) {
2291 			udp_in_cksum_stats(swbytes);
2292 		}
2293 		if (trailer != 0) {
2294 			m_adj(m, -trailer);
2295 		}
2296 	} else {
2297 		csum = 0;
2298 		csum_flags = 0;
2299 	}
2300 
2301 	/* Invalidate checksum */
2302 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
2303 
2304 	ipstat.ips_fragments++;
2305 
2306 	/*
2307 	 * Adjust ip_len to not reflect header,
2308 	 * convert offset of this to bytes.
2309 	 */
2310 	ip->ip_len -= hlen;
2311 	if (ip->ip_off & IP_MF) {
2312 		/*
2313 		 * Make sure that fragments have a data length
2314 		 * that's a non-zero multiple of 8 bytes.
2315 		 */
2316 		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
2317 			OSAddAtomic(1, &ipstat.ips_toosmall);
2318 			/*
2319 			 * Reassembly queue may have been found if previous
2320 			 * fragments were valid; given that this one is bad,
2321 			 * we need to drop it.  Make sure to set fp to NULL
2322 			 * if not already, since we don't want to decrement
2323 			 * ipq_nfrags as it doesn't include this packet.
2324 			 */
2325 			fp = NULL;
2326 			goto dropfrag;
2327 		}
2328 		m->m_flags |= M_FRAG;
2329 	} else {
2330 		/* Clear the flag in case packet comes from loopback */
2331 		m->m_flags &= ~M_FRAG;
2332 	}
2333 	ip->ip_off <<= 3;
2334 
2335 	m->m_pkthdr.pkt_hdr = ip;
2336 
2337 	/* Previous ip_reass() started here. */
2338 	/*
2339 	 * Presence of header sizes in mbufs
2340 	 * Leaving the IP header in place would confuse the code below,
2341 	 * so advance past it (it stays reachable via pkt_hdr).
2342 	m->m_data += hlen;
2343 	m->m_len -= hlen;
2344 
2345 	/*
2346 	 * If first fragment to arrive, create a reassembly queue.
2347 	 */
2348 	if (fp == NULL) {
2349 		fp = ipq_alloc(M_DONTWAIT);
2350 		if (fp == NULL) {
2351 			goto dropfrag;
2352 		}
2353 		TAILQ_INSERT_HEAD(head, fp, ipq_list);
2354 		nipq++;
2355 		fp->ipq_nfrags = 1;
2356 		fp->ipq_ttl = IPFRAGTTL;
2357 		fp->ipq_p = ip->ip_p;
2358 		fp->ipq_id = ip->ip_id;
2359 		fp->ipq_src = ip->ip_src;
2360 		fp->ipq_dst = ip->ip_dst;
2361 		fp->ipq_frags = m;
2362 		m->m_nextpkt = NULL;
2363 		/*
2364 		 * If the first fragment has valid checksum offload
2365 		 * info, the rest of fragments are eligible as well.
2366 		 */
2367 		if (csum_flags != 0) {
2368 			fp->ipq_csum = csum;
2369 			fp->ipq_csum_flags = csum_flags;
2370 		}
2371 		m = NULL;       /* nothing to return */
2372 		goto done;
2373 	} else {
2374 		fp->ipq_nfrags++;
2375 	}
2376 
2377 #define GETIP(m)        ((struct ip *)((m)->m_pkthdr.pkt_hdr))
2378 
2379 	/*
2380 	 * Handle ECN by comparing this segment with the first one;
2381 	 * if CE is set, do not lose CE.
2382 	 * drop if CE and not-ECT are mixed for the same packet.
2383 	 */
2384 	ecn = ip->ip_tos & IPTOS_ECN_MASK;
2385 	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
2386 	if (ecn == IPTOS_ECN_CE) {
2387 		if (ecn0 == IPTOS_ECN_NOTECT) {
2388 			goto dropfrag;
2389 		}
2390 		if (ecn0 != IPTOS_ECN_CE) {
2391 			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
2392 		}
2393 	}
2394 	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
2395 		goto dropfrag;
2396 	}
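	/*
	 * Annotation, not original source: the ECN reconciliation above,
	 * with ecn0 the first fragment's codepoint and ecn this one's --
	 *
	 *	ecn == CE,      ecn0 == Not-ECT  -> drop (CE/Not-ECT mix)
	 *	ecn == CE,      ecn0 == ECT      -> promote first frag to CE
	 *	ecn == Not-ECT, ecn0 != Not-ECT  -> drop (Not-ECT/ECT mix)
	 *	otherwise                        -> accept unchanged
	 */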
2397 
2398 	/*
2399 	 * Find a segment which begins after this one does.
2400 	 */
2401 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2402 		if (GETIP(q)->ip_off > ip->ip_off) {
2403 			break;
2404 		}
2405 	}
2406 
2407 	/*
2408 	 * If there is a preceding segment, it may provide some of
2409 	 * our data already.  If so, drop the data from the incoming
2410 	 * segment.  If it provides all of our data, drop us, otherwise
2411 	 * stick new segment in the proper place.
2412 	 *
2413 	 * If some of the data is dropped from the preceding
2414 	 * segment, then it's checksum is invalidated.
2415 	 * segment, then its checksum is invalidated.
2416 	if (p) {
2417 		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
2418 		if (i > 0) {
2419 			if (i >= ip->ip_len) {
2420 				goto dropfrag;
2421 			}
2422 			m_adj(m, i);
2423 			fp->ipq_csum_flags = 0;
2424 			ip->ip_off += i;
2425 			ip->ip_len -= i;
2426 		}
2427 		m->m_nextpkt = p->m_nextpkt;
2428 		p->m_nextpkt = m;
2429 	} else {
2430 		m->m_nextpkt = fp->ipq_frags;
2431 		fp->ipq_frags = m;
2432 	}
2433 
2434 	/*
2435 	 * While we overlap succeeding segments trim them or,
2436 	 * if they are completely covered, dequeue them.
2437 	 */
2438 	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
2439 	    q = nq) {
2440 		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
2441 		if (i < GETIP(q)->ip_len) {
2442 			GETIP(q)->ip_len -= i;
2443 			GETIP(q)->ip_off += i;
2444 			m_adj(q, i);
2445 			fp->ipq_csum_flags = 0;
2446 			break;
2447 		}
2448 		nq = q->m_nextpkt;
2449 		m->m_nextpkt = nq;
2450 		ipstat.ips_fragdropped++;
2451 		fp->ipq_nfrags--;
2452 		/* defer freeing until after lock is dropped */
2453 		MBUFQ_ENQUEUE(&dfq, q);
2454 	}
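	/*
	 * Worked example, annotation rather than original source: if a
	 * queued fragment q covers bytes [800, 1200) and the new fragment
	 * covers [0, 1000), then i = 1000 - 800 = 200 < 400, so q is
	 * trimmed to [1000, 1200) and the loop stops.  Had the new
	 * fragment covered [0, 1300), q would have been fully overlapped
	 * and dequeued onto dfq instead.
	 */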
2455 
2456 	/*
2457 	 * If this fragment contains similar checksum offload info
2458 	 * as that of the existing ones, accumulate checksum.  Otherwise,
2459 	 * invalidate checksum offload info for the entire datagram.
2460 	 */
2461 	if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) {
2462 		fp->ipq_csum += csum;
2463 	} else if (fp->ipq_csum_flags != 0) {
2464 		fp->ipq_csum_flags = 0;
2465 	}
2466 
2467 
2468 	/*
2469 	 * Check for complete reassembly and perform frag per packet
2470 	 * limiting.
2471 	 *
2472 	 * Frag limiting is performed here so that the nth frag has
2473 	 * a chance to complete the packet before we drop the packet.
2474 	 * As a result, n+1 frags are actually allowed per packet, but
2475 	 * only n will ever be stored. (n = maxfragsperpacket.)
2476 	 *
2477 	 */
2478 	next = 0;
2479 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2480 		if (GETIP(q)->ip_off != next) {
2481 			if (fp->ipq_nfrags > maxfragsperpacket) {
2482 				ipstat.ips_fragdropped += fp->ipq_nfrags;
2483 				frag_freef(head, fp);
2484 			}
2485 			m = NULL;       /* nothing to return */
2486 			goto done;
2487 		}
2488 		next += GETIP(q)->ip_len;
2489 	}
2490 	/* Make sure the last packet didn't have the IP_MF flag */
2491 	if (p->m_flags & M_FRAG) {
2492 		if (fp->ipq_nfrags > maxfragsperpacket) {
2493 			ipstat.ips_fragdropped += fp->ipq_nfrags;
2494 			frag_freef(head, fp);
2495 		}
2496 		m = NULL;               /* nothing to return */
2497 		goto done;
2498 	}
2499 
2500 	/*
2501 	 * Reassembly is complete.  Make sure the packet is a sane size.
2502 	 */
2503 	q = fp->ipq_frags;
2504 	ip = GETIP(q);
2505 	if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
2506 		ipstat.ips_toolong++;
2507 		ipstat.ips_fragdropped += fp->ipq_nfrags;
2508 		frag_freef(head, fp);
2509 		m = NULL;               /* nothing to return */
2510 		goto done;
2511 	}
2512 
2513 	/*
2514 	 * Concatenate fragments.
2515 	 */
2516 	m = q;
2517 	t = m->m_next;
2518 	m->m_next = NULL;
2519 	m_cat(m, t);
2520 	nq = q->m_nextpkt;
2521 	q->m_nextpkt = NULL;
2522 	for (q = nq; q != NULL; q = nq) {
2523 		nq = q->m_nextpkt;
2524 		q->m_nextpkt = NULL;
2525 		m_cat(m, q);
2526 	}
2527 
2528 	/*
2529 	 * Store partial hardware checksum info from the fragment queue;
2530 	 * the receive start offset is set to 20 bytes (see code at the
2531 	 * top of this routine.)
2532 	 */
2533 	if (fp->ipq_csum_flags != 0) {
2534 		csum = fp->ipq_csum;
2535 
2536 		ADDCARRY(csum);
2537 
2538 		m->m_pkthdr.csum_rx_val = (uint16_t)csum;
2539 		m->m_pkthdr.csum_rx_start = sizeof(struct ip);
2540 		m->m_pkthdr.csum_flags = fp->ipq_csum_flags;
2541 	} else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
2542 	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
2543 		/* loopback checksums are always OK */
2544 		m->m_pkthdr.csum_data = 0xffff;
2545 		m->m_pkthdr.csum_flags =
2546 		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
2547 		    CSUM_IP_CHECKED | CSUM_IP_VALID;
2548 	}
2549 
2550 	/*
2551 	 * Create header for new ip packet by modifying header of first
2552 	 * packet; dequeue and discard fragment reassembly header.
2553 	 * Make header visible.
2554 	 */
2555 	ip->ip_len = (u_short)((IP_VHL_HL(ip->ip_vhl) << 2) + next);
2556 	ip->ip_src = fp->ipq_src;
2557 	ip->ip_dst = fp->ipq_dst;
2558 
2559 	fp->ipq_frags = NULL;   /* return to caller as 'm' */
2560 	frag_freef(head, fp);
2561 	fp = NULL;
2562 
2563 	m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
2564 	m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
2565 	/* some debugging cruft by sklower, below, will go away soon */
2566 	if (m->m_flags & M_PKTHDR) {    /* XXX this should be done elsewhere */
2567 		m_fixhdr(m);
2568 	}
2569 	ipstat.ips_reassembled++;
2570 
2571 	/* arm the purge timer if not already and if there's work to do */
2572 	frag_sched_timeout();
2573 	lck_mtx_unlock(&ipqlock);
2574 	/* perform deferred free (if needed) now that lock is dropped */
2575 	if (!MBUFQ_EMPTY(&dfq)) {
2576 		MBUFQ_DRAIN(&dfq);
2577 	}
2578 	VERIFY(MBUFQ_EMPTY(&dfq));
2579 	return m;
2580 
2581 done:
2582 	VERIFY(m == NULL);
2583 	/* arm the purge timer if not already and if there's work to do */
2584 	frag_sched_timeout();
2585 	lck_mtx_unlock(&ipqlock);
2586 	/* perform deferred free (if needed) */
2587 	if (!MBUFQ_EMPTY(&dfq)) {
2588 		MBUFQ_DRAIN(&dfq);
2589 	}
2590 	VERIFY(MBUFQ_EMPTY(&dfq));
2591 	return NULL;
2592 
2593 dropfrag:
2594 	ipstat.ips_fragdropped++;
2595 	if (fp != NULL) {
2596 		fp->ipq_nfrags--;
2597 	}
2598 	/* arm the purge timer if not already and if there's work to do */
2599 	frag_sched_timeout();
2600 	lck_mtx_unlock(&ipqlock);
2601 	m_freem(m);
2602 	/* perform deferred free (if needed) */
2603 	if (!MBUFQ_EMPTY(&dfq)) {
2604 		MBUFQ_DRAIN(&dfq);
2605 	}
2606 	VERIFY(MBUFQ_EMPTY(&dfq));
2607 	return NULL;
2608 #undef GETIP
2609 }
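
/*
 * A minimal sketch, not from the original source, of the 1's
 * complement carry fold that ADDCARRY applies to the accumulated
 * fragment checksum above; this is the general folding loop, of which
 * ADDCARRY's single conditional adjustment is a bounded special case.
 */
static inline uint16_t
csum_fold_sketch(uint32_t sum)
{
	while (sum > 0xffff) {
		sum = (sum & 0xffff) + (sum >> 16);     /* fold carry back in */
	}
	return (uint16_t)sum;
}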
2610 
2611 /*
2612  * Free a fragment reassembly header and all
2613  * associated datagrams.
2614  */
2615 static void
2616 frag_freef(struct ipqhead *fhp, struct ipq *fp)
2617 {
2618 	LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2619 
2620 	fp->ipq_nfrags = 0;
2621 	if (fp->ipq_frags != NULL) {
2622 		m_freem_list(fp->ipq_frags);
2623 		fp->ipq_frags = NULL;
2624 	}
2625 	TAILQ_REMOVE(fhp, fp, ipq_list);
2626 	nipq--;
2627 	ipq_free(fp);
2628 }
2629 
2630 /*
2631  * IP reassembly timer processing
2632  */
2633 static void
2634 frag_timeout(void *arg)
2635 {
2636 #pragma unused(arg)
2637 	struct ipq *fp;
2638 	int i;
2639 
2640 	/*
2641 	 * Update coarse-grained networking timestamp (in sec.); the idea
2642 	 * is to piggy-back on the timeout callout to update the counter
2643 	 * returnable via net_uptime().
2644 	 */
2645 	net_update_uptime();
2646 
2647 	lck_mtx_lock(&ipqlock);
2648 	for (i = 0; i < IPREASS_NHASH; i++) {
2649 		for (fp = TAILQ_FIRST(&ipq[i]); fp;) {
2650 			struct ipq *fpp;
2651 
2652 			fpp = fp;
2653 			fp = TAILQ_NEXT(fp, ipq_list);
2654 			if (--fpp->ipq_ttl == 0) {
2655 				ipstat.ips_fragtimeout += fpp->ipq_nfrags;
2656 				frag_freef(&ipq[i], fpp);
2657 			}
2658 		}
2659 	}
2660 	/*
2661 	 * If we are over the maximum number of fragments
2662 	 * (due to the limit being lowered), drain off
2663 	 * enough to get down to the new limit.
2664 	 */
2665 	if (maxnipq >= 0 && nipq > (unsigned)maxnipq) {
2666 		for (i = 0; i < IPREASS_NHASH; i++) {
2667 			while (nipq > (unsigned)maxnipq &&
2668 			    !TAILQ_EMPTY(&ipq[i])) {
2669 				ipstat.ips_fragdropped +=
2670 				    TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2671 				frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
2672 			}
2673 		}
2674 	}
2675 	/* re-arm the purge timer if there's work to do */
2676 	frag_timeout_run = 0;
2677 	frag_sched_timeout();
2678 	lck_mtx_unlock(&ipqlock);
2679 }
2680 
2681 static void
2682 frag_sched_timeout(void)
2683 {
2684 	LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2685 
2686 	if (!frag_timeout_run && nipq > 0) {
2687 		frag_timeout_run = 1;
2688 		timeout(frag_timeout, NULL, hz);
2689 	}
2690 }
2691 
2692 /*
2693  * Drain off all datagram fragments.
2694  */
2695 static void
2696 frag_drain(void)
2697 {
2698 	int i;
2699 
2700 	lck_mtx_lock(&ipqlock);
2701 	for (i = 0; i < IPREASS_NHASH; i++) {
2702 		while (!TAILQ_EMPTY(&ipq[i])) {
2703 			ipstat.ips_fragdropped +=
2704 			    TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2705 			frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
2706 		}
2707 	}
2708 	lck_mtx_unlock(&ipqlock);
2709 }
2710 
2711 static struct ipq *
2712 ipq_alloc(int how)
2713 {
2714 	struct mbuf *t;
2715 	struct ipq *fp;
2716 
2717 	/*
2718 	 * See comments in ipq_updateparams().  Keep the count separate
2719 	 * from nipq since the latter represents the elements already
2720 	 * in the reassembly queues.
2721 	 */
2722 	if (ipq_limit > 0 && ipq_count > ipq_limit) {
2723 		return NULL;
2724 	}
2725 
2726 	t = m_get(how, MT_FTABLE);
2727 	if (t != NULL) {
2728 		atomic_add_32(&ipq_count, 1);
2729 		fp = mtod(t, struct ipq *);
2730 		bzero(fp, sizeof(*fp));
2731 	} else {
2732 		fp = NULL;
2733 	}
2734 	return fp;
2735 }
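
/*
 * Annotation, not original source: the allocation pattern above carves
 * a struct ipq out of an MT_FTABLE mbuf's data area via mtod(), so an
 * ipq is freed by freeing its backing mbuf -- dtom() in ipq_free()
 * recovers that mbuf from the ipq pointer.
 */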
2736 
2737 static void
2738 ipq_free(struct ipq *fp)
2739 {
2740 	(void) m_free(dtom(fp));
2741 	atomic_add_32(&ipq_count, -1);
2742 }
2743 
2744 /*
2745  * Drain callback
2746  */
2747 void
2748 ip_drain(void)
2749 {
2750 	frag_drain();           /* fragments */
2751 	in_rtqdrain();          /* protocol cloned routes */
2752 	in_arpdrain(NULL);      /* cloned routes: ARP */
2753 }
2754 
2755 /*
2756  * Do option processing on a datagram,
2757  * possibly discarding it if bad options are encountered,
2758  * or forwarding it if source-routed.
2759  * The pass argument is used when operating in the IPSTEALTH
2760  * mode to tell what options to process:
2761  * [LS]SRR (pass 0) or the others (pass 1).
2762  * The reason for as many as two passes is that when doing IPSTEALTH,
2763  * non-routing options should be processed only if the packet is for us.
2764  * Returns 1 if packet has been forwarded/freed,
2765  * 0 if the packet should be processed further.
2766  */
2767 static int
2768 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
2769 {
2770 #pragma unused(pass)
2771 	struct ip *ip = mtod(m, struct ip *);
2772 	u_char *cp;
2773 	struct ip_timestamp *ipt;
2774 	struct in_ifaddr *ia;
2775 	int opt, optlen, cnt, off, type = ICMP_PARAMPROB, forward = 0;
2776 	uint8_t code = 0;
2777 	struct in_addr *sin, dst;
2778 	u_int32_t ntime;
2779 	struct sockaddr_in ipaddr = {
2780 		.sin_len = sizeof(ipaddr),
2781 		.sin_family = AF_INET,
2782 		.sin_port = 0,
2783 		.sin_addr = { .s_addr = 0 },
2784 		.sin_zero = { 0, }
2785 	};
2786 
2787 	/* Expect 32-bit aligned data pointer on strict-align platforms */
2788 	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2789 
2790 	dst = ip->ip_dst;
2791 	cp = (u_char *)(ip + 1);
2792 	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2793 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
2794 		opt = cp[IPOPT_OPTVAL];
2795 		if (opt == IPOPT_EOL) {
2796 			break;
2797 		}
2798 		if (opt == IPOPT_NOP) {
2799 			optlen = 1;
2800 		} else {
2801 			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2802 				code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2803 				goto bad;
2804 			}
2805 			optlen = cp[IPOPT_OLEN];
2806 			if (optlen < IPOPT_OLEN + sizeof(*cp) ||
2807 			    optlen > cnt) {
2808 				code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2809 				goto bad;
2810 			}
2811 		}
2812 		switch (opt) {
2813 		default:
2814 			break;
2815 
2816 		/*
2817 		 * Source routing with record.
2818 		 * Find interface with current destination address.
2819 		 * If none on this machine then drop if strictly routed,
2820 		 * or do nothing if loosely routed.
2821 		 * Record interface address and bring up next address
2822 		 * component.  If strictly routed make sure next
2823 		 * address is on directly accessible net.
2824 		 */
2825 		case IPOPT_LSRR:
2826 		case IPOPT_SSRR:
2827 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2828 				code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2829 				goto bad;
2830 			}
2831 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2832 				code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2833 				goto bad;
2834 			}
2835 			ipaddr.sin_addr = ip->ip_dst;
2836 			ia = (struct in_ifaddr *)ifa_ifwithaddr(SA(&ipaddr));
2837 			if (ia == NULL) {
2838 				if (opt == IPOPT_SSRR) {
2839 					type = ICMP_UNREACH;
2840 					code = ICMP_UNREACH_SRCFAIL;
2841 					goto bad;
2842 				}
2843 				if (!ip_dosourceroute) {
2844 					goto nosourcerouting;
2845 				}
2846 				/*
2847 				 * Loose routing, and not at next destination
2848 				 * yet; nothing to do except forward.
2849 				 */
2850 				break;
2851 			} else {
2852 				IFA_REMREF(&ia->ia_ifa);
2853 				ia = NULL;
2854 			}
2855 			off--;                  /* 0 origin */
2856 			if (off > optlen - (int)sizeof(struct in_addr)) {
2857 				/*
2858 				 * End of source route.  Should be for us.
2859 				 */
2860 				if (!ip_acceptsourceroute) {
2861 					goto nosourcerouting;
2862 				}
2863 				save_rte(cp, ip->ip_src);
2864 				break;
2865 			}
2866 
2867 			if (!ip_dosourceroute) {
2868 				if (ipforwarding) {
2869 					char buf[MAX_IPv4_STR_LEN];
2870 					char buf2[MAX_IPv4_STR_LEN];
2871 					/*
2872 					 * Acting as a router, so generate ICMP
2873 					 */
2874 nosourcerouting:
2875 					log(LOG_WARNING,
2876 					    "attempted source route from %s "
2877 					    "to %s\n",
2878 					    inet_ntop(AF_INET, &ip->ip_src,
2879 					    buf, sizeof(buf)),
2880 					    inet_ntop(AF_INET, &ip->ip_dst,
2881 					    buf2, sizeof(buf2)));
2882 					type = ICMP_UNREACH;
2883 					code = ICMP_UNREACH_SRCFAIL;
2884 					goto bad;
2885 				} else {
2886 					/*
2887 					 * Not acting as a router,
2888 					 * so silently drop.
2889 					 */
2890 					OSAddAtomic(1, &ipstat.ips_cantforward);
2891 					m_freem(m);
2892 					return 1;
2893 				}
2894 			}
2895 
2896 			/*
2897 			 * locate outgoing interface
2898 			 */
2899 			(void) memcpy(&ipaddr.sin_addr, cp + off,
2900 			    sizeof(ipaddr.sin_addr));
2901 
2902 			if (opt == IPOPT_SSRR) {
2903 #define INA     struct in_ifaddr *
2904 				if ((ia = (INA)ifa_ifwithdstaddr(
2905 					    SA(&ipaddr))) == NULL) {
2906 					ia = (INA)ifa_ifwithnet(SA(&ipaddr));
2907 				}
2908 			} else {
2909 				ia = ip_rtaddr(ipaddr.sin_addr);
2910 			}
2911 			if (ia == NULL) {
2912 				type = ICMP_UNREACH;
2913 				code = ICMP_UNREACH_SRCFAIL;
2914 				goto bad;
2915 			}
2916 			ip->ip_dst = ipaddr.sin_addr;
2917 			IFA_LOCK(&ia->ia_ifa);
2918 			(void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2919 			    sizeof(struct in_addr));
2920 			IFA_UNLOCK(&ia->ia_ifa);
2921 			IFA_REMREF(&ia->ia_ifa);
2922 			ia = NULL;
2923 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2924 			/*
2925 			 * Let ip_intr's mcast routing check handle mcast pkts
2926 			 */
2927 			forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
2928 			break;
2929 
2930 		case IPOPT_RR:
2931 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2932 				code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2933 				goto bad;
2934 			}
2935 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2936 				code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2937 				goto bad;
2938 			}
2939 			/*
2940 			 * If no space remains, ignore.
2941 			 */
2942 			off--;                  /* 0 origin */
2943 			if (off > optlen - (int)sizeof(struct in_addr)) {
2944 				break;
2945 			}
2946 			(void) memcpy(&ipaddr.sin_addr, &ip->ip_dst,
2947 			    sizeof(ipaddr.sin_addr));
2948 			/*
2949 			 * locate outgoing interface; if we're the destination,
2950 			 * use the incoming interface (should be same).
2951 			 */
2952 			if ((ia = (INA)ifa_ifwithaddr(SA(&ipaddr))) == NULL) {
2953 				if ((ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) {
2954 					type = ICMP_UNREACH;
2955 					code = ICMP_UNREACH_HOST;
2956 					goto bad;
2957 				}
2958 			}
2959 			IFA_LOCK(&ia->ia_ifa);
2960 			(void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2961 			    sizeof(struct in_addr));
2962 			IFA_UNLOCK(&ia->ia_ifa);
2963 			IFA_REMREF(&ia->ia_ifa);
2964 			ia = NULL;
2965 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2966 			break;
2967 
2968 		case IPOPT_TS:
2969 			code = (uint8_t)(cp - (u_char *)ip);
2970 			ipt = (struct ip_timestamp *)(void *)cp;
2971 			if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
2972 				code = (uint8_t)((u_char *)&ipt->ipt_len -
2973 				    (u_char *)ip);
2974 				goto bad;
2975 			}
2976 			if (ipt->ipt_ptr < 5) {
2977 				code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2978 				    (u_char *)ip);
2979 				goto bad;
2980 			}
2981 			if (ipt->ipt_ptr >
2982 			    ipt->ipt_len - (int)sizeof(int32_t)) {
2983 				if (++ipt->ipt_oflw == 0) {
2984 					code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2985 					    (u_char *)ip);
2986 					goto bad;
2987 				}
2988 				break;
2989 			}
2990 			sin = (struct in_addr *)(void *)(cp + ipt->ipt_ptr - 1);
2991 			switch (ipt->ipt_flg) {
2992 			case IPOPT_TS_TSONLY:
2993 				break;
2994 
2995 			case IPOPT_TS_TSANDADDR:
2996 				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
2997 				    sizeof(struct in_addr) > ipt->ipt_len) {
2998 					code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2999 					    (u_char *)ip);
3000 					goto bad;
3001 				}
3002 				ipaddr.sin_addr = dst;
3003 				ia = (INA)ifaof_ifpforaddr(SA(&ipaddr),
3004 				    m->m_pkthdr.rcvif);
3005 				if (ia == NULL) {
3006 					continue;
3007 				}
3008 				IFA_LOCK(&ia->ia_ifa);
3009 				(void) memcpy(sin, &IA_SIN(ia)->sin_addr,
3010 				    sizeof(struct in_addr));
3011 				IFA_UNLOCK(&ia->ia_ifa);
3012 				ipt->ipt_ptr += sizeof(struct in_addr);
3013 				IFA_REMREF(&ia->ia_ifa);
3014 				ia = NULL;
3015 				break;
3016 
3017 			case IPOPT_TS_PRESPEC:
3018 				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
3019 				    sizeof(struct in_addr) > ipt->ipt_len) {
3020 					code = (uint8_t)((u_char *)&ipt->ipt_ptr -
3021 					    (u_char *)ip);
3022 					goto bad;
3023 				}
3024 				(void) memcpy(&ipaddr.sin_addr, sin,
3025 				    sizeof(struct in_addr));
3026 				if ((ia = (struct in_ifaddr *)ifa_ifwithaddr(
3027 					    SA(&ipaddr))) == NULL) {
3028 					continue;
3029 				}
3030 				IFA_REMREF(&ia->ia_ifa);
3031 				ia = NULL;
3032 				ipt->ipt_ptr += sizeof(struct in_addr);
3033 				break;
3034 
3035 			default:
3036 				/* XXX can't take &ipt->ipt_flg */
3037 				code = (uint8_t)((u_char *)&ipt->ipt_ptr -
3038 				    (u_char *)ip + 1);
3039 				goto bad;
3040 			}
3041 			ntime = iptime();
3042 			(void) memcpy(cp + ipt->ipt_ptr - 1, &ntime,
3043 			    sizeof(n_time));
3044 			ipt->ipt_ptr += sizeof(n_time);
3045 		}
3046 	}
3047 	if (forward && ipforwarding) {
3048 		ip_forward(m, 1, next_hop);
3049 		return 1;
3050 	}
3051 	return 0;
3052 bad:
3053 	icmp_error(m, type, code, 0, 0);
3054 	OSAddAtomic(1, &ipstat.ips_badoptions);
3055 	return 1;
3056 }
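
/*
 * A standalone sketch, not part of the original file, of the option
 * walk ip_dooptions() performs above: IP options form a TLV sequence
 * in which EOL terminates the list, NOP is a single padding byte, and
 * every other option carries an explicit length that must fit within
 * the remaining option space.
 */
static int
ip_optwalk_sketch(const u_char *cp, int cnt)
{
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL) {
			break;                  /* end of option list */
		}
		if (opt == IPOPT_NOP) {
			optlen = 1;             /* single-byte padding */
			continue;
		}
		if (cnt < IPOPT_OLEN + 1) {
			return -1;              /* no room for a length byte */
		}
		optlen = cp[IPOPT_OLEN];
		if (optlen < IPOPT_OLEN + 1 || optlen > cnt) {
			return -1;              /* malformed option length */
		}
	}
	return 0;
}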
3057 
3058 /*
3059  * Check for the presence of the IP Router Alert option [RFC2113]
3060  * in the header of an IPv4 datagram.
3061  *
3062  * This call is not intended for use from the forwarding path; it is here
3063  * so that protocol domains may check for the presence of the option.
3064  * Given how FreeBSD's IPv4 stack is currently structured, the Router Alert
3065  * option does not have much relevance to the implementation, though this
3066  * may change in future.
3067  * Router alert options SHOULD be passed if running in IPSTEALTH mode and
3068  * we are not the endpoint.
3069  * Length checks on individual options should already have been performed
3070  * by ip_dooptions() therefore they are folded under DIAGNOSTIC here.
3071  *
3072  * Return zero if not present or options are invalid, non-zero if present.
3073  */
3074 int
3075 ip_checkrouteralert(struct mbuf *m)
3076 {
3077 	struct ip *ip = mtod(m, struct ip *);
3078 	u_char *cp;
3079 	int opt, optlen, cnt, found_ra;
3080 
3081 	found_ra = 0;
3082 	cp = (u_char *)(ip + 1);
3083 	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
3084 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
3085 		opt = cp[IPOPT_OPTVAL];
3086 		if (opt == IPOPT_EOL) {
3087 			break;
3088 		}
3089 		if (opt == IPOPT_NOP) {
3090 			optlen = 1;
3091 		} else {
3092 #ifdef DIAGNOSTIC
3093 			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
3094 				break;
3095 			}
3096 #endif
3097 			optlen = cp[IPOPT_OLEN];
3098 #ifdef DIAGNOSTIC
3099 			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
3100 				break;
3101 			}
3102 #endif
3103 		}
3104 		switch (opt) {
3105 		case IPOPT_RA:
3106 #ifdef DIAGNOSTIC
3107 			if (optlen != IPOPT_OFFSET + sizeof(uint16_t) ||
3108 			    (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) {
3109 				break;
3110 			} else
3111 #endif
3112 			found_ra = 1;
3113 			break;
3114 		default:
3115 			break;
3116 		}
3117 	}
3118 
3119 	return found_ra;
3120 }
3121 
3122 /*
3123  * Given address of next destination (final or next hop),
3124  * return internet address info of interface to be used to get there.
3125  */
3126 struct in_ifaddr *
3127 ip_rtaddr(struct in_addr dst)
3128 {
3129 	struct sockaddr_in *sin;
3130 	struct ifaddr *rt_ifa;
3131 	struct route ro;
3132 
3133 	bzero(&ro, sizeof(ro));
3134 	sin = SIN(&ro.ro_dst);
3135 	sin->sin_family = AF_INET;
3136 	sin->sin_len = sizeof(*sin);
3137 	sin->sin_addr = dst;
3138 
3139 	rtalloc_ign(&ro, RTF_PRCLONING);
3140 	if (ro.ro_rt == NULL) {
3141 		ROUTE_RELEASE(&ro);
3142 		return NULL;
3143 	}
3144 
3145 	RT_LOCK(ro.ro_rt);
3146 	if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) {
3147 		IFA_ADDREF(rt_ifa);
3148 	}
3149 	RT_UNLOCK(ro.ro_rt);
3150 	ROUTE_RELEASE(&ro);
3151 
3152 	return (struct in_ifaddr *)rt_ifa;
3153 }
3154 
3155 /*
3156  * Save incoming source route for use in replies,
3157  * to be picked up later by ip_srcroute if the receiver is interested.
3158  */
3159 void
3160 save_rte(u_char *option, struct in_addr dst)
3161 {
3162 	unsigned olen;
3163 
3164 	olen = option[IPOPT_OLEN];
3165 #if DIAGNOSTIC
3166 	if (ipprintfs) {
3167 		printf("save_rte: olen %d\n", olen);
3168 	}
3169 #endif
3170 	if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) {
3171 		return;
3172 	}
3173 	bcopy(option, ip_srcrt.srcopt, olen);
3174 	ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
3175 	ip_srcrt.dst = dst;
3176 }
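
/*
 * Worked example, annotation rather than original source, for the
 * ip_nhops computation above: an [LS]SRR option is 3 header bytes
 * (type, length, pointer) plus 4 bytes per recorded hop, so
 * olen == 3 + 4n and (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr)
 * == (olen - 3) / 4 recovers n; e.g. olen == 11 yields 2 hops.
 */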
3177 
3178 /*
3179  * Retrieve incoming source route for use in replies,
3180  * in the same form used by setsockopt.
3181  * The first hop is placed before the options and will be removed later.
3182  */
3183 struct mbuf *
3184 ip_srcroute(void)
3185 {
3186 	struct in_addr *p, *q;
3187 	struct mbuf *m;
3188 
3189 	if (ip_nhops == 0) {
3190 		return NULL;
3191 	}
3192 
3193 	m = m_get(M_DONTWAIT, MT_HEADER);
3194 	if (m == NULL) {
3195 		return NULL;
3196 	}
3197 
3198 #define OPTSIZ  (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt))
3199 
3200 	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
3201 	m->m_len = ip_nhops * sizeof(struct in_addr) +
3202 	    sizeof(struct in_addr) + OPTSIZ;
3203 #if DIAGNOSTIC
3204 	if (ipprintfs) {
3205 		printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
3206 	}
3207 #endif
3208 
3209 	/*
3210 	 * First save first hop for return route
3211 	 */
3212 	p = &ip_srcrt.route[ip_nhops - 1];
3213 	*(mtod(m, struct in_addr *)) = *p--;
3214 #if DIAGNOSTIC
3215 	if (ipprintfs) {
3216 		printf(" hops %lx",
3217 		    (u_int32_t)ntohl(mtod(m, struct in_addr *)->s_addr));
3218 	}
3219 #endif
3220 
3221 	/*
3222 	 * Copy option fields and padding (nop) to mbuf.
3223 	 */
3224 	ip_srcrt.nop = IPOPT_NOP;
3225 	ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
3226 	(void) memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
3227 	    &ip_srcrt.nop, OPTSIZ);
3228 	q = (struct in_addr *)(void *)(mtod(m, caddr_t) +
3229 	    sizeof(struct in_addr) + OPTSIZ);
3230 #undef OPTSIZ
3231 	/*
3232 	 * Record return path as an IP source route,
3233 	 * reversing the path (pointers are now aligned).
3234 	 */
3235 	while (p >= ip_srcrt.route) {
3236 #if DIAGNOSTIC
3237 		if (ipprintfs) {
3238 			printf(" %lx", (u_int32_t)ntohl(q->s_addr));
3239 		}
3240 #endif
3241 		*q++ = *p--;
3242 	}
3243 	/*
3244 	 * Last hop goes to final destination.
3245 	 */
3246 	*q = ip_srcrt.dst;
3247 #if DIAGNOSTIC
3248 	if (ipprintfs) {
3249 		printf(" %lx\n", (u_int32_t)ntohl(q->s_addr));
3250 	}
3251 #endif
3252 	return m;
3253 }
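
/*
 * Annotation, not original source: the pointer walk above reverses the
 * recorded path -- p runs backward from the last recorded hop while q
 * fills the reply option forward, so a path recorded as A -> B -> C is
 * emitted as C -> B -> A, with the original sender (saved as
 * ip_srcrt.dst by save_rte()) appended as the final destination.
 */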
3254 
3255 /*
3256  * Strip out IP options, at higher level protocol in the kernel.
3257  */
3258 void
3259 ip_stripoptions(struct mbuf *m)
3260 {
3261 	int i;
3262 	struct ip *ip = mtod(m, struct ip *);
3263 	caddr_t opts;
3264 	int olen;
3265 
3266 	/* Expect 32-bit aligned data pointer on strict-align platforms */
3267 	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
3268 
3269 	/* use bcopy() since it supports overlapping range */
3270 	olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
3271 	opts = (caddr_t)(ip + 1);
3272 	i = m->m_len - (sizeof(struct ip) + olen);
3273 	bcopy(opts + olen, opts, (unsigned)i);
3274 	m->m_len -= olen;
3275 	if (m->m_flags & M_PKTHDR) {
3276 		m->m_pkthdr.len -= olen;
3277 	}
3278 	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
3279 
3280 	/*
3281 	 * We expect ip_{off,len} to be in host order by now, and
3282 	 * that the original IP header length has been subtracted
3283 	 * out from ip_len.  Temporarily adjust ip_len for checksum
3284 	 * recalculation, and restore it afterwards.
3285 	 */
3286 	ip->ip_len += sizeof(struct ip);
3287 
3288 	/* recompute checksum now that IP header is smaller */
3289 #if BYTE_ORDER != BIG_ENDIAN
3290 	HTONS(ip->ip_len);
3291 	HTONS(ip->ip_off);
3292 #endif /* BYTE_ORDER != BIG_ENDIAN */
3293 	ip->ip_sum = in_cksum_hdr(ip);
3294 #if BYTE_ORDER != BIG_ENDIAN
3295 	NTOHS(ip->ip_off);
3296 	NTOHS(ip->ip_len);
3297 #endif /* BYTE_ORDER != BIG_ENDIAN */
3298 
3299 	ip->ip_len -= sizeof(struct ip);
3300 
3301 	/*
3302 	 * Given that we've just stripped IP options from the header,
3303 	 * we need to adjust the start offset accordingly if this
3304 	 * packet had gone thru partial checksum offload.
3305 	 */
3306 	if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
3307 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
3308 		if (m->m_pkthdr.csum_rx_start >= (sizeof(struct ip) + olen)) {
3309 			/* most common case */
3310 			m->m_pkthdr.csum_rx_start -= olen;
3311 		} else {
3312 			/* compute checksum in software instead */
3313 			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
3314 			m->m_pkthdr.csum_data = 0;
3315 			ipstat.ips_adj_hwcsum_clr++;
3316 		}
3317 	}
3318 }
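
/*
 * A minimal sketch, not from the original source, of the option
 * removal above: the bytes that follow the options are slid down over
 * them with an overlap-safe copy, shrinking the header back to the
 * canonical 20 bytes.
 */
static void
strip_options_sketch(u_char *hdr, int hlen, int mlen)
{
	int olen = hlen - (int)sizeof(struct ip);

	if (olen > 0) {
		/* bcopy() tolerates overlapping source/destination */
		bcopy(hdr + hlen, hdr + sizeof(struct ip),
		    (size_t)(mlen - hlen));
	}
}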
3319 
3320 u_char inetctlerrmap[PRC_NCMDS] = {
3321 	0, 0, 0, 0,
3322 	0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
3323 	ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
3324 	EMSGSIZE, EHOSTUNREACH, 0, 0,
3325 	0, 0, EHOSTUNREACH, 0,
3326 	ENOPROTOOPT, ECONNREFUSED
3327 };
3328 
3329 static int
3330 sysctl_ipforwarding SYSCTL_HANDLER_ARGS
3331 {
3332 #pragma unused(arg1, arg2)
3333 	int i, was_ipforwarding = ipforwarding;
3334 
3335 	i = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
3336 	if (i != 0 || req->newptr == USER_ADDR_NULL) {
3337 		return i;
3338 	}
3339 
3340 	if (was_ipforwarding && !ipforwarding) {
3341 		/* clean up IPv4 forwarding cached routes */
3342 		ifnet_head_lock_shared();
3343 		for (i = 0; i <= if_index; i++) {
3344 			struct ifnet *ifp = ifindex2ifnet[i];
3345 			if (ifp != NULL) {
3346 				lck_mtx_lock(&ifp->if_cached_route_lock);
3347 				ROUTE_RELEASE(&ifp->if_fwd_route);
3348 				bzero(&ifp->if_fwd_route,
3349 				    sizeof(ifp->if_fwd_route));
3350 				lck_mtx_unlock(&ifp->if_cached_route_lock);
3351 			}
3352 		}
3353 		ifnet_head_done();
3354 	}
3355 
3356 	return 0;
3357 }
3358 
3359 /*
3360  * Similar to inp_route_{copyout,copyin} routines except that these copy
3361  * out the cached IPv4 forwarding route from struct ifnet instead of the
3362  * inpcb.  See comments for those routines for explanations.
3363  */
3364 static void
3365 ip_fwd_route_copyout(struct ifnet *ifp, struct route *dst)
3366 {
3367 	struct route *src = &ifp->if_fwd_route;
3368 
3369 	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3370 	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3371 
3372 	/* Minor sanity check */
3373 	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3374 		panic("%s: wrong or corrupted route: %p", __func__, src);
3375 	}
3376 
3377 	route_copyout(dst, src, sizeof(*dst));
3378 
3379 	lck_mtx_unlock(&ifp->if_cached_route_lock);
3380 }
3381 
3382 static void
3383 ip_fwd_route_copyin(struct ifnet *ifp, struct route *src)
3384 {
3385 	struct route *dst = &ifp->if_fwd_route;
3386 
3387 	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3388 	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3389 
3390 	/* Minor sanity check */
3391 	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3392 		panic("%s: wrong or corrupted route: %p", __func__, src);
3393 	}
3394 
3395 	if (ifp->if_fwd_cacheok) {
3396 		route_copyin(src, dst, sizeof(*src));
3397 	}
3398 
3399 	lck_mtx_unlock(&ifp->if_cached_route_lock);
3400 }
3401 
3402 /*
3403  * Forward a packet.  If some error occurs return the sender
3404  * an icmp packet.  Note we can't always generate a meaningful
3405  * icmp message because icmp doesn't have a large enough repertoire
3406  * of codes and types.
3407  *
3408  * If not forwarding, just drop the packet.  This could be confusing
3409  * if ipforwarding was zero but some routing protocol was advancing
3410  * us as a gateway to somewhere.  However, we must let the routing
3411  * protocol deal with that.
3412  *
3413  * The srcrt parameter indicates whether the packet is being forwarded
3414  * via a source route.
3415  */
3416 static void
3417 ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop)
{
#pragma unused(next_hop)
	struct ip *ip = mtod(m, struct ip *);
	struct sockaddr_in *sin;
	struct rtentry *rt;
	struct route fwd_rt;
	int error, type = 0, code = 0;
	struct mbuf *mcopy;
	n_long dest;
	struct in_addr pkt_dst;
	u_int32_t nextmtu = 0, len;
	struct ip_out_args ipoa;
	struct ifnet *rcvifp = m->m_pkthdr.rcvif;

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_sotc = SO_TC_UNSPEC;
	ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

#if IPSEC
	struct secpolicy *sp = NULL;
	int ipsecerror;
#endif /* IPSEC */
#if PF
	struct pf_mtag *pf_mtag;
#endif /* PF */

	dest = 0;
	pkt_dst = ip->ip_dst;

#if DIAGNOSTIC
	if (ipprintfs) {
		printf("forward: src %lx dst %lx ttl %x\n",
		    (u_int32_t)ip->ip_src.s_addr, (u_int32_t)pkt_dst.s_addr,
		    ip->ip_ttl);
	}
#endif

	if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
		OSAddAtomic(1, &ipstat.ips_cantforward);
		m_freem(m);
		return;
	}
#if IPSTEALTH
	if (!ipstealth) {
#endif /* IPSTEALTH */
	if (ip->ip_ttl <= IPTTLDEC) {
		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
		    dest, 0);
		return;
	}
#if IPSTEALTH
}
#endif /* IPSTEALTH */

#if PF
	pf_mtag = pf_find_mtag(m);
	if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) {
		ipoa.ipoa_boundif = pf_mtag->pftag_rtableid;
		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
	}
#endif /* PF */

	ip_fwd_route_copyout(rcvifp, &fwd_rt);

	sin = SIN(&fwd_rt.ro_dst);
	if (ROUTE_UNUSABLE(&fwd_rt) || pkt_dst.s_addr != sin->sin_addr.s_addr) {
		ROUTE_RELEASE(&fwd_rt);

		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = pkt_dst;

		rtalloc_scoped_ign(&fwd_rt, RTF_PRCLONING, ipoa.ipoa_boundif);
		if (fwd_rt.ro_rt == NULL) {
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
			goto done;
		}
	}
	rt = fwd_rt.ro_rt;

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster. Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	MGET(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && m_dup_pkthdr(mcopy, m, M_DONTWAIT) == 0) {
		mcopy->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
		    (int)ip->ip_len);
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}
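	/*
	 * Worked example: for an option-less header, ip_vhl is 0x45,
	 * IP_VHL_HL() yields 5 words, and the saved length is
	 * (5 << 2) + 8 = 28 bytes -- the 20-byte header plus the first
	 * 8 bytes of payload that ICMP errors must quote.
	 */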

#if IPSTEALTH
	if (!ipstealth) {
#endif /* IPSTEALTH */
	ip->ip_ttl -= IPTTLDEC;
#if IPSTEALTH
}
#endif /* IPSTEALTH */

	/*
	 * If we are forwarding the packet out the same interface it came
	 * in on, we may want to send a redirect to the sender to shortcut
	 * a hop.  Only send the redirect if the source is sending directly
	 * to us, and if the packet was not source routed (and has no
	 * options).  Also, don't send a redirect if forwarding via a
	 * default route or a route modified by a redirect.
	 */
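	/*
	 * Example: a host on 192.0.2.0/24 hands us a packet whose best
	 * next hop is another router on that same subnet.  We forward
	 * the packet anyway, but also send an ICMP host redirect so the
	 * host can talk to that gateway directly next time.
	 */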
	RT_LOCK_SPIN(rt);
	if (rt->rt_ifp == m->m_pkthdr.rcvif &&
	    !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
	    satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
	    ipsendredirects && !srcrt && rt->rt_ifa != NULL) {
		struct in_ifaddr *ia = (struct in_ifaddr *)rt->rt_ifa;
		u_int32_t src = ntohl(ip->ip_src.s_addr);

		/* Become a regular mutex */
		RT_CONVERT_LOCK(rt);
		IFA_LOCK_SPIN(&ia->ia_ifa);
		if ((src & ia->ia_subnetmask) == ia->ia_subnet) {
			if (rt->rt_flags & RTF_GATEWAY) {
				dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
			} else {
				dest = pkt_dst.s_addr;
			}
			/*
			 * Router requirements say to send only
			 * host redirects.
			 */
			type = ICMP_REDIRECT;
			code = ICMP_REDIRECT_HOST;
#if DIAGNOSTIC
			if (ipprintfs) {
				printf("redirect (%d) to %lx\n", code,
				    (u_int32_t)dest);
			}
#endif
		}
		IFA_UNLOCK(&ia->ia_ifa);
	}
	RT_UNLOCK(rt);

	/* Mark this packet as being forwarded from another interface */
	m->m_pkthdr.pkt_flags |= PKTF_FORWARDED;
	len = m_pktlen(m);

	error = ip_output(m, NULL, &fwd_rt, IP_FORWARDING | IP_OUTARGS,
	    NULL, &ipoa);

	/* Refresh rt since the route could have changed while in IP */
	rt = fwd_rt.ro_rt;

	if (error != 0) {
		OSAddAtomic(1, &ipstat.ips_cantforward);
	} else {
		/*
		 * Increment stats on the source interface; those for
		 * the destination interface have been taken care of
		 * during output above by virtue of PKTF_FORWARDED.
		 */
		rcvifp->if_fpackets++;
		rcvifp->if_fbytes += len;

		OSAddAtomic(1, &ipstat.ips_forward);
		if (type != 0) {
			OSAddAtomic(1, &ipstat.ips_redirectsent);
		} else {
			if (mcopy != NULL) {
				/*
				 * If we didn't have to go through ipflow
				 * and the packet was successfully consumed
				 * by ip_output, the mcopy is rather a
				 * waste; this could be further optimized.
				 */
				m_freem(mcopy);
			}
			goto done;
		}
	}
	if (mcopy == NULL) {
		goto done;
	}

	switch (error) {
	case 0:                         /* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:               /* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

		if (rt == NULL) {
			break;
		} else {
			RT_LOCK_SPIN(rt);
			if (rt->rt_ifp != NULL) {
				nextmtu = rt->rt_ifp->if_mtu;
			}
			RT_UNLOCK(rt);
		}
#if IPSEC
		if (ipsec_bypass) {
			break;
		}

		/*
		 * If the packet is routed over an IPsec tunnel, tell the
		 * originator the tunnel MTU.
		 *	tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
		 * XXX quickhack!!!
		 */
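		/*
		 * Worked example (sizes are illustrative): with a
		 * 1500-byte underlying MTU and ipsec_hdrsiz() estimating
		 * 48 bytes of tunnel-mode overhead (outer IP header plus
		 * ESP framing), the advertised next-hop MTU would be
		 * 1500 - 48 = 1452 bytes.
		 */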
		sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND,
		    IP_FORWARDING, &ipsecerror);

		if (sp == NULL) {
			break;
		}

		/*
		 * find the correct route for outer IPv4
		 * header, compute tunnel MTU.
		 */
		nextmtu = 0;

		if (sp->req != NULL &&
		    sp->req->saidx.mode == IPSEC_MODE_TUNNEL) {
			struct secasindex saidx;
			struct secasvar *sav;
			struct route *ro;
			struct ip *ipm;
			size_t ipsechdr;

			/* count IPsec header size */
			ipsechdr = ipsec_hdrsiz(sp);

			ipm = mtod(mcopy, struct ip *);
			bcopy(&sp->req->saidx, &saidx, sizeof(saidx));
			saidx.mode = sp->req->saidx.mode;
			saidx.reqid = sp->req->saidx.reqid;
			sin = SIN(&saidx.src);
			if (sin->sin_len == 0) {
				sin->sin_len = sizeof(*sin);
				sin->sin_family = AF_INET;
				sin->sin_port = IPSEC_PORT_ANY;
				bcopy(&ipm->ip_src, &sin->sin_addr,
				    sizeof(sin->sin_addr));
			}
			sin = SIN(&saidx.dst);
			if (sin->sin_len == 0) {
				sin->sin_len = sizeof(*sin);
				sin->sin_family = AF_INET;
				sin->sin_port = IPSEC_PORT_ANY;
				bcopy(&ipm->ip_dst, &sin->sin_addr,
				    sizeof(sin->sin_addr));
			}
			sav = key_allocsa_policy(&saidx);
			if (sav != NULL) {
				lck_mtx_lock(sadb_mutex);
				if (sav->sah != NULL) {
					ro = (struct route *)&sav->sah->sa_route;
					if (ro->ro_rt != NULL) {
						RT_LOCK(ro->ro_rt);
						if (ro->ro_rt->rt_ifp != NULL) {
							nextmtu = ro->ro_rt->
							    rt_ifp->if_mtu;
							nextmtu -= ipsechdr;
						}
						RT_UNLOCK(ro->ro_rt);
					}
				}
				key_freesav(sav, KEY_SADB_LOCKED);
				lck_mtx_unlock(sadb_mutex);
			}
		}
		key_freesp(sp, KEY_SADB_UNLOCKED);
#endif /* IPSEC */
		break;

	case ENOBUFS:
		/*
		 * RFC 1812 (Requirements for IP Version 4 Routers) says
		 * a router should not generate ICMP_SOURCEQUENCH.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (ip_sendsourcequench == 0) {
			m_freem(mcopy);
			goto done;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:
		m_freem(mcopy);
		goto done;
	}

	if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) {
		OSAddAtomic(1, &ipstat.ips_cantfrag);
	}

	icmp_error(mcopy, type, code, dest, nextmtu);
done:
	ip_fwd_route_copyin(rcvifp, &fwd_rt);
}
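
/*
 * Illustrative sketch of the "quote the header plus 64 bits of payload"
 * rule (RFC 792) that sizes the mcopy above; standalone C with a raw
 * version/header-length byte instead of the kernel's struct ip:
 */
#if 0   /* illustrative example; not built */
#include <stdint.h>

/* How many bytes of a datagram an ICMP error should echo back. */
static int
icmp_quote_len(uint8_t vhl, uint16_t total_len)
{
	int hlen = (vhl & 0x0f) << 2;   /* header length, words -> bytes */
	int want = hlen + 8;            /* header + 64 bits of payload */

	return want < (int)total_len ? want : (int)total_len;
}
#endif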

int
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	*mp = NULL;
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		getmicrotime(&tv);
		mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) {
		uint64_t time;

		time = mach_absolute_time();
		mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
		    SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) {
		uint64_t time;

		time = mach_continuous_time();
		mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
		    SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if (inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) {
		int tc = m_get_traffic_class(m);

		mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc),
		    SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if ((inp->inp_socket->so_flags & SOF_RECV_WAKE_PKT) &&
	    (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
		int flag = 1;

		mp = sbcreatecontrol_mbuf((caddr_t)&flag, sizeof(flag),
		    SO_RECV_WAKE_PKT, SOL_SOCKET, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}

	if (inp->inp_flags & INP_RECVDSTADDR || SOFLOW_ENABLED(inp->inp_socket)) {
		mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
#ifdef notyet
	/*
	 * XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		mp = sbcreatecontrol_mbuf((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		mp = sbcreatecontrol_mbuf((caddr_t)ip_srcroute(),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
#endif /* notyet */
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
		struct sockaddr_dl *sdl2 = SDL(&sdlbuf);

		/*
		 * Make sure to accommodate the largest possible
		 * size of SA(if_lladdr)->sa_len.
		 */
		_CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));

		ifnet_head_lock_shared();
		if ((ifp = m->m_pkthdr.rcvif) != NULL &&
		    ifp->if_index && (ifp->if_index <= if_index)) {
			struct ifaddr *ifa = ifnet_addrs[ifp->if_index - 1];
			struct sockaddr_dl *sdp;

			if (!ifa || !ifa->ifa_addr) {
				goto makedummy;
			}

			IFA_LOCK_SPIN(ifa);
			sdp = SDL(ifa->ifa_addr);
			/*
			 * Change our mind and don't try to copy.
			 */
			if (sdp->sdl_family != AF_LINK) {
				IFA_UNLOCK(ifa);
				goto makedummy;
			}
			/* the above _CASSERT ensures sdl_len fits in sdlbuf */
			bcopy(sdp, sdl2, sdp->sdl_len);
			IFA_UNLOCK(ifa);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		ifnet_head_done();
		mp = sbcreatecontrol_mbuf((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if (inp->inp_flags & INP_RECVTTL) {
		mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_ttl,
		    sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if (inp->inp_flags & INP_PKTINFO) {
		struct in_pktinfo pi;

		bzero(&pi, sizeof(struct in_pktinfo));
		bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof(struct in_addr));
		pi.ipi_ifindex = (m != NULL && m->m_pkthdr.rcvif != NULL) ?
		    m->m_pkthdr.rcvif->if_index : 0;

		mp = sbcreatecontrol_mbuf((caddr_t)&pi,
		    sizeof(struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	if (inp->inp_flags & INP_RECVTOS) {
		mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP, mp);
		if (*mp == NULL) {
			goto no_mbufs;
		}
	}
	return 0;

no_mbufs:
	ipstat.ips_pktdropcntrl++;
	return ENOBUFS;
}
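
/*
 * Illustrative userspace sketch: consuming the control messages built
 * above.  With IP_RECVTTL and IP_RECVDSTADDR enabled, recvmsg() delivers
 * cmsgs whose cmsg_type values match the ones ip_savecontrol() attaches:
 */
#if 0   /* illustrative example; not built */
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

static void
recv_with_cmsgs(int s)
{
	char data[2048], ctl[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg;
	struct cmsghdr *cm;
	int on = 1;

	setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = ctl;
	msg.msg_controllen = sizeof(ctl);

	if (recvmsg(s, &msg, 0) < 0) {
		return;
	}
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
	    cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level != IPPROTO_IP) {
			continue;
		}
		if (cm->cmsg_type == IP_RECVTTL) {
			/* CMSG_DATA(cm) holds the u_char TTL */
		} else if (cm->cmsg_type == IP_RECVDSTADDR) {
			/* CMSG_DATA(cm) holds a struct in_addr */
		}
	}
}
#endif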

static inline u_short
ip_cksum(struct mbuf *m, int hlen)
{
	u_short sum;

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		/*
		 * The packet arrived on an interface which isn't capable
		 * of verifying the IP header checksum; compute it now.
		 */
		sum = ip_cksum_hdr_in(m, hlen);
	} else {
		sum = 0;
		m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
		    CSUM_IP_CHECKED | CSUM_IP_VALID);
		m->m_pkthdr.csum_data = 0xffff;
	}

	if (sum != 0) {
		OSAddAtomic(1, &ipstat.ips_badsum);
	}

	return sum;
}
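
/*
 * Illustrative sketch of the Internet checksum (RFC 1071) that
 * ip_cksum_hdr_in() verifies: a 16-bit one's-complement sum over the
 * header.  A good header sums to 0xffff, so the complemented result
 * below is 0 exactly when the checksum verifies:
 */
#if 0   /* illustrative example; not built */
#include <stdint.h>
#include <stddef.h>

static uint16_t
in_cksum_hdr(const void *hdr, size_t hlen)
{
	const uint16_t *p = hdr;
	uint32_t sum = 0;

	while (hlen > 1) {              /* sum 16-bit words */
		sum += *p++;
		hlen -= 2;
	}
	if (hlen == 1) {                /* IP headers are even-sized; */
		sum += *(const uint8_t *)p;     /* kept for generality */
	}
	while (sum >> 16) {             /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	}
	return (uint16_t)~sum;
}
#endif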

static int
ip_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldlen = (size_t)sizeof(struct ipstat);
	}

	return SYSCTL_OUT(req, &ipstat, MIN(sizeof(ipstat), req->oldlen));
}
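
/*
 * Illustrative userspace sketch: reading this handler's output.  The
 * OID name "net.inet.ip.stats" is assumed here to be the one this
 * handler is registered under:
 */
#if 0   /* illustrative example; not built */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <stdio.h>

static void
dump_ip_stats(void)
{
	struct ipstat st;
	size_t len = sizeof(st);

	if (sysctlbyname("net.inet.ip.stats", &st, &len, NULL, 0) == 0) {
		printf("forwarded %u, bad checksums %u\n",
		    (unsigned)st.ips_forward, (unsigned)st.ips_badsum);
	}
}
#endif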

void
ip_setsrcifaddr_info(struct mbuf *m, uint16_t src_idx, struct in_ifaddr *ia)
{
	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * If the source ifaddr is specified, pick up the information
	 * from there; otherwise just grab the passed-in ifindex as the
	 * caller may not have the ifaddr available.
	 */
	if (ia != NULL) {
		m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
		m->m_pkthdr.src_ifindex = ia->ia_ifp->if_index;
	} else {
		m->m_pkthdr.src_ifindex = src_idx;
		if (src_idx != 0) {
			m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
		}
	}
}

void
ip_setdstifaddr_info(struct mbuf *m, uint16_t dst_idx, struct in_ifaddr *ia)
{
	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * If the destination ifaddr is specified, pick up the information
	 * from there; otherwise just grab the passed-in ifindex as the
	 * caller may not have the ifaddr available.
	 */
	if (ia != NULL) {
		m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
		m->m_pkthdr.dst_ifindex = ia->ia_ifp->if_index;
	} else {
		m->m_pkthdr.dst_ifindex = dst_idx;
		if (dst_idx != 0) {
			m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
		}
	}
}

int
ip_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *iaf)
{
	VERIFY(m->m_flags & M_PKTHDR);

	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
		return -1;
	}

	if (src_idx != NULL) {
		*src_idx = m->m_pkthdr.src_ifindex;
	}

	if (iaf != NULL) {
		*iaf = 0;
	}

	return 0;
}

int
ip_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *iaf)
{
	VERIFY(m->m_flags & M_PKTHDR);

	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
		return -1;
	}

	if (dst_idx != NULL) {
		*dst_idx = m->m_pkthdr.dst_ifindex;
	}

	if (iaf != NULL) {
		*iaf = 0;
	}

	return 0;
}
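
/*
 * Illustrative sketch: the setters and getters above pair up through
 * PKTF_IFAINFO.  A hypothetical caller stamping a packet with its
 * source interface and reading the index back might look like this:
 */
#if 0   /* illustrative example; not built */
static void
example_ifainfo_roundtrip(struct mbuf *m, struct in_ifaddr *ia)
{
	uint32_t idx = 0;

	ip_setsrcifaddr_info(m, 0, ia);         /* stamp from the ifaddr */
	if (ip_getsrcifaddr_info(m, &idx, NULL) == 0) {
		/* idx now holds ia->ia_ifp->if_index */
	}
}
#endif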

/*
 * Protocol input handler for IPPROTO_GRE.
 */
void
gre_input(struct mbuf *m, int off)
{
	gre_input_func_t fn = gre_input_func;

	/*
	 * If there is a registered GRE input handler, pass the mbuf to it.
	 */
	if (fn != NULL) {
		lck_mtx_unlock(inet_domain_mutex);
		m = fn(m, off, (mtod(m, struct ip *))->ip_p);
		lck_mtx_lock(inet_domain_mutex);
	}

	/*
	 * If no matching tunnel that is up is found, inject the mbuf
	 * into the raw IP socket to see if anyone picks it up.
	 */
	if (m != NULL) {
		rip_input(m, off);
	}
}

/*
 * Private KPI for PPP/PPTP.
 */
int
ip_gre_register_input(gre_input_func_t fn)
{
	lck_mtx_lock(inet_domain_mutex);
	gre_input_func = fn;
	lck_mtx_unlock(inet_domain_mutex);

	return 0;
}
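
/*
 * Illustrative sketch of a handler matching this KPI.  Judging from the
 * call site in gre_input(), the function receives the mbuf, the IP
 * header offset, and the IP protocol number, and returns NULL once it
 * has consumed the packet (names are hypothetical):
 */
#if 0   /* illustrative example; not built */
static struct mbuf *
example_gre_handler(struct mbuf *m, int off, int proto)
{
#pragma unused(off)
	if (proto != IPPROTO_GRE) {
		return m;       /* not ours; let rip_input() see it */
	}
	/* ... demultiplex to the matching tunnel interface here ... */
	m_freem(m);
	return NULL;            /* consumed */
}

static void
example_register(void)
{
	ip_gre_register_input(example_gre_handler);
}
#endif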

#if (DEBUG || DEVELOPMENT)
static int
sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, i;

	i = ip_input_measure;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || req->newptr == USER_ADDR_NULL) {
		goto done;
	}
	/* impose bounds */
	if (i < 0 || i > 1) {
		error = EINVAL;
		goto done;
	}
	if (ip_input_measure != i && i == 1) {
		net_perf_initialize(&net_perf, ip_input_measure_bins);
	}
	ip_input_measure = i;
done:
	return error;
}

static int
sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	uint64_t i;

	i = ip_input_measure_bins;
	error = sysctl_handle_quad(oidp, &i, 0, req);
	if (error || req->newptr == USER_ADDR_NULL) {
		goto done;
	}
	/* validate data */
	if (!net_perf_validate_bins(i)) {
		error = EINVAL;
		goto done;
	}
	ip_input_measure_bins = i;
done:
	return error;
}

static int
sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldlen = (size_t)sizeof(struct net_perf);
	}

	return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
}
#endif /* (DEBUG || DEVELOPMENT) */

static int
sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, i;

	i = ip_checkinterface;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error != 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}

	switch (i) {
	case IP_CHECKINTERFACE_WEAK_ES:
	case IP_CHECKINTERFACE_HYBRID_ES:
	case IP_CHECKINTERFACE_STRONG_ES:
		if (ip_checkinterface != i) {
			ip_checkinterface = i;
			os_log(OS_LOG_DEFAULT, "%s: ip_checkinterface is now %d\n",
			    __func__, ip_checkinterface);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
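
/*
 * Illustrative userspace sketch: switching the mode from userspace.
 * The OID name "net.inet.ip.check_interface" is an assumption for
 * illustration; the handler above rejects any unknown mode value:
 */
#if 0   /* illustrative example; not built */
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_checkinterface(int mode)
{
	return sysctlbyname("net.inet.ip.check_interface",
	    NULL, NULL, &mode, sizeof(mode));
}
#endif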