1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
61  */
62 /*
63  * NOTICE: This file was modified by SPARTA, Inc. in 2007 to introduce
64  * support for mandatory and extensible security protections.  This notice
65  * is included in support of clause 2.2 (b) of the Apple Public License,
66  * Version 2.0.
67  */
68 
69 #define _IP_VHL
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/mbuf.h>
74 #include <sys/malloc.h>
75 #include <sys/domain.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/time.h>
79 #include <sys/kernel.h>
80 #include <sys/syslog.h>
81 #include <sys/sysctl.h>
82 #include <sys/mcache.h>
83 #include <sys/socketvar.h>
84 #include <sys/kdebug.h>
85 #include <mach/mach_time.h>
86 #include <mach/sdt.h>
87 
88 #include <machine/endian.h>
89 #include <dev/random/randomdev.h>
90 
91 #include <kern/queue.h>
92 #include <kern/locks.h>
93 #include <libkern/OSAtomic.h>
94 
95 #include <pexpert/pexpert.h>
96 
97 #include <net/if.h>
98 #include <net/if_var.h>
99 #include <net/if_dl.h>
100 #include <net/route.h>
101 #include <net/kpi_protocol.h>
102 #include <net/ntstat.h>
103 #include <net/dlil.h>
104 #include <net/classq/classq.h>
105 #include <net/net_perf.h>
106 #include <net/init.h>
107 #if PF
108 #include <net/pfvar.h>
109 #endif /* PF */
110 #include <net/if_ports_used.h>
111 
112 #include <netinet/in.h>
113 #include <netinet/in_systm.h>
114 #include <netinet/in_var.h>
115 #include <netinet/in_arp.h>
116 #include <netinet/ip.h>
117 #include <netinet/in_pcb.h>
118 #include <netinet/ip_var.h>
119 #include <netinet/ip_icmp.h>
120 #include <netinet/kpi_ipfilter_var.h>
121 #include <netinet/udp.h>
122 #include <netinet/udp_var.h>
123 #include <netinet/bootp.h>
124 
125 #if DUMMYNET
126 #include <netinet/ip_dummynet.h>
127 #endif /* DUMMYNET */
128 
129 #if IPSEC
130 #include <netinet6/ipsec.h>
131 #include <netkey/key.h>
132 #endif /* IPSEC */
133 
134 #include <os/log.h>
135 
136 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIP, 0)
137 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETIP, 2)
138 #define DBG_FNC_IP_INPUT        NETDBG_CODE(DBG_NETIP, (2 << 8))
139 
140 #if IPSEC
141 extern int ipsec_bypass;
142 #endif /* IPSEC */
143 
144 MBUFQ_HEAD(fq_head);
145 
146 static int frag_timeout_run;            /* frag timer is scheduled to run */
147 static void frag_timeout(void *);
148 static void frag_sched_timeout(void);
149 
150 static struct ipq *ipq_alloc(int);
151 static void ipq_free(struct ipq *);
152 static void ipq_updateparams(void);
153 static void ip_input_second_pass(struct mbuf *, struct ifnet *,
154     int, int, struct ip_fw_in_args *);
155 
156 static LCK_GRP_DECLARE(ipqlock_grp, "ipqlock");
157 static LCK_MTX_DECLARE(ipqlock, &ipqlock_grp);
158 
159 
160 /* Packet reassembly stuff */
161 #define IPREASS_NHASH_LOG2      6
162 #define IPREASS_NHASH           (1 << IPREASS_NHASH_LOG2)
163 #define IPREASS_HMASK           (IPREASS_NHASH - 1)
164 #define IPREASS_HASH(x, y) \
165 	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
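
/*
 * Illustrative worked example (editorial, not in the original source):
 * IPREASS_HASH() folds two nibbles of its first argument with its second
 * argument; as used by ip_reass() (outside this excerpt), x is typically
 * the source address and y the IP ID.  Assuming x = 0xC0A80101
 * (192.168.1.1) and y = 0x1234:
 *
 *	(x & 0xF)                 = 0x01
 *	((((x) >> 8) & 0xF) << 4) = 0x10
 *	(0x11 ^ 0x1234) & 0x3F    = 0x25
 *
 * so such a fragment lands in reassembly bucket 0x25 of the 64 buckets.
 */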
166 
167 /* IP fragment reassembly queues (protected by ipqlock) */
168 static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH]; /* ip reassembly queues */
169 static int maxnipq;                     /* max packets in reass queues */
170 static u_int32_t maxfragsperpacket;     /* max frags/packet in reass queues */
171 static u_int32_t nipq;                  /* # of packets in reass queues */
172 static u_int32_t ipq_limit;             /* ipq allocation limit */
173 static u_int32_t ipq_count;             /* current # of allocated ipq's */
174 
175 static int sysctl_ipforwarding SYSCTL_HANDLER_ARGS;
176 static int sysctl_maxnipq SYSCTL_HANDLER_ARGS;
177 static int sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS;
178 
179 #if (DEBUG || DEVELOPMENT)
180 static int sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS;
181 static int sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS;
182 static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS;
183 #endif /* (DEBUG || DEVELOPMENT) */
184 
185 int ipforwarding = 0;
186 SYSCTL_PROC(_net_inet_ip, IPCTL_FORWARDING, forwarding,
187     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0,
188     sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces");
189 
190 static int ipsendredirects = 1; /* XXX */
191 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect,
192     CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0,
193     "Enable sending IP redirects");
194 
195 int ip_defttl = IPDEFTTL;
196 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW | CTLFLAG_LOCKED,
197     &ip_defttl, 0, "Maximum TTL on IP packets");
198 
199 static int ip_dosourceroute = 0;
200 SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute,
201     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0,
202     "Enable forwarding source routed IP packets");
203 
204 static int ip_acceptsourceroute = 0;
205 SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
206     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0,
207     "Enable accepting source routed IP packets");
208 
209 static int ip_sendsourcequench = 0;
210 SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench,
211     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0,
212     "Enable the transmission of source quench packets");
213 
214 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
215     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq,
216     "I", "Maximum number of IPv4 fragment reassembly queue entries");
217 
218 SYSCTL_UINT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD | CTLFLAG_LOCKED,
219     &nipq, 0, "Current number of IPv4 fragment reassembly queue entries");
220 
221 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragsperpacket,
222     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0,
223     sysctl_maxfragsperpacket, "I",
224     "Maximum number of IPv4 fragments allowed per packet");
225 
226 static uint32_t ip_adj_clear_hwcksum = 0;
227 SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_clear_hwcksum,
228     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0,
229     "Invalidate hwcksum info when adjusting length");
230 
231 static uint32_t ip_adj_partial_sum = 1;
232 SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum,
233     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0,
234     "Perform partial sum adjustment of trailing bytes at IP layer");
235 
236 /*
237  * ip_checkinterface controls the receive side of the models for multihoming
238  * that are discussed in RFC 1122.
239  *
240  * ip_checkinterface values are:
241  *  IP_CHECKINTERFACE_WEAK_ES:
242  *	This corresponds to the Weak End-System model where incoming packets from
243  *	any interface are accepted provided the destination address of the incoming packet
244  *	is assigned to some interface.
245  *
246  *  IP_CHECKINTERFACE_HYBRID_ES:
247  *	The Hybrid End-System model use the Strong End-System for tunnel interfaces
248  *	The Hybrid End-System model uses the Strong End-System model for tunnel
249  *	interfaces (ipsec and utun) and the Weak End-System model for other
250  *	interface families.  This prevents a rogue middle box from probing for
251  *	signs of TCP connections that use the tunnel interface.
252  *  IP_CHECKINTERFACE_STRONG_ES:
253  *	The Strong End-System model requires that the packet arrive on an
254  *	interface that is assigned the destination address of the packet.
255  *
256  * Since the routing table and transmit implementation do not implement the Strong ES model,
257  * setting this to a value different from IP_CHECKINTERFACE_WEAK_ES may lead to unexpected results.
258  *
259  * When forwarding is enabled, the system reverts to the Weak ES model as a router
260  * is expected by design to receive packets from several interfaces to the same address.
261  *
262  * XXX - ip_checkinterface currently must be set to IP_CHECKINTERFACE_WEAK_ES if you use ipnat
263  * to translate the destination address to another local interface.
264  *
265  * XXX - ip_checkinterface must be set to IP_CHECKINTERFACE_WEAK_ES if you add IP aliases
266  * to the loopback interface instead of the interface where the
267  * packets for those addresses are received.
268  */
269 #define IP_CHECKINTERFACE_WEAK_ES       0
270 #define IP_CHECKINTERFACE_HYBRID_ES     1
271 #define IP_CHECKINTERFACE_STRONG_ES     2
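
/*
 * Editorial example, based on the boot-arg parsing in ip_init() and the
 * check_interface sysctl declared below; the model can be selected at
 * boot or at run time, e.g.:
 *
 *	nvram boot-args="ip_checkinterface=2"    # Strong ES model from boot
 *	sysctl net.inet.ip.check_interface=2     # Strong ES model at run time
 *
 * ip_init() ignores boot-arg values outside 0..2 and keeps the
 * IP_CHECKINTERFACE_HYBRID_ES default.
 */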
272 
273 static int ip_checkinterface = IP_CHECKINTERFACE_HYBRID_ES;
274 
275 static int sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS;
276 SYSCTL_PROC(_net_inet_ip, OID_AUTO, check_interface,
277     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
278     0, 0, sysctl_ip_checkinterface, "I", "Verify packet arrives on correct interface");
279 
280 #if (DEBUG || DEVELOPMENT)
281 #define IP_CHECK_IF_DEBUG 1
282 #else
283 #define IP_CHECK_IF_DEBUG 0
284 #endif /* (DEBUG || DEVELOPMENT) */
285 static int ip_checkinterface_debug = IP_CHECK_IF_DEBUG;
286 SYSCTL_INT(_net_inet_ip, OID_AUTO, checkinterface_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
287     &ip_checkinterface_debug, IP_CHECK_IF_DEBUG, "");
288 
289 static int ip_chaining = 1;
290 SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chaining, CTLFLAG_RW | CTLFLAG_LOCKED,
291     &ip_chaining, 1, "Do receive side ip address based chaining");
292 
293 static int ip_chainsz = 6;
294 SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chainsz, CTLFLAG_RW | CTLFLAG_LOCKED,
295     &ip_chainsz, 1, "IP receive side max chaining");
296 
297 #if (DEBUG || DEVELOPMENT)
298 static int ip_input_measure = 0;
299 SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf,
300     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
301     &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement");
302 
303 static uint64_t ip_input_measure_bins = 0;
304 SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_bins,
305     CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0,
306     sysctl_ip_input_measure_bins, "I",
307     "bins for chaining performance data histogram");
308 
309 static net_perf_t net_perf;
310 SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_data,
311     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
312     0, 0, sysctl_ip_input_getperf, "S,net_perf",
313     "IP input performance data (struct net_perf, net/net_perf.h)");
314 #endif /* (DEBUG || DEVELOPMENT) */
315 
316 #if DIAGNOSTIC
317 static int ipprintfs = 0;
318 #endif
319 
320 struct protosw *ip_protox[IPPROTO_MAX];
321 
322 static LCK_GRP_DECLARE(in_ifaddr_rwlock_grp, "in_ifaddr_rwlock");
323 LCK_RW_DECLARE(in_ifaddr_rwlock, &in_ifaddr_rwlock_grp);
324 
325 /* Protected by in_ifaddr_rwlock */
326 struct in_ifaddrhead in_ifaddrhead;             /* first inet address */
327 struct in_ifaddrhashhead *in_ifaddrhashtbl;     /* inet addr hash table  */
328 
329 #define INADDR_NHASH    61
330 static uint32_t inaddr_nhash;                  /* hash table size */
331 static uint32_t inaddr_hashp;                  /* next largest prime */
332 
333 static int ip_getstat SYSCTL_HANDLER_ARGS;
334 struct ipstat ipstat;
335 SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats,
336     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
337     0, 0, ip_getstat, "S,ipstat",
338     "IP statistics (struct ipstat, netinet/ip_var.h)");
339 
340 #if IPCTL_DEFMTU
341 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW | CTLFLAG_LOCKED,
342     &ip_mtu, 0, "Default MTU");
343 #endif /* IPCTL_DEFMTU */
344 
345 #if IPSTEALTH
346 static int      ipstealth = 0;
347 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED,
348     &ipstealth, 0, "");
349 #endif /* IPSTEALTH */
350 
351 #if DUMMYNET
352 ip_dn_io_t *ip_dn_io_ptr;
353 #endif /* DUMMYNET */
354 
355 SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal,
356     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local");
357 
358 struct ip_linklocal_stat ip_linklocal_stat;
359 SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat,
360     CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat,
361     "Number of link local packets with TTL less than 255");
362 
363 SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in,
364     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input");
365 
366 int ip_linklocal_in_allowbadttl = 1;
367 SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl,
368     CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0,
369     "Allow incoming link local packets with TTL less than 255");
370 
371 
372 /*
373  * We need to save the IP options in case a protocol wants to respond
374  * to an incoming packet over the same route if the packet got here
375  * using IP source routing.  This allows connection establishment and
376  * maintenance when the remote end is on a network that is not known
377  * to us.
378  */
379 static int      ip_nhops = 0;
380 static  struct ip_srcrt {
381 	struct  in_addr dst;                    /* final destination */
382 	char    nop;                            /* one NOP to align */
383 	char    srcopt[IPOPT_OFFSET + 1];       /* OPTVAL, OLEN and OFFSET */
384 	struct  in_addr route[MAX_IPOPTLEN / sizeof(struct in_addr)];
385 } ip_srcrt;
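
/*
 * Editorial sketch of the recorded layout (assuming an incoming packet
 * carried a loose/strict source route with three remaining hops):
 * save_rte() below copies the option into ip_srcrt and records the hop
 * count in ip_nhops so that a reply can later travel the reversed route:
 *
 *	ip_srcrt.dst     = final destination of the incoming packet
 *	ip_srcrt.srcopt  = option type (IPOPT_LSRR/IPOPT_SSRR), length, offset
 *	ip_srcrt.route[] = { hop1, hop2, hop3 }
 */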
386 
387 static void in_ifaddrhashtbl_init(void);
388 static void save_rte(u_char *, struct in_addr);
389 static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *);
390 static void ip_forward(struct mbuf *, int, struct sockaddr_in *);
391 static void frag_freef(struct ipqhead *, struct ipq *);
392 static struct mbuf *ip_reass(struct mbuf *);
393 static void ip_fwd_route_copyout(struct ifnet *, struct route *);
394 static void ip_fwd_route_copyin(struct ifnet *, struct route *);
395 static inline u_short ip_cksum(struct mbuf *, int);
396 
397 /*
398  * On platforms which require strict alignment (currently for anything but
399  * i386 or x86_64), check if the IP header pointer is 32-bit aligned; if not,
400  * copy the contents of the mbuf chain into a new chain, and free the original
401  * one.  Create some head room in the first mbuf of the new chain, in case
402  * it's needed later on.
403  */
404 #if defined(__i386__) || defined(__x86_64__)
405 #define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
406 #else /* !__i386__ && !__x86_64__ */
407 #define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do {                  \
408 	if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) {                     \
409 	        struct mbuf *_n;                                        \
410 	        struct ifnet *__ifp = (_ifp);                           \
411 	        atomic_add_64(&(__ifp)->if_alignerrs, 1);               \
412 	        if (((_m)->m_flags & M_PKTHDR) &&                       \
413 	            (_m)->m_pkthdr.pkt_hdr != NULL)                     \
414 	                (_m)->m_pkthdr.pkt_hdr = NULL;                  \
415 	        _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT);        \
416 	        if (_n == NULL) {                                       \
417 	                atomic_add_32(&ipstat.ips_toosmall, 1);         \
418 	                m_freem(_m);                                    \
419 	                (_m) = NULL;                                    \
420 	                _action;                                        \
421 	        } else {                                                \
422 	                VERIFY(_n != (_m));                             \
423 	                (_m) = _n;                                      \
424 	        }                                                       \
425 	}                                                               \
426 } while (0)
427 #endif /* !__i386__ && !__x86_64__ */
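
/*
 * Usage sketch, mirroring the call sites later in this file: the _action
 * argument supplies the statement to execute when m_defrag_offset() fails,
 * at which point the mbuf has already been freed, e.g.:
 *
 *	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
 *	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );
 *
 * On i386/x86_64 the macro expands to nothing, since those platforms
 * tolerate misaligned 32-bit loads.
 */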
428 
429 
430 typedef enum ip_check_if_result {
431 	IP_CHECK_IF_NONE = 0,
432 	IP_CHECK_IF_OURS = 1,
433 	IP_CHECK_IF_DROP = 2,
434 	IP_CHECK_IF_FORWARD = 3
435 } ip_check_if_result_t;
436 
437 static ip_check_if_result_t ip_input_check_interface(struct mbuf **, struct ip *, struct ifnet *);
438 
439 /*
440  * GRE input handler function, settable via ip_gre_register_input() for PPTP.
441  */
442 static gre_input_func_t gre_input_func;
443 
444 static void
445 ip_init_delayed(void)
446 {
447 	struct ifreq ifr;
448 	int error;
449 	struct sockaddr_in *sin;
450 
451 	bzero(&ifr, sizeof(ifr));
452 	strlcpy(ifr.ifr_name, "lo0", sizeof(ifr.ifr_name));
453 	sin = (struct sockaddr_in *)(void *)&ifr.ifr_addr;
454 	sin->sin_len = sizeof(struct sockaddr_in);
455 	sin->sin_family = AF_INET;
456 	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
457 	error = in_control(NULL, SIOCSIFADDR, (caddr_t)&ifr, lo_ifp, kernproc);
458 	if (error) {
459 		printf("%s: failed to initialise lo0's address, error=%d\n",
460 		    __func__, error);
461 	}
462 }
463 
464 /*
465  * IP initialization: fill in IP protocol switch table.
466  * All protocols not implemented in kernel go to raw IP protocol handler.
467  */
468 void
469 ip_init(struct protosw *pp, struct domain *dp)
470 {
471 	static int ip_initialized = 0;
472 	struct protosw *pr;
473 	struct timeval tv;
474 	int i;
475 
476 	domain_proto_mtx_lock_assert_held();
477 	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
478 
479 	/* ipq_alloc() uses mbufs for IP fragment queue structures */
480 	_CASSERT(sizeof(struct ipq) <= _MLEN);
481 
482 	/*
483 	 * Some ioctls (e.g. SIOCAIFADDR) use ifaliasreq struct, which is
484 	 * interchangeable with in_aliasreq; they must have the same size.
485 	 */
486 	_CASSERT(sizeof(struct ifaliasreq) == sizeof(struct in_aliasreq));
487 
488 	if (ip_initialized) {
489 		return;
490 	}
491 	ip_initialized = 1;
492 
493 	in_ifaddr_init();
494 
495 	TAILQ_INIT(&in_ifaddrhead);
496 	in_ifaddrhashtbl_init();
497 
498 	ip_moptions_init();
499 
500 	pr = pffindproto_locked(PF_INET, IPPROTO_RAW, SOCK_RAW);
501 	if (pr == NULL) {
502 		panic("%s: Unable to find [PF_INET,IPPROTO_RAW,SOCK_RAW]",
503 		    __func__);
504 		/* NOTREACHED */
505 	}
506 
507 	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
508 	for (i = 0; i < IPPROTO_MAX; i++) {
509 		ip_protox[i] = pr;
510 	}
511 	/*
512 	 * Cycle through IP protocols and put them into the appropriate place
513 	 * in ip_protox[], skipping protocols IPPROTO_{IP,RAW}.
514 	 */
515 	VERIFY(dp == inetdomain && dp->dom_family == PF_INET);
516 	TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
517 		VERIFY(pr->pr_domain == dp);
518 		if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
519 			/* Be careful to only index valid IP protocols. */
520 			if (pr->pr_protocol < IPPROTO_MAX) {
521 				ip_protox[pr->pr_protocol] = pr;
522 			}
523 		}
524 	}
525 
526 	lck_mtx_lock(&ipqlock);
527 	/* Initialize IP reassembly queue. */
528 	for (i = 0; i < IPREASS_NHASH; i++) {
529 		TAILQ_INIT(&ipq[i]);
530 	}
531 
532 	maxnipq = nmbclusters / 32;
533 	maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */
534 	ipq_updateparams();
535 	lck_mtx_unlock(&ipqlock);
536 
537 	getmicrotime(&tv);
538 	ip_id = (u_short)(RandomULong() ^ tv.tv_usec);
539 
540 	PE_parse_boot_argn("ip_checkinterface", &i, sizeof(i));
541 	switch (i) {
542 	case IP_CHECKINTERFACE_WEAK_ES:
543 	case IP_CHECKINTERFACE_HYBRID_ES:
544 	case IP_CHECKINTERFACE_STRONG_ES:
545 		ip_checkinterface = i;
546 		break;
547 	default:
548 		break;
549 	}
550 
551 	arp_init();
552 	net_init_add(ip_init_delayed);
553 }
554 
555 /*
556  * Initialize IPv4 source address hash table.
557  * Initialize the IPv4 interface address (in_ifaddr) hash table.
558 static void
559 in_ifaddrhashtbl_init(void)
560 {
561 	int i, k, p;
562 
563 	if (in_ifaddrhashtbl != NULL) {
564 		return;
565 	}
566 
567 	PE_parse_boot_argn("inaddr_nhash", &inaddr_nhash,
568 	    sizeof(inaddr_nhash));
569 	if (inaddr_nhash == 0) {
570 		inaddr_nhash = INADDR_NHASH;
571 	}
572 
573 	in_ifaddrhashtbl = zalloc_permanent(
574 		inaddr_nhash * sizeof(*in_ifaddrhashtbl),
575 		ZALIGN_PTR);
576 
577 	/*
578 	 * Find the next prime greater than inaddr_nhash.
579 	 */
580 	k = (inaddr_nhash % 2 == 0) ? inaddr_nhash + 1 : inaddr_nhash + 2;
581 	for (;;) {
582 		p = 1;
583 		for (i = 3; i * i <= k; i += 2) {
584 			if (k % i == 0) {
585 				p = 0;
586 			}
587 		}
588 		if (p == 1) {
589 			break;
590 		}
591 		k += 2;
592 	}
593 	inaddr_hashp = k;
594 }
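
/*
 * Worked example (editorial): with the default inaddr_nhash of
 * INADDR_NHASH (61), the loop above starts at k = 63 and rejects
 * 63 (7 * 9) and 65 (5 * 13) before settling on inaddr_hashp = 67,
 * the next prime greater than 61.
 */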
595 
596 uint32_t
597 inaddr_hashval(uint32_t key)
598 {
599 	/*
600 	 * The hash index is the computed prime times the key modulo
601 	 * the hash size, as documented in "Introduction to Algorithms"
602 	 * (Cormen, Leiserson, Rivest).
603 	 */
604 	if (inaddr_nhash > 1) {
605 		return (key * inaddr_hashp) % inaddr_nhash;
606 	} else {
607 		return 0;
608 	}
609 }
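
/*
 * Worked example (editorial): with the defaults computed above
 * (inaddr_nhash = 61, inaddr_hashp = 67), key = 100 hashes to
 * (100 * 67) % 61 = 6700 % 61 = 51.  The 32-bit multiply may wrap for
 * large keys, which is harmless for bucket selection.
 */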
610 
611 struct in_ifaddrhashhead *
612 inaddr_hashlookup(uint32_t key)
613 {
614 	return &in_ifaddrhashtbl[inaddr_hashval(key)];
615 }
616 
617 __private_extern__ void
618 ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto,
619     ipfilter_t inject_ipfref)
620 {
621 	struct ipfilter *filter;
622 	int seen = (inject_ipfref == NULL);
623 	int     changed_header = 0;
624 	struct ip *ip;
625 	void (*pr_input)(struct mbuf *, int len);
626 
627 	if (!TAILQ_EMPTY(&ipv4_filters)) {
628 		ipf_ref();
629 		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
630 			if (seen == 0) {
631 				if ((struct ipfilter *)inject_ipfref == filter) {
632 					seen = 1;
633 				}
634 			} else if (filter->ipf_filter.ipf_input) {
635 				errno_t result;
636 
637 				if (changed_header == 0) {
638 					/*
639 					 * Perform IP header alignment fixup,
640 					 * if needed, before passing packet
641 					 * into filter(s).
642 					 */
643 					IP_HDR_ALIGNMENT_FIXUP(m,
644 					    m->m_pkthdr.rcvif, ipf_unref());
645 
646 					/* ipf_unref() already called */
647 					if (m == NULL) {
648 						return;
649 					}
650 
651 					changed_header = 1;
652 					ip = mtod(m, struct ip *);
653 					ip->ip_len = htons(ip->ip_len + (uint16_t)hlen);
654 					ip->ip_off = htons(ip->ip_off);
655 					ip->ip_sum = 0;
656 					ip->ip_sum = ip_cksum_hdr_in(m, hlen);
657 				}
658 				result = filter->ipf_filter.ipf_input(
659 					filter->ipf_filter.cookie, (mbuf_t *)&m,
660 					hlen, proto);
661 				if (result == EJUSTRETURN) {
662 					ipf_unref();
663 					return;
664 				}
665 				if (result != 0) {
666 					ipf_unref();
667 					m_freem(m);
668 					return;
669 				}
670 			}
671 		}
672 		ipf_unref();
673 	}
674 
675 	/* Perform IP header alignment fixup (post-filters), if needed */
676 	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );
677 
678 	ip = mtod(m, struct ip *);
679 
680 	if (changed_header) {
681 		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
682 		ip->ip_off = ntohs(ip->ip_off);
683 	}
684 
685 	/*
686 	 * If there isn't a specific lock for the protocol
687 	 * we're about to call, use the generic lock for AF_INET.
688 	 * otherwise let the protocol deal with its own locking
689 	 */
690 	if ((pr_input = ip_protox[ip->ip_p]->pr_input) == NULL) {
691 		m_freem(m);
692 	} else if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
693 		lck_mtx_lock(inet_domain_mutex);
694 		pr_input(m, hlen);
695 		lck_mtx_unlock(inet_domain_mutex);
696 	} else {
697 		pr_input(m, hlen);
698 	}
699 }
700 
701 struct pktchain_elm {
702 	struct mbuf     *pkte_head;
703 	struct mbuf     *pkte_tail;
704 	struct in_addr  pkte_saddr;
705 	struct in_addr  pkte_daddr;
706 	uint16_t        pkte_npkts;
707 	uint16_t        pkte_proto;
708 	uint32_t        pkte_nbytes;
709 };
710 
711 typedef struct pktchain_elm pktchain_elm_t;
712 
713 /* Store up to PKTTBL_SZ unique flows on the stack */
714 #define PKTTBL_SZ       7
715 
716 static struct mbuf *
717 ip_chain_insert(struct mbuf *packet, pktchain_elm_t *tbl)
718 {
719 	struct ip*      ip;
720 	int             pkttbl_idx = 0;
721 
722 	ip = mtod(packet, struct ip*);
723 
724 	/* reusing the hash function from inaddr_hashval */
725 	pkttbl_idx = inaddr_hashval(ntohl(ip->ip_src.s_addr)) % PKTTBL_SZ;
726 	if (tbl[pkttbl_idx].pkte_head == NULL) {
727 		tbl[pkttbl_idx].pkte_head = packet;
728 		tbl[pkttbl_idx].pkte_saddr.s_addr = ip->ip_src.s_addr;
729 		tbl[pkttbl_idx].pkte_daddr.s_addr = ip->ip_dst.s_addr;
730 		tbl[pkttbl_idx].pkte_proto = ip->ip_p;
731 	} else {
732 		if ((ip->ip_dst.s_addr == tbl[pkttbl_idx].pkte_daddr.s_addr) &&
733 		    (ip->ip_src.s_addr == tbl[pkttbl_idx].pkte_saddr.s_addr) &&
734 		    (ip->ip_p == tbl[pkttbl_idx].pkte_proto)) {
735 		} else {
736 			return packet;
737 		}
738 	}
739 	if (tbl[pkttbl_idx].pkte_tail != NULL) {
740 		mbuf_setnextpkt(tbl[pkttbl_idx].pkte_tail, packet);
741 	}
742 
743 	tbl[pkttbl_idx].pkte_tail = packet;
744 	tbl[pkttbl_idx].pkte_npkts += 1;
745 	tbl[pkttbl_idx].pkte_nbytes += packet->m_pkthdr.len;
746 	return NULL;
747 }
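
/*
 * Editorial note: ip_chain_insert() returns NULL when the packet was
 * queued into a bucket (first packet, or same src/dst/proto 3-tuple as
 * the bucket) and returns the packet itself on a collision with a
 * different flow; the caller, ip_input_process_list(), then flushes the
 * table and restarts, counting the event in ips_rxc_collisions.
 */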
748 
749 /* args is a dummy variable here for backward compatibility */
750 static void
751 ip_input_second_pass_loop_tbl(pktchain_elm_t *tbl, struct ip_fw_in_args *args)
752 {
753 	int i = 0;
754 
755 	for (i = 0; i < PKTTBL_SZ; i++) {
756 		if (tbl[i].pkte_head != NULL) {
757 			struct mbuf *m = tbl[i].pkte_head;
758 			ip_input_second_pass(m, m->m_pkthdr.rcvif,
759 			    tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args);
760 
761 			if (tbl[i].pkte_npkts > 2) {
762 				ipstat.ips_rxc_chainsz_gt2++;
763 			}
764 			if (tbl[i].pkte_npkts > 4) {
765 				ipstat.ips_rxc_chainsz_gt4++;
766 			}
767 #if (DEBUG || DEVELOPMENT)
768 			if (ip_input_measure) {
769 				net_perf_histogram(&net_perf, tbl[i].pkte_npkts);
770 			}
771 #endif /* (DEBUG || DEVELOPMENT) */
772 			tbl[i].pkte_head = tbl[i].pkte_tail = NULL;
773 			tbl[i].pkte_npkts = 0;
774 			tbl[i].pkte_nbytes = 0;
775 			/* no need to initialize address and protocol in tbl */
776 		}
777 	}
778 }
779 
780 static void
781 ip_input_cpout_args(struct ip_fw_in_args *args, struct ip_fw_args *args1,
782     boolean_t *done_init)
783 {
784 	if (*done_init == FALSE) {
785 		bzero(args1, sizeof(struct ip_fw_args));
786 		*done_init = TRUE;
787 	}
788 	args1->fwa_pf_rule = args->fwai_pf_rule;
789 }
790 
791 static void
792 ip_input_cpin_args(struct ip_fw_args *args1, struct ip_fw_in_args *args)
793 {
794 	args->fwai_pf_rule = args1->fwa_pf_rule;
795 }
796 
797 typedef enum {
798 	IPINPUT_DOCHAIN = 0,
799 	IPINPUT_DONTCHAIN,
800 	IPINPUT_FREED,
801 	IPINPUT_DONE
802 } ipinput_chain_ret_t;
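
/*
 * Editorial summary of the ip_input_first_pass() return values:
 *
 *	IPINPUT_DOCHAIN   - packet validated and eligible for flow chaining
 *	IPINPUT_DONTCHAIN - packet validated but must be delivered singly
 *	                    (e.g. fragments or PR_LASTHDR protocols)
 *	IPINPUT_FREED     - packet was freed during validation
 *	IPINPUT_DONE      - packet was already fully dispatched (e.g. via a
 *	                    reinjecting ipfilter); nothing left to do
 */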
803 
804 static void
805 ip_input_update_nstat(struct ifnet *ifp, struct in_addr src_ip,
806     u_int32_t packets, u_int32_t bytes)
807 {
808 	if (nstat_collect) {
809 		struct rtentry *rt = ifnet_cached_rtlookup_inet(ifp,
810 		    src_ip);
811 		if (rt != NULL) {
812 			nstat_route_rx(rt, packets, bytes, 0);
813 			rtfree(rt);
814 		}
815 	}
816 }
817 
818 static void
819 ip_input_dispatch_chain(struct mbuf *m)
820 {
821 	struct mbuf *tmp_mbuf = m;
822 	struct mbuf *nxt_mbuf = NULL;
823 	struct ip *ip = NULL;
824 	unsigned int hlen;
825 
826 	ip = mtod(tmp_mbuf, struct ip *);
827 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
828 	while (tmp_mbuf != NULL) {
829 		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
830 		mbuf_setnextpkt(tmp_mbuf, NULL);
831 		ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0);
832 		tmp_mbuf = nxt_mbuf;
833 		if (tmp_mbuf) {
834 			ip = mtod(tmp_mbuf, struct ip *);
835 			/* first mbuf of chain already has adjusted ip_len */
836 			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
837 			ip->ip_len -= hlen;
838 		}
839 	}
840 }
841 
842 static void
843 ip_input_setdst_chain(struct mbuf *m, uint16_t ifindex, struct in_ifaddr *ia)
844 {
845 	struct mbuf *tmp_mbuf = m;
846 
847 	while (tmp_mbuf != NULL) {
848 		ip_setdstifaddr_info(tmp_mbuf, ifindex, ia);
849 		tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
850 	}
851 }
852 
853 static void
854 ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp)
855 {
856 	boolean_t adjust = TRUE;
857 
858 	ASSERT(m_pktlen(m) > ip->ip_len);
859 
860 	/*
861 	 * Invalidate hardware checksum info if ip_adj_clear_hwcksum
862 	 * is set; useful to handle buggy drivers.  Note that this
863 	 * should not be enabled by default, as we may get here due
864 	 * to link-layer padding.
865 	 */
866 	if (ip_adj_clear_hwcksum &&
867 	    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
868 	    !(inifp->if_flags & IFF_LOOPBACK) &&
869 	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
870 		m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
871 		m->m_pkthdr.csum_data = 0;
872 		ipstat.ips_adj_hwcsum_clr++;
873 	}
874 
875 	/*
876 	 * If partial checksum information is available, subtract
877 	 * out the partial sum of postpended extraneous bytes, and
878 	 * update the checksum metadata accordingly.  By doing it
879 	 * here, the upper layer transport only needs to adjust any
880 	 * prepended extraneous bytes (else it will do both.)
881 	 */
882 	if (ip_adj_partial_sum &&
883 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
884 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
885 		m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
886 		    m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
887 		    (ip->ip_len - m->m_pkthdr.csum_rx_start),
888 		    m->m_pkthdr.csum_rx_val);
889 	} else if ((m->m_pkthdr.csum_flags &
890 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
891 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
892 		/*
893 		 * If packet has partial checksum info and we decided not
894 		 * to subtract the partial sum of postpended extraneous
895 		 * bytes here (not the default case), leave that work to
896 		 * be handled by the other layers.  For now, only TCP, UDP
897 		 * layers are capable of dealing with this.  For all other
898 		 * protocols (including fragments), trim and ditch the
899 		 * partial sum as those layers might not implement partial
900 		 * checksumming (or adjustment) at all.
901 		 */
902 		if ((ip->ip_off & (IP_MF | IP_OFFMASK)) == 0 &&
903 		    (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_UDP)) {
904 			adjust = FALSE;
905 		} else {
906 			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
907 			m->m_pkthdr.csum_data = 0;
908 			ipstat.ips_adj_hwcsum_clr++;
909 		}
910 	}
911 
912 	if (adjust) {
913 		ipstat.ips_adj++;
914 		if (m->m_len == m->m_pkthdr.len) {
915 			m->m_len = ip->ip_len;
916 			m->m_pkthdr.len = ip->ip_len;
917 		} else {
918 			m_adj(m, ip->ip_len - m->m_pkthdr.len);
919 		}
920 	}
921 }
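
/*
 * Illustrative example (editorial): a minimum-size Ethernet frame is
 * padded to 60 bytes (excluding FCS), so a 28-byte IP datagram arrives
 * with m_pkthdr.len == 46 once the 14-byte Ethernet header is stripped.
 * ip_input_adjust() trims the 18 bytes of link-layer padding and, when
 * partial checksums are in use, subtracts their contribution via
 * m_adj_sum16().
 */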
922 
923 /*
924  * The first pass does all essential packet validation and places the packet on a
925  * per-flow queue for operations that have the same outcome for all packets of a flow.
926  */
927 static ipinput_chain_ret_t
928 ip_input_first_pass(struct mbuf *m, struct ip_fw_in_args *args, struct mbuf **modm)
929 {
930 	struct ip       *ip;
931 	struct ifnet    *inifp;
932 	unsigned int    hlen;
933 	int             retval = IPINPUT_DOCHAIN;
934 	int             len = 0;
935 	struct in_addr  src_ip;
936 #if DUMMYNET
937 	struct m_tag            *copy;
938 	struct m_tag            *p;
939 	boolean_t               delete = FALSE;
940 	struct ip_fw_args       args1;
941 	boolean_t               init = FALSE;
942 #endif /* DUMMYNET */
943 	ipfilter_t inject_filter_ref = NULL;
944 
945 	/* Check if the mbuf is still valid after interface filter processing */
946 	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
947 	inifp = mbuf_pkthdr_rcvif(m);
948 	VERIFY(inifp != NULL);
949 
950 	/* Perform IP header alignment fixup, if needed */
951 	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
952 
953 	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
954 
955 #if DUMMYNET
956 	/*
957 	 * Don't bother searching for tag(s) if there's none.
958 	 */
959 	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
960 		goto ipfw_tags_done;
961 	}
962 
963 	/* Grab info from mtags prepended to the chain */
964 	p = m_tag_first(m);
965 	while (p) {
966 		if (p->m_tag_id == KERNEL_MODULE_TAG_ID) {
967 			if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) {
968 				struct dn_pkt_tag *dn_tag;
969 
970 				dn_tag = (struct dn_pkt_tag *)(p + 1);
971 				args->fwai_pf_rule = dn_tag->dn_pf_rule;
972 				delete = TRUE;
973 			}
974 
975 			if (delete) {
976 				copy = p;
977 				p = m_tag_next(m, p);
978 				m_tag_delete(m, copy);
979 			} else {
980 				p = m_tag_next(m, p);
981 			}
982 		} else {
983 			p = m_tag_next(m, p);
984 		}
985 	}
986 
987 #if DIAGNOSTIC
988 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
989 		panic("ip_input no HDR");
990 	}
991 #endif
992 
993 	if (args->fwai_pf_rule) {
994 		/* dummynet already filtered us */
995 		ip = mtod(m, struct ip *);
996 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
997 		inject_filter_ref = ipf_get_inject_filter(m);
998 		if (args->fwai_pf_rule) {
999 			goto check_with_pf;
1000 		}
1001 	}
1002 ipfw_tags_done:
1003 #endif /* DUMMYNET */
1004 
1005 	/*
1006 	 * No need to process packet twice if we've already seen it.
1007 	 */
1008 	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
1009 		inject_filter_ref = ipf_get_inject_filter(m);
1010 	}
1011 	if (inject_filter_ref != NULL) {
1012 		ip = mtod(m, struct ip *);
1013 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1014 
1015 		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1016 		    struct ip *, ip, struct ifnet *, inifp,
1017 		    struct ip *, ip, struct ip6_hdr *, NULL);
1018 
1019 		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
1020 		ip->ip_off = ntohs(ip->ip_off);
1021 		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
1022 		return IPINPUT_DONE;
1023 	}
1024 
1025 	if (__improbable(m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
1026 		if_ports_used_match_mbuf(inifp, PF_INET, m);
1027 	}
1028 
1029 	if (m->m_pkthdr.len < sizeof(struct ip)) {
1030 		OSAddAtomic(1, &ipstat.ips_total);
1031 		OSAddAtomic(1, &ipstat.ips_tooshort);
1032 		m_freem(m);
1033 		return IPINPUT_FREED;
1034 	}
1035 
1036 	if (m->m_len < sizeof(struct ip) &&
1037 	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
1038 		OSAddAtomic(1, &ipstat.ips_total);
1039 		OSAddAtomic(1, &ipstat.ips_toosmall);
1040 		return IPINPUT_FREED;
1041 	}
1042 
1043 	ip = mtod(m, struct ip *);
1044 	*modm = m;
1045 
1046 	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
1047 	    ip->ip_p, ip->ip_off, ip->ip_len);
1048 
1049 	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
1050 		OSAddAtomic(1, &ipstat.ips_total);
1051 		OSAddAtomic(1, &ipstat.ips_badvers);
1052 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1053 		m_freem(m);
1054 		return IPINPUT_FREED;
1055 	}
1056 
1057 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1058 	if (hlen < sizeof(struct ip)) {
1059 		OSAddAtomic(1, &ipstat.ips_total);
1060 		OSAddAtomic(1, &ipstat.ips_badhlen);
1061 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1062 		m_freem(m);
1063 		return IPINPUT_FREED;
1064 	}
1065 
1066 	if (hlen > m->m_len) {
1067 		if ((m = m_pullup(m, hlen)) == NULL) {
1068 			OSAddAtomic(1, &ipstat.ips_total);
1069 			OSAddAtomic(1, &ipstat.ips_badhlen);
1070 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1071 			return IPINPUT_FREED;
1072 		}
1073 		ip = mtod(m, struct ip *);
1074 		*modm = m;
1075 	}
1076 
1077 	if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1) {
1078 		m->m_pkthdr.pkt_ext_flags |= PKTF_EXT_L4S;
1079 	}
1080 
1081 	/* 127/8 must not appear on wire - RFC1122 */
1082 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1083 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
1084 		/*
1085 		 * Allow for the following exceptions:
1086 		 *
1087 		 *   1. If the packet was sent to loopback (i.e. rcvif
1088 		 *      would have been set earlier at output time.)
1089 		 *
1090 		 *   2. If the packet was sent out on loopback from a local
1091 		 *      source address which belongs to a non-loopback
1092 		 *      interface (i.e. rcvif may not necessarily be a
1093 		 *      loopback interface, hence the test for PKTF_LOOP.)
1094 		 *      Unlike IPv6, there is no interface scope ID, and
1095 		 *      therefore we don't care so much about PKTF_IFINFO.
1096 		 */
1097 		if (!(inifp->if_flags & IFF_LOOPBACK) &&
1098 		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1099 			OSAddAtomic(1, &ipstat.ips_total);
1100 			OSAddAtomic(1, &ipstat.ips_badaddr);
1101 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1102 			m_freem(m);
1103 			return IPINPUT_FREED;
1104 		}
1105 	}
1106 
1107 	/* IPv4 Link-Local Addresses as defined in RFC3927 */
1108 	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
1109 	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
1110 		ip_linklocal_stat.iplls_in_total++;
1111 		if (ip->ip_ttl != MAXTTL) {
1112 			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
1113 			/* Silently drop link local traffic with bad TTL */
1114 			if (!ip_linklocal_in_allowbadttl) {
1115 				OSAddAtomic(1, &ipstat.ips_total);
1116 				KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1117 				m_freem(m);
1118 				return IPINPUT_FREED;
1119 			}
1120 		}
1121 	}
1122 
1123 	if (ip_cksum(m, hlen)) {
1124 		OSAddAtomic(1, &ipstat.ips_total);
1125 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1126 		m_freem(m);
1127 		return IPINPUT_FREED;
1128 	}
1129 
1130 	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1131 	    struct ip *, ip, struct ifnet *, inifp,
1132 	    struct ip *, ip, struct ip6_hdr *, NULL);
1133 
1134 	/*
1135 	 * Convert fields to host representation.
1136 	 */
1137 #if BYTE_ORDER != BIG_ENDIAN
1138 	NTOHS(ip->ip_len);
1139 #endif
1140 
1141 	if (ip->ip_len < hlen) {
1142 		OSAddAtomic(1, &ipstat.ips_total);
1143 		OSAddAtomic(1, &ipstat.ips_badlen);
1144 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1145 		m_freem(m);
1146 		return IPINPUT_FREED;
1147 	}
1148 
1149 #if BYTE_ORDER != BIG_ENDIAN
1150 	NTOHS(ip->ip_off);
1151 #endif
1152 
1153 	/*
1154 	 * Check that the amount of data in the buffers
1155 	 * is at least as much as the IP header would have us expect.
1156 	 * Trim mbufs if longer than we expect.
1157 	 * Drop packet if shorter than we expect.
1158 	 */
1159 	if (m->m_pkthdr.len < ip->ip_len) {
1160 		OSAddAtomic(1, &ipstat.ips_total);
1161 		OSAddAtomic(1, &ipstat.ips_tooshort);
1162 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1163 		m_freem(m);
1164 		return IPINPUT_FREED;
1165 	}
1166 
1167 	if (m->m_pkthdr.len > ip->ip_len) {
1168 		ip_input_adjust(m, ip, inifp);
1169 	}
1170 
1171 	/* for netstat route statistics */
1172 	src_ip = ip->ip_src;
1173 	len = m->m_pkthdr.len;
1174 
1175 #if DUMMYNET
1176 check_with_pf:
1177 #endif /* DUMMYNET */
1178 #if PF
1179 	/* Invoke inbound packet filter */
1180 	if (PF_IS_ENABLED) {
1181 		int error;
1182 		ip_input_cpout_args(args, &args1, &init);
1183 		ip = mtod(m, struct ip *);
1184 		src_ip = ip->ip_src;
1185 
1186 #if DUMMYNET
1187 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args1);
1188 #else
1189 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
1190 #endif /* DUMMYNET */
1191 		if (error != 0 || m == NULL) {
1192 			if (m != NULL) {
1193 				panic("%s: unexpected packet %p",
1194 				    __func__, m);
1195 				/* NOTREACHED */
1196 			}
1197 			/* Already freed by callee */
1198 			ip_input_update_nstat(inifp, src_ip, 1, len);
1199 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1200 			OSAddAtomic(1, &ipstat.ips_total);
1201 			return IPINPUT_FREED;
1202 		}
1203 		ip = mtod(m, struct ip *);
1204 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1205 		*modm = m;
1206 		ip_input_cpin_args(&args1, args);
1207 	}
1208 #endif /* PF */
1209 
1210 #if IPSEC
1211 	if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) {
1212 		retval = IPINPUT_DONTCHAIN; /* XXX scope for chaining here? */
1213 		goto pass;
1214 	}
1215 #endif
1216 
1217 #if IPSEC
1218 pass:
1219 #endif
1220 	/*
1221 	 * Process options and, if not destined for us,
1222 	 * ship it on.  ip_dooptions returns 1 when an
1223 	 * error was detected (causing an icmp message
1224 	 * to be sent and the original packet to be freed).
1225 	 */
1226 	ip_nhops = 0;           /* for source routed packets */
1227 	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
1228 		src_ip = ip->ip_src;
1229 		ip_input_update_nstat(inifp, src_ip, 1, len);
1230 		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1231 		OSAddAtomic(1, &ipstat.ips_total);
1232 		return IPINPUT_FREED;
1233 	}
1234 
1235 	/*
1236 	 * Don't chain fragmented packets
1237 	 */
1238 	if (ip->ip_off & ~(IP_DF | IP_RF)) {
1239 		return IPINPUT_DONTCHAIN;
1240 	}
1241 
1242 	/* Allow DHCP/BootP responses through */
1243 	if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
1244 	    hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) {
1245 		struct udpiphdr *ui;
1246 
1247 		if (m->m_len < sizeof(struct udpiphdr) &&
1248 		    (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
1249 			OSAddAtomic(1, &udpstat.udps_hdrops);
1250 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1251 			OSAddAtomic(1, &ipstat.ips_total);
1252 			return IPINPUT_FREED;
1253 		}
1254 		*modm = m;
1255 		ui = mtod(m, struct udpiphdr *);
1256 		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
1257 			ip_setdstifaddr_info(m, inifp->if_index, NULL);
1258 			return IPINPUT_DONTCHAIN;
1259 		}
1260 	}
1261 
1262 	/* Avoid chaining raw sockets as ipsec checks occur later for them */
1263 	if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) {
1264 		return IPINPUT_DONTCHAIN;
1265 	}
1266 
1267 	return retval;
1268 #if !defined(__i386__) && !defined(__x86_64__)
1269 bad:
1270 	m_freem(m);
1271 	return IPINPUT_FREED;
1272 #endif
1273 }
1274 
1275 /*
1276  * Because the call to m_pullup() may free the mbuf, the function frees the
1277  * mbuf packet chain before it returns IP_CHECK_IF_DROP.
1278  */
1279 static ip_check_if_result_t
1280 ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp)
1281 {
1282 	struct mbuf *m = *mp;
1283 	struct in_ifaddr *ia = NULL;
1284 	struct in_ifaddr *best_ia = NULL;
1285 	struct ifnet *match_ifp = NULL;
1286 	ip_check_if_result_t result = IP_CHECK_IF_NONE;
1287 
1288 	/*
1289 	 * Host broadcast and all network broadcast addresses are always a match
1290 	 */
1291 	if (ip->ip_dst.s_addr == (u_int32_t)INADDR_BROADCAST ||
1292 	    ip->ip_dst.s_addr == INADDR_ANY) {
1293 		ip_input_setdst_chain(m, inifp->if_index, NULL);
1294 		return IP_CHECK_IF_OURS;
1295 	}
1296 
1297 	/*
1298 	 * Check for a match in the hash bucket.
1299 	 */
1300 	lck_rw_lock_shared(&in_ifaddr_rwlock);
1301 	TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
1302 		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) {
1303 			best_ia = ia;
1304 			match_ifp = best_ia->ia_ifp;
1305 
1306 			if (ia->ia_ifp == inifp || (inifp->if_flags & IFF_LOOPBACK) ||
1307 			    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1308 				/*
1309 				 * A locally originated packet or packet from the loopback
1310 				 * interface is always an exact interface address match
1311 				 */
1312 				match_ifp = inifp;
1313 				break;
1314 			}
1315 			/*
1316 			 * Continue the loop in case there's an exact match with
1317 			 * another interface
1318 			 */
1319 		}
1320 	}
1321 	if (best_ia != NULL) {
1322 		if (match_ifp != inifp && ipforwarding == 0 &&
1323 		    ((ip_checkinterface == IP_CHECKINTERFACE_HYBRID_ES &&
1324 		    (match_ifp->if_family == IFNET_FAMILY_IPSEC ||
1325 		    match_ifp->if_family == IFNET_FAMILY_UTUN)) ||
1326 		    ip_checkinterface == IP_CHECKINTERFACE_STRONG_ES)) {
1327 			/*
1328 			 * Drop when interface address check is strict and forwarding
1329 			 * is disabled
1330 			 */
1331 			result = IP_CHECK_IF_DROP;
1332 		} else {
1333 			result = IP_CHECK_IF_OURS;
1334 			ip_input_setdst_chain(m, 0, best_ia);
1335 		}
1336 	}
1337 	lck_rw_done(&in_ifaddr_rwlock);
1338 
1339 	if (result == IP_CHECK_IF_NONE && (inifp->if_flags & IFF_BROADCAST)) {
1340 		/*
1341 		 * Check for broadcast addresses.
1342 		 *
1343 		 * Only accept broadcast packets that arrive via the matching
1344 		 * interface.  Reception of forwarded directed broadcasts would be
1345 		 * handled via ip_forward(), with ether_frameout() looping them
1346 		 * back into the stack for SIMPLEX interfaces.
1347 		 */
1348 		struct ifaddr *ifa;
1349 
1350 		ifnet_lock_shared(inifp);
1351 		TAILQ_FOREACH(ifa, &inifp->if_addrhead, ifa_link) {
1352 			if (ifa->ifa_addr->sa_family != AF_INET) {
1353 				continue;
1354 			}
1355 			ia = ifatoia(ifa);
1356 			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == ip->ip_dst.s_addr ||
1357 			    ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
1358 				ip_input_setdst_chain(m, 0, ia);
1359 				result = IP_CHECK_IF_OURS;
1360 				match_ifp = inifp;
1361 				break;
1362 			}
1363 		}
1364 		ifnet_lock_done(inifp);
1365 	}
1366 
1367 	/* Allow DHCP/BootP responses through */
1368 	if (result == IP_CHECK_IF_NONE && (inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
1369 	    ip->ip_p == IPPROTO_UDP && (IP_VHL_HL(ip->ip_vhl) << 2) == sizeof(struct ip)) {
1370 		struct udpiphdr *ui;
1371 
1372 		if (m->m_len < sizeof(struct udpiphdr)) {
1373 			if ((m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
1374 				OSAddAtomic(1, &udpstat.udps_hdrops);
1375 				*mp = NULL;
1376 				return IP_CHECK_IF_DROP;
1377 			}
1378 			/*
1379 			 * m_pullup can return a different mbuf
1380 			 */
1381 			*mp = m;
1382 			ip = mtod(m, struct ip *);
1383 		}
1384 		ui = mtod(m, struct udpiphdr *);
1385 		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
1386 			ip_input_setdst_chain(m, inifp->if_index, NULL);
1387 			result = IP_CHECK_IF_OURS;
1388 			match_ifp = inifp;
1389 		}
1390 	}
1391 
1392 	if (result == IP_CHECK_IF_NONE) {
1393 		if (ipforwarding == 0) {
1394 			result = IP_CHECK_IF_DROP;
1395 		} else {
1396 			result = IP_CHECK_IF_FORWARD;
1397 			ip_input_setdst_chain(m, inifp->if_index, NULL);
1398 		}
1399 	}
1400 
1401 	if (result == IP_CHECK_IF_OURS && match_ifp != inifp) {
1402 		ipstat.ips_rcv_if_weak_match++;
1403 
1404 		/*  Logging is too noisy when forwarding is enabled */
1405 		if (ip_checkinterface_debug != 0 && ipforwarding == 0) {
1406 			char src_str[MAX_IPv4_STR_LEN];
1407 			char dst_str[MAX_IPv4_STR_LEN];
1408 
1409 			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
1410 			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
1411 			os_log_info(OS_LOG_DEFAULT,
1412 			    "%s: weak ES interface match to %s for packet from %s to %s proto %u received via %s",
1413 			    __func__, best_ia->ia_ifp->if_xname, src_str, dst_str, ip->ip_p, inifp->if_xname);
1414 		}
1415 	} else if (result == IP_CHECK_IF_DROP) {
1416 		if (ip_checkinterface_debug > 0) {
1417 			char src_str[MAX_IPv4_STR_LEN];
1418 			char dst_str[MAX_IPv4_STR_LEN];
1419 
1420 			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
1421 			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
1422 			os_log(OS_LOG_DEFAULT,
1423 			    "%s: no interface match for packet from %s to %s proto %u received via %s",
1424 			    __func__, src_str, dst_str, ip->ip_p, inifp->if_xname);
1425 		}
1426 		struct mbuf *tmp_mbuf = m;
1427 		while (tmp_mbuf != NULL) {
1428 			ipstat.ips_rcv_if_no_match++;
1429 			tmp_mbuf = tmp_mbuf->m_nextpkt;
1430 		}
1431 		m_freem_list(m);
1432 		*mp = NULL;
1433 	}
1434 
1435 	return result;
1436 }
1437 
1438 static void
1439 ip_input_second_pass(struct mbuf *m, struct ifnet *inifp,
1440     int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args)
1441 {
1442 	struct mbuf             *tmp_mbuf = NULL;
1443 	unsigned int            hlen;
1444 
1445 #pragma unused (args)
1446 
1447 	struct ip *ip = mtod(m, struct ip *);
1448 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1449 
1450 	OSAddAtomic(npkts_in_chain, &ipstat.ips_total);
1451 
1452 	/*
1453 	 * Naively assume we can attribute inbound data to the route we would
1454 	 * use to send to this destination. Asymmetric routing breaks this
1455 	 * assumption, but it still allows us to account for traffic from
1456 	 * a remote node in the routing table.
1457 	 * This has a very significant performance impact, so we bypass it
1458 	 * when nstat_collect is disabled. We may also bypass it when the
1459 	 * protocol is TCP in the future, because TCP will have a route that
1460 	 * we can use to attribute the data to. That does mean we would not
1461 	 * account for forwarded TCP traffic.
1462 	 */
1463 	ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain,
1464 	    bytes_in_chain);
1465 
1466 	/*
1467 	 * Check our list of addresses, to see if the packet is for us.
1468 	 * If we don't have any addresses, assume any unicast packet
1469 	 * we receive might be for us (and let the upper layers deal
1470 	 * with it).
1471 	 */
1472 	tmp_mbuf = m;
1473 	if (TAILQ_EMPTY(&in_ifaddrhead)) {
1474 		while (tmp_mbuf != NULL) {
1475 			if (!(tmp_mbuf->m_flags & (M_MCAST | M_BCAST))) {
1476 				ip_setdstifaddr_info(tmp_mbuf, inifp->if_index,
1477 				    NULL);
1478 			}
1479 			tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
1480 		}
1481 		goto ours;
1482 	}
1483 
1484 	/*
1485 	 * Enable a consistency check between the destination address
1486 	 * and the arrival interface for a unicast packet (the RFC 1122
1487 	 * strong ES model) if IP forwarding is disabled and the packet
1488 	 * is not locally generated
1489 	 *
1490 	 * XXX - Checking also should be disabled if the destination
1491 	 * address is ipnat'ed to a different interface.
1492 	 *
1493 	 * XXX - Checking is incompatible with IP aliases added
1494 	 * to the loopback interface instead of the interface where
1495 	 * the packets are received.
1496 	 */
1497 	if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
1498 		ip_check_if_result_t ip_check_if_result = IP_CHECK_IF_NONE;
1499 
1500 		ip_check_if_result = ip_input_check_interface(&m, ip, inifp);
1501 		ASSERT(ip_check_if_result != IP_CHECK_IF_NONE);
1502 		if (ip_check_if_result == IP_CHECK_IF_OURS) {
1503 			goto ours;
1504 		} else if (ip_check_if_result == IP_CHECK_IF_DROP) {
1505 			return;
1506 		}
1507 	} else {
1508 		struct in_multi *inm;
1509 		/*
1510 		 * See if we belong to the destination multicast group on the
1511 		 * arrival interface.
1512 		 */
1513 		in_multihead_lock_shared();
1514 		IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
1515 		in_multihead_lock_done();
1516 		if (inm == NULL) {
1517 			OSAddAtomic(npkts_in_chain, &ipstat.ips_notmember);
1518 			m_freem_list(m);
1519 			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1520 			return;
1521 		}
1522 		ip_input_setdst_chain(m, inifp->if_index, NULL);
1523 		INM_REMREF(inm);
1524 		goto ours;
1525 	}
1526 
1527 	tmp_mbuf = m;
1528 	struct mbuf *nxt_mbuf = NULL;
1529 	while (tmp_mbuf != NULL) {
1530 		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
1531 		/*
1532 		 * Not for us; forward if possible and desirable.
1533 		 */
1534 		mbuf_setnextpkt(tmp_mbuf, NULL);
1535 		if (ipforwarding == 0) {
1536 			OSAddAtomic(1, &ipstat.ips_cantforward);
1537 			m_freem(tmp_mbuf);
1538 		} else {
1539 			ip_forward(tmp_mbuf, 0, NULL);
1540 		}
1541 		tmp_mbuf = nxt_mbuf;
1542 	}
1543 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1544 	return;
1545 ours:
1546 	ip = mtod(m, struct ip *); /* in case it changed */
1547 	/*
1548 	 * If offset is set, must reassemble.
1549 	 */
1550 	if (ip->ip_off & ~(IP_DF | IP_RF)) {
1551 		VERIFY(npkts_in_chain == 1);
1552 		m = ip_reass(m);
1553 		if (m == NULL) {
1554 			return;
1555 		}
1556 		ip = mtod(m, struct ip *);
1557 		/* Get the header length of the reassembled packet */
1558 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1559 	}
1560 
1561 	/*
1562 	 * Further protocols expect the packet length to be w/o the
1563 	 * IP header.
1564 	 */
1565 	ip->ip_len -= hlen;
1566 
1567 #if IPSEC
1568 	/*
1569 	 * Enforce IPsec policy checking if we are seeing the last header.
1570 	 * Note that we do not visit this for protocols with pcb-layer
1571 	 * code, like udp/tcp/raw ip.
1572 	 */
1573 	if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
1574 		VERIFY(npkts_in_chain == 1);
1575 		if (ipsec4_in_reject(m, NULL)) {
1576 			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
1577 			goto bad;
1578 		}
1579 	}
1580 #endif /* IPSEC */
1581 
1582 	/*
1583 	 * Switch out to protocol's input routine.
1584 	 */
1585 	OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered);
1586 
1587 	ip_input_dispatch_chain(m);
1588 
1589 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1590 	return;
1591 bad:
1592 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1593 	m_freem(m);
1594 }
1595 
1596 void
1597 ip_input_process_list(struct mbuf *packet_list)
1598 {
1599 	pktchain_elm_t  pktchain_tbl[PKTTBL_SZ];
1600 
1601 	struct mbuf     *packet = NULL;
1602 	struct mbuf     *modm = NULL; /* modified mbuf */
1603 	int             retval = 0;
1604 #if (DEBUG || DEVELOPMENT)
1605 	struct timeval start_tv;
1606 #endif /* (DEBUG || DEVELOPMENT) */
1607 	int     num_pkts = 0;
1608 	int chain = 0;
1609 	struct ip_fw_in_args       args;
1610 
1611 	if (ip_chaining == 0) {
1612 		struct mbuf *m = packet_list;
1613 #if (DEBUG || DEVELOPMENT)
1614 		if (ip_input_measure) {
1615 			net_perf_start_time(&net_perf, &start_tv);
1616 		}
1617 #endif /* (DEBUG || DEVELOPMENT) */
1618 
1619 		while (m) {
1620 			packet_list = mbuf_nextpkt(m);
1621 			mbuf_setnextpkt(m, NULL);
1622 			ip_input(m);
1623 			m = packet_list;
1624 			num_pkts++;
1625 		}
1626 #if (DEBUG || DEVELOPMENT)
1627 		if (ip_input_measure) {
1628 			net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1629 		}
1630 #endif /* (DEBUG || DEVELOPMENT) */
1631 		return;
1632 	}
1633 #if (DEBUG || DEVELOPMENT)
1634 	if (ip_input_measure) {
1635 		net_perf_start_time(&net_perf, &start_tv);
1636 	}
1637 #endif /* (DEBUG || DEVELOPMENT) */
1638 
1639 	bzero(&pktchain_tbl, sizeof(pktchain_tbl));
1640 restart_list_process:
1641 	chain = 0;
1642 	for (packet = packet_list; packet; packet = packet_list) {
1643 		m_add_crumb(packet, PKT_CRUMB_IP_INPUT);
1644 
1645 		packet_list = mbuf_nextpkt(packet);
1646 		mbuf_setnextpkt(packet, NULL);
1647 
1648 		num_pkts++;
1649 		modm = NULL;
1650 		bzero(&args, sizeof(args));
1651 
1652 		retval = ip_input_first_pass(packet, &args, &modm);
1653 
1654 		if (retval == IPINPUT_DOCHAIN) {
1655 			if (modm) {
1656 				packet = modm;
1657 			}
1658 			packet = ip_chain_insert(packet, &pktchain_tbl[0]);
1659 			if (packet == NULL) {
1660 				ipstat.ips_rxc_chained++;
1661 				chain++;
1662 				if (chain > ip_chainsz) {
1663 					break;
1664 				}
1665 			} else {
1666 				ipstat.ips_rxc_collisions++;
1667 				break;
1668 			}
1669 		} else if (retval == IPINPUT_DONTCHAIN) {
1670 			/* in order to preserve order, exit from chaining */
1671 			if (modm) {
1672 				packet = modm;
1673 			}
1674 			ipstat.ips_rxc_notchain++;
1675 			break;
1676 		} else {
1677 			/* packet was freed or delivered, do nothing. */
1678 		}
1679 	}
1680 
1681 	/* do second pass here for pktchain_tbl */
1682 	if (chain) {
1683 		ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args);
1684 	}
1685 
1686 	if (packet) {
1687 		/*
1688 		 * The equivalent update for the chaining case is performed in
1689 		 * ip_input_second_pass_loop_tbl().
1690 		 */
1691 #if (DEBUG || DEVELOPMENT)
1692 		if (ip_input_measure) {
1693 			net_perf_histogram(&net_perf, 1);
1694 		}
1695 #endif /* (DEBUG || DEVELOPMENT) */
1696 		ip_input_second_pass(packet, packet->m_pkthdr.rcvif,
1697 		    1, packet->m_pkthdr.len, &args);
1698 	}
1699 
1700 	if (packet_list) {
1701 		goto restart_list_process;
1702 	}
1703 
1704 #if (DEBUG || DEVELOPMENT)
1705 	if (ip_input_measure) {
1706 		net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1707 	}
1708 #endif /* (DEBUG || DEVELOPMENT) */
1709 }
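#if 0
/*
 * Editor's illustration (hypothetical helper, not part of the original
 * source and not built): the shape a driver input path could take when
 * handing a batch of received packets to ip_input_process_list().  Only
 * the m_nextpkt list-linking matters here; allocation and fill of the
 * mbufs is elided.
 */
static void
example_deliver_batch(struct mbuf **pkts, int n)
{
	struct mbuf *head = NULL;
	struct mbuf **tailp = &head;
	int i;

	/* link the packets through m_nextpkt to form an input list */
	for (i = 0; i < n; i++) {
		pkts[i]->m_nextpkt = NULL;
		*tailp = pkts[i];
		tailp = &pkts[i]->m_nextpkt;
	}
	/*
	 * A single call lets the first/second pass machinery above chain
	 * packets of the same flow and amortize per-packet costs; with
	 * ip_chaining disabled it degenerates to per-packet ip_input().
	 */
	ip_input_process_list(head);
}
#endif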
1710 /*
1711  * IP input routine.  Checksum and byte swap header.  If fragmented,
1712  * try to reassemble.  Process options.  Pass to next level.
1713  */
1714 void
1715 ip_input(struct mbuf *m)
1716 {
1717 	struct ip *ip;
1718 	unsigned int hlen;
1719 	u_short sum = 0;
1720 #if DUMMYNET
1721 	struct ip_fw_args args;
1722 	struct m_tag    *tag;
1723 #endif
1724 	ipfilter_t inject_filter_ref = NULL;
1725 	struct ifnet *inifp;
1726 
1727 	/* Check if the mbuf is still valid after interface filter processing */
1728 	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
1729 	inifp = m->m_pkthdr.rcvif;
1730 	VERIFY(inifp != NULL);
1731 
1732 	m_add_crumb(m, PKT_CRUMB_IP_INPUT);
1733 
1734 	ipstat.ips_rxc_notlist++;
1735 
1736 	/* Perform IP header alignment fixup, if needed */
1737 	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
1738 
1739 	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
1740 
1741 #if DUMMYNET
1742 	bzero(&args, sizeof(struct ip_fw_args));
1743 
1744 	/*
1745 	 * Don't bother searching for tag(s) if there's none.
1746 	 */
1747 	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
1748 		goto ipfw_tags_done;
1749 	}
1750 
1751 	/* Grab info from mtags prepended to the chain */
1752 	if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
1753 	    KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
1754 		struct dn_pkt_tag *dn_tag;
1755 
1756 		dn_tag = (struct dn_pkt_tag *)(tag + 1);
1757 		args.fwa_pf_rule = dn_tag->dn_pf_rule;
1758 
1759 		m_tag_delete(m, tag);
1760 	}
1761 
1762 #if DIAGNOSTIC
1763 	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1764 		panic("ip_input no HDR");
1765 	}
1766 #endif
1767 
1768 	if (args.fwa_pf_rule) {
1769 		/* dummynet already filtered us */
1770 		ip = mtod(m, struct ip *);
1771 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1772 		inject_filter_ref = ipf_get_inject_filter(m);
1773 		/* fwa_pf_rule is known to be non-NULL here, so the */
1774 		/* original inner re-test of it was redundant */
1775 		goto check_with_pf;
1776 	}
1777 ipfw_tags_done:
1778 #endif /* DUMMYNET */
1779 
1780 	/*
1781 	 * No need to process packet twice if we've already seen it.
1782 	 */
1783 	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
1784 		inject_filter_ref = ipf_get_inject_filter(m);
1785 	}
1786 	if (inject_filter_ref != NULL) {
1787 		ip = mtod(m, struct ip *);
1788 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1789 
1790 		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1791 		    struct ip *, ip, struct ifnet *, inifp,
1792 		    struct ip *, ip, struct ip6_hdr *, NULL);
1793 
1794 		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
1795 		ip->ip_off = ntohs(ip->ip_off);
1796 		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
1797 		return;
1798 	}
1799 
1800 	if (__improbable(m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
1801 		if_ports_used_match_mbuf(inifp, PF_INET, m);
1802 	}
1803 
1804 	OSAddAtomic(1, &ipstat.ips_total);
1805 	if (m->m_pkthdr.len < sizeof(struct ip)) {
1806 		goto tooshort;
1807 	}
1808 
1809 	if (m->m_len < sizeof(struct ip) &&
1810 	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
1811 		OSAddAtomic(1, &ipstat.ips_toosmall);
1812 		return;
1813 	}
1814 	ip = mtod(m, struct ip *);
1815 
1816 	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
1817 	    ip->ip_p, ip->ip_off, ip->ip_len);
1818 
1819 	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
1820 		OSAddAtomic(1, &ipstat.ips_badvers);
1821 		goto bad;
1822 	}
1823 
1824 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1825 	if (hlen < sizeof(struct ip)) {         /* minimum header length */
1826 		OSAddAtomic(1, &ipstat.ips_badhlen);
1827 		goto bad;
1828 	}
1829 	if (hlen > m->m_len) {
1830 		if ((m = m_pullup(m, hlen)) == NULL) {
1831 			OSAddAtomic(1, &ipstat.ips_badhlen);
1832 			return;
1833 		}
1834 		ip = mtod(m, struct ip *);
1835 	}
1836 
1837 	/* 127/8 must not appear on wire - RFC1122 */
1838 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1839 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
1840 		/*
1841 		 * Allow for the following exceptions:
1842 		 *
1843 		 *   1. If the packet was sent to loopback (i.e. rcvif
1844 		 *	would have been set earlier at output time.)
1845 		 *
1846 		 *   2. If the packet was sent out on loopback from a local
1847 		 *	source address which belongs to a non-loopback
1848 		 *	interface (i.e. rcvif may not necessarily be a
1849 		 *	loopback interface, hence the test for PKTF_LOOP.)
1850 		 *	Unlike IPv6, there is no interface scope ID, and
1851 		 *	therefore we don't care so much about PKTF_IFINFO.
1852 		 */
1853 		if (!(inifp->if_flags & IFF_LOOPBACK) &&
1854 		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1855 			OSAddAtomic(1, &ipstat.ips_badaddr);
1856 			goto bad;
1857 		}
1858 	}
1859 
1860 	/* IPv4 Link-Local Addresses as defined in RFC3927 */
1861 	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
1862 	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
1863 		ip_linklocal_stat.iplls_in_total++;
1864 		if (ip->ip_ttl != MAXTTL) {
1865 			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
1866 			/* Silently drop link local traffic with bad TTL */
1867 			if (!ip_linklocal_in_allowbadttl) {
1868 				goto bad;
1869 			}
1870 		}
1871 	}
1872 
1873 	sum = ip_cksum(m, hlen);
1874 	if (sum) {
1875 		goto bad;
1876 	}
1877 
1878 	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1879 	    struct ip *, ip, struct ifnet *, inifp,
1880 	    struct ip *, ip, struct ip6_hdr *, NULL);
1881 
1882 	/*
1883 	 * Naively assume we can attribute inbound data to the route we would
1884 	 * use to send to this destination. Asymmetric routing breaks this
1885 	 * assumption, but it still allows us to account for traffic from
1886 	 * a remote node in the routing table.
1887 	 * This lookup has a very significant performance impact, so we bypass
1888 	 * it when nstat_collect is disabled.  We may also bypass it in the
1889 	 * future when the protocol is TCP, because TCP will have a route that
1890 	 * we can use to attribute the data to; that does mean we would not
1891 	 * account for forwarded TCP traffic.
1892 	 */
1893 	if (nstat_collect) {
1894 		struct rtentry *rt =
1895 		    ifnet_cached_rtlookup_inet(inifp, ip->ip_src);
1896 		if (rt != NULL) {
1897 			nstat_route_rx(rt, 1, m->m_pkthdr.len, 0);
1898 			rtfree(rt);
1899 		}
1900 	}
1901 
1902 	/*
1903 	 * Convert fields to host representation.
1904 	 */
1905 #if BYTE_ORDER != BIG_ENDIAN
1906 	NTOHS(ip->ip_len);
1907 #endif
1908 
1909 	if (ip->ip_len < hlen) {
1910 		OSAddAtomic(1, &ipstat.ips_badlen);
1911 		goto bad;
1912 	}
1913 
1914 #if BYTE_ORDER != BIG_ENDIAN
1915 	NTOHS(ip->ip_off);
1916 #endif
1917 	/*
1918 	 * Check that the amount of data in the buffers
1919 	 * is at least as much as the IP header would have us expect.
1920 	 * Trim mbufs if longer than we expect.
1921 	 * Drop packet if shorter than we expect.
1922 	 */
1923 	if (m->m_pkthdr.len < ip->ip_len) {
1924 tooshort:
1925 		OSAddAtomic(1, &ipstat.ips_tooshort);
1926 		goto bad;
1927 	}
1928 	if (m->m_pkthdr.len > ip->ip_len) {
1929 		ip_input_adjust(m, ip, inifp);
1930 	}
1931 
1932 #if DUMMYNET
1933 check_with_pf:
1934 #endif
1935 #if PF
1936 	/* Invoke inbound packet filter */
1937 	if (PF_IS_ENABLED) {
1938 		int error;
1939 #if DUMMYNET
1940 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args);
1941 #else
1942 		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
1943 #endif /* DUMMYNET */
1944 		if (error != 0 || m == NULL) {
1945 			if (m != NULL) {
1946 				panic("%s: unexpected packet %p",
1947 				    __func__, m);
1948 				/* NOTREACHED */
1949 			}
1950 			/* Already freed by callee */
1951 			return;
1952 		}
1953 		ip = mtod(m, struct ip *);
1954 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1955 	}
1956 #endif /* PF */
1957 
1958 #if IPSEC
1959 	if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) {
1960 		goto pass;
1961 	}
1962 #endif
1963 
1964 pass:
1965 	/*
1966 	 * Process options and, if not destined for us,
1967 	 * ship it on.  ip_dooptions returns 1 when an
1968 	 * error was detected (causing an icmp message
1969 	 * to be sent and the original packet to be freed).
1970 	 */
1971 	ip_nhops = 0;           /* for source routed packets */
1972 	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
1973 		return;
1974 	}
1975 
1976 	/*
1977 	 * Check our list of addresses, to see if the packet is for us.
1978 	 * If we don't have any addresses, assume any unicast packet
1979 	 * we receive might be for us (and let the upper layers deal
1980 	 * with it).
1981 	 */
1982 	if (TAILQ_EMPTY(&in_ifaddrhead) && !(m->m_flags & (M_MCAST | M_BCAST))) {
1983 		ip_setdstifaddr_info(m, inifp->if_index, NULL);
1984 		goto ours;
1985 	}
1986 
1987 	/*
1988 	 * Enable a consistency check between the destination address
1989 	 * and the arrival interface for a unicast packet (the RFC 1122
1990 	 * strong ES model) if IP forwarding is disabled and the packet
1991 	 * is not locally generated and the packet is not subject to
1992 	 * 'ipfw fwd'.
1993 	 *
1994 	 * XXX - Checking also should be disabled if the destination
1995 	 * address is ipnat'ed to a different interface.
1996 	 *
1997 	 * XXX - Checking is incompatible with IP aliases added
1998 	 * to the loopback interface instead of the interface where
1999 	 * the packets are received.
2000 	 */
2001 	if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
2002 		ip_check_if_result_t check_if_result = IP_CHECK_IF_NONE;
2003 
2004 		check_if_result = ip_input_check_interface(&m, ip, inifp);
2005 		ASSERT(check_if_result != IP_CHECK_IF_NONE);
2006 		if (check_if_result == IP_CHECK_IF_OURS) {
2007 			goto ours;
2008 		} else if (check_if_result == IP_CHECK_IF_DROP) {
2009 			return;
2010 		}
2011 	} else {
2012 		struct in_multi *inm;
2013 		/*
2014 		 * See if we belong to the destination multicast group on the
2015 		 * arrival interface.
2016 		 */
2017 		in_multihead_lock_shared();
2018 		IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
2019 		in_multihead_lock_done();
2020 		if (inm == NULL) {
2021 			OSAddAtomic(1, &ipstat.ips_notmember);
2022 			m_freem(m);
2023 			return;
2024 		}
2025 		ip_setdstifaddr_info(m, inifp->if_index, NULL);
2026 		INM_REMREF(inm);
2027 		goto ours;
2028 	}
2029 
2030 	/*
2031 	 * Not for us; forward if possible and desirable.
2032 	 */
2033 	if (ipforwarding == 0) {
2034 		OSAddAtomic(1, &ipstat.ips_cantforward);
2035 		m_freem(m);
2036 	} else {
2037 		ip_forward(m, 0, NULL);
2038 	}
2039 	return;
2040 
2041 ours:
2042 	/*
2043 	 * If offset or IP_MF are set, must reassemble.
2044 	 */
2045 	if (ip->ip_off & ~(IP_DF | IP_RF)) {
2046 		m = ip_reass(m);
2047 		if (m == NULL) {
2048 			return;
2049 		}
2050 		ip = mtod(m, struct ip *);
2051 		/* Get the header length of the reassembled packet */
2052 		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2053 	}
2054 
2055 	/*
2056 	 * Further protocols expect the packet length to be w/o the
2057 	 * IP header.
2058 	 */
2059 	ip->ip_len -= hlen;
2060 
2061 
2062 #if IPSEC
2063 	/*
2064 	 * Enforce IPsec policy checking if we are seeing the last header;
2065 	 * note that we do not visit this for protocols with PCB-layer
2066 	 * code, such as UDP, TCP and raw IP.
2067 	 */
2068 	if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
2069 		if (ipsec4_in_reject(m, NULL)) {
2070 			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
2071 			goto bad;
2072 		}
2073 	}
2074 #endif /* IPSEC */
2075 
2076 	/*
2077 	 * Switch out to protocol's input routine.
2078 	 */
2079 	OSAddAtomic(1, &ipstat.ips_delivered);
2080 
2081 	ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
2082 	return;
2083 
2084 bad:
2085 	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
2086 	m_freem(m);
2087 }
2088 
2089 static void
2090 ipq_updateparams(void)
2091 {
2092 	LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2093 	/*
2094 	 * -1 for unlimited allocation.
2095 	 */
2096 	if (maxnipq < 0) {
2097 		ipq_limit = 0;
2098 	}
2099 	/*
2100 	 * Positive number for specific bound.
2101 	 */
2102 	if (maxnipq > 0) {
2103 		ipq_limit = maxnipq;
2104 	}
2105 	/*
2106 	 * Zero specifies no further fragment queue allocation -- set the
2107 	 * bound very low, but rely on implementation elsewhere to actually
2108 	 * prevent allocation and reclaim current queues.
2109 	 */
2110 	if (maxnipq == 0) {
2111 		ipq_limit = 1;
2112 	}
2113 	/*
2114 	 * Arm the purge timer if not already and if there's work to do
2115 	 */
2116 	frag_sched_timeout();
2117 }
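/*
 * Editor's note (illustrative, not part of the original source): the
 * maxnipq encoding handled above, in table form:
 *
 *	maxnipq < 0	unlimited	ipq_limit = 0 (bound not enforced)
 *	maxnipq > 0	bounded		ipq_limit = maxnipq
 *	maxnipq == 0	disabled	ipq_limit = 1; other code refuses
 *					new fragments and reclaims queues
 *
 * Assuming the conventional BSD sysctl plumbing for these variables
 * (the OID name below is the usual one, but treat it as an assumption),
 * an administrator would tune the bound with something like:
 *
 *	sysctl -w net.inet.ip.maxfragpackets=1024
 */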
2118 
2119 static int
2120 sysctl_maxnipq SYSCTL_HANDLER_ARGS
2121 {
2122 #pragma unused(arg1, arg2)
2123 	int error, i;
2124 
2125 	lck_mtx_lock(&ipqlock);
2126 	i = maxnipq;
2127 	error = sysctl_handle_int(oidp, &i, 0, req);
2128 	if (error || req->newptr == USER_ADDR_NULL) {
2129 		goto done;
2130 	}
2131 	/* impose bounds */
2132 	if (i < -1 || i > (nmbclusters / 4)) {
2133 		error = EINVAL;
2134 		goto done;
2135 	}
2136 	maxnipq = i;
2137 	ipq_updateparams();
2138 done:
2139 	lck_mtx_unlock(&ipqlock);
2140 	return error;
2141 }
2142 
2143 static int
2144 sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS
2145 {
2146 #pragma unused(arg1, arg2)
2147 	int error, i;
2148 
2149 	lck_mtx_lock(&ipqlock);
2150 	i = maxfragsperpacket;
2151 	error = sysctl_handle_int(oidp, &i, 0, req);
2152 	if (error || req->newptr == USER_ADDR_NULL) {
2153 		goto done;
2154 	}
2155 	maxfragsperpacket = i;
2156 	ipq_updateparams();     /* see if we need to arm timer */
2157 done:
2158 	lck_mtx_unlock(&ipqlock);
2159 	return error;
2160 }
2161 
2162 /*
2163  * Take incoming datagram fragment and try to reassemble it into
2164  * whole datagram.  If a chain for reassembly of this datagram already
2165  * exists, then it is given as fp; otherwise have to make a chain.
2166  *
2167  * The IP header is *NOT* adjusted out of iplen (but in host byte order).
2168  */
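/*
 * Editor's sketch of the flow below (descriptive only, not part of the
 * original source):
 *
 *	1. hash (ip_src, ip_id) into a reassembly bucket and look for a
 *	   matching ipq; trim the queue count if over the limit
 *	2. salvage partial hardware checksum info where possible
 *	3. insert the fragment into the queue sorted by ip_off, trimming
 *	   any byte ranges that overlap its neighbors
 *	4. if the fragments now cover [0, total) contiguously and the
 *	   last one has IP_MF clear, concatenate them with m_cat() and
 *	   return the reassembled datagram; otherwise return NULL
 */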
2169 static struct mbuf *
2170 ip_reass(struct mbuf *m)
2171 {
2172 	struct ip *ip;
2173 	struct mbuf *p, *q, *nq, *t;
2174 	struct ipq *fp = NULL;
2175 	struct ipqhead *head;
2176 	int i, hlen, next;
2177 	u_int8_t ecn, ecn0;
2178 	uint32_t csum, csum_flags;
2179 	uint16_t hash;
2180 	struct fq_head dfq;
2181 
2182 	MBUFQ_INIT(&dfq);       /* for deferred frees */
2183 
2184 	/* If maxnipq or maxfragsperpacket is 0, never accept fragments. */
2185 	if (maxnipq == 0 || maxfragsperpacket == 0) {
2186 		ipstat.ips_fragments++;
2187 		ipstat.ips_fragdropped++;
2188 		m_freem(m);
2189 		if (nipq > 0) {
2190 			lck_mtx_lock(&ipqlock);
2191 			frag_sched_timeout();   /* purge stale fragments */
2192 			lck_mtx_unlock(&ipqlock);
2193 		}
2194 		return NULL;
2195 	}
2196 
2197 	ip = mtod(m, struct ip *);
2198 	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2199 
2200 	lck_mtx_lock(&ipqlock);
2201 
2202 	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
2203 	head = &ipq[hash];
2204 
2205 	/*
2206 	 * Look for queue of fragments
2207 	 * of this datagram.
2208 	 */
2209 	TAILQ_FOREACH(fp, head, ipq_list) {
2210 		if (ip->ip_id == fp->ipq_id &&
2211 		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
2212 		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
2213 		    ip->ip_p == fp->ipq_p) {
2214 			goto found;
2215 		}
2216 	}
2217 
2218 	fp = NULL;
2219 
2220 	/*
2221 	 * Attempt to trim the number of allocated fragment queues if it
2222 	 * exceeds the administrative limit.
2223 	 */
2224 	if ((nipq > (unsigned)maxnipq) && (maxnipq > 0)) {
2225 		/*
2226 		 * drop something from the tail of the current queue
2227 		 * before proceeding further
2228 		 */
2229 		struct ipq *fq = TAILQ_LAST(head, ipqhead);
2230 		if (fq == NULL) {   /* gak */
2231 			for (i = 0; i < IPREASS_NHASH; i++) {
2232 				struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead);
2233 				if (r) {
2234 					ipstat.ips_fragtimeout += r->ipq_nfrags;
2235 					frag_freef(&ipq[i], r);
2236 					break;
2237 				}
2238 			}
2239 		} else {
2240 			ipstat.ips_fragtimeout += fq->ipq_nfrags;
2241 			frag_freef(head, fq);
2242 		}
2243 	}
2244 
2245 found:
2246 	/*
2247 	 * Leverage partial checksum offload for IP fragments.  Narrow down
2248 	 * the scope to cover only UDP without IP options, as that is the
2249 	 * most common case.
2250 	 *
2251 	 * Perform 1's complement adjustment of octets that got included/
2252 	 * excluded in the hardware-calculated checksum value.  Ignore cases
2253 	 * where the value includes the entire IPv4 header span, as the sum
2254 	 * for those octets would already be 0 by the time we get here; IP
2255 	 * has already performed its header checksum validation.  Also take
2256 	 * care of any trailing bytes and subtract out their partial sum.
2257 	 */
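	/*
	 * Editor's worked example (hypothetical numbers, not part of the
	 * original source): suppose the NIC summed from csum_rx_start = 0
	 * while UDP needs the sum starting at hlen = 20, and the driver
	 * also delivered 4 trailing pad bytes beyond ip_len.  Then
	 * m_adj_sum16() below adjusts csum_rx_val, in 1's complement
	 * arithmetic with end-around carry, so that it covers exactly
	 * [hlen, hlen + datalen); the 4-byte trailer is clipped by
	 * m_adj(m, -trailer) further down.  swbytes records how many
	 * bytes had to be summed in software, for the UDP checksum stats.
	 */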
2258 	if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) &&
2259 	    (m->m_pkthdr.csum_flags &
2260 	    (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
2261 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
2262 		uint32_t start = m->m_pkthdr.csum_rx_start;
2263 		int32_t trailer = (m_pktlen(m) - ip->ip_len);
2264 		uint32_t swbytes = (uint32_t)trailer;
2265 
2266 		csum = m->m_pkthdr.csum_rx_val;
2267 
2268 		ASSERT(trailer >= 0);
2269 		if ((start != 0 && start != hlen) || trailer != 0) {
2270 			uint32_t datalen = ip->ip_len - hlen;
2271 
2272 #if BYTE_ORDER != BIG_ENDIAN
2273 			if (start < hlen) {
2274 				HTONS(ip->ip_len);
2275 				HTONS(ip->ip_off);
2276 			}
2277 #endif /* BYTE_ORDER != BIG_ENDIAN */
2278 			/* callee folds in sum */
2279 			csum = m_adj_sum16(m, start, hlen, datalen, csum);
2280 			if (hlen > start) {
2281 				swbytes += (hlen - start);
2282 			} else {
2283 				swbytes += (start - hlen);
2284 			}
2285 #if BYTE_ORDER != BIG_ENDIAN
2286 			if (start < hlen) {
2287 				NTOHS(ip->ip_off);
2288 				NTOHS(ip->ip_len);
2289 			}
2290 #endif /* BYTE_ORDER != BIG_ENDIAN */
2291 		}
2292 		csum_flags = m->m_pkthdr.csum_flags;
2293 
2294 		if (swbytes != 0) {
2295 			udp_in_cksum_stats(swbytes);
2296 		}
2297 		if (trailer != 0) {
2298 			m_adj(m, -trailer);
2299 		}
2300 	} else {
2301 		csum = 0;
2302 		csum_flags = 0;
2303 	}
2304 
2305 	/* Invalidate checksum */
2306 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
2307 
2308 	ipstat.ips_fragments++;
2309 
2310 	/*
2311 	 * Adjust ip_len to not reflect header,
2312 	 * convert offset of this to bytes.
2313 	 */
2314 	ip->ip_len -= hlen;
2315 	if (ip->ip_off & IP_MF) {
2316 		/*
2317 		 * Make sure that fragments have a data length
2318 		 * that's a non-zero multiple of 8 bytes.
2319 		 */
2320 		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
2321 			OSAddAtomic(1, &ipstat.ips_toosmall);
2322 			/*
2323 			 * Reassembly queue may have been found if previous
2324 			 * fragments were valid; given that this one is bad,
2325 			 * we need to drop it.  Make sure to set fp to NULL
2326 			 * if not already, since we don't want to decrement
2327 			 * ipq_nfrags as it doesn't include this packet.
2328 			 */
2329 			fp = NULL;
2330 			goto dropfrag;
2331 		}
2332 		m->m_flags |= M_FRAG;
2333 	} else {
2334 		/* Clear the flag in case packet comes from loopback */
2335 		m->m_flags &= ~M_FRAG;
2336 	}
2337 	ip->ip_off <<= 3;
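	/*
	 * Editor's note (not part of the original source): the fragment
	 * offset field is carried in units of 8 octets (RFC 791), which
	 * is also why the check above insists that every IP_MF fragment
	 * carry a multiple of 8 bytes of data.  The shift converts the
	 * offset to bytes; e.g. ip_off = 185 on the wire means byte
	 * offset 1480.
	 */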
2338 
2339 	m->m_pkthdr.pkt_hdr = ip;
2340 
2341 	/* Previous ip_reass() started here. */
2342 	/*
2343 	 * Presence of header sizes in mbufs
2344 	 * would confuse code below.
2345 	 */
2346 	m->m_data += hlen;
2347 	m->m_len -= hlen;
2348 
2349 	/*
2350 	 * If first fragment to arrive, create a reassembly queue.
2351 	 */
2352 	if (fp == NULL) {
2353 		fp = ipq_alloc(M_DONTWAIT);
2354 		if (fp == NULL) {
2355 			goto dropfrag;
2356 		}
2357 		TAILQ_INSERT_HEAD(head, fp, ipq_list);
2358 		nipq++;
2359 		fp->ipq_nfrags = 1;
2360 		fp->ipq_ttl = IPFRAGTTL;
2361 		fp->ipq_p = ip->ip_p;
2362 		fp->ipq_id = ip->ip_id;
2363 		fp->ipq_src = ip->ip_src;
2364 		fp->ipq_dst = ip->ip_dst;
2365 		fp->ipq_frags = m;
2366 		m->m_nextpkt = NULL;
2367 		/*
2368 		 * If the first fragment has valid checksum offload
2369 		 * info, the rest of the fragments are eligible as well.
2370 		 */
2371 		if (csum_flags != 0) {
2372 			fp->ipq_csum = csum;
2373 			fp->ipq_csum_flags = csum_flags;
2374 		}
2375 		m = NULL;       /* nothing to return */
2376 		goto done;
2377 	} else {
2378 		fp->ipq_nfrags++;
2379 	}
2380 
2381 #define GETIP(m)        ((struct ip *)((m)->m_pkthdr.pkt_hdr))
2382 
2383 	/*
2384 	 * Handle ECN by comparing this segment with the first one;
2385 	 * if CE is set, do not lose CE.
2386 	 * Drop if CE and not-ECT are mixed for the same packet.
2387 	 */
2388 	ecn = ip->ip_tos & IPTOS_ECN_MASK;
2389 	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
2390 	if (ecn == IPTOS_ECN_CE) {
2391 		if (ecn0 == IPTOS_ECN_NOTECT) {
2392 			goto dropfrag;
2393 		}
2394 		if (ecn0 != IPTOS_ECN_CE) {
2395 			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
2396 		}
2397 	}
2398 	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
2399 		goto dropfrag;
2400 	}
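	/*
	 * Editor's summary of the ECN rules just enforced (descriptive
	 * only, not part of the original source):
	 *
	 *	first frag	this frag	action
	 *	not-ECT		CE		drop this fragment
	 *	ECT/CE		not-ECT		drop this fragment
	 *	ECT		CE		mark first frag CE, keep
	 *	otherwise			keep as-is
	 *
	 * i.e. a CE mark must never be lost, and CE and not-ECT must not
	 * be mixed within one reassembled datagram.
	 */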
2401 
2402 	/*
2403 	 * Find a segment which begins after this one does.
2404 	 */
2405 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2406 		if (GETIP(q)->ip_off > ip->ip_off) {
2407 			break;
2408 		}
2409 	}
2410 
2411 	/*
2412 	 * If there is a preceding segment, it may provide some of
2413 	 * our data already.  If so, drop the data from the incoming
2414 	 * segment.  If it provides all of our data, drop us, otherwise
2415 	 * stick new segment in the proper place.
2416 	 *
2417 	 * If some of the data is dropped from the preceding
2418 	 * segment, then its checksum is invalidated.
2419 	 */
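	/*
	 * Editor's worked example (hypothetical offsets, not part of the
	 * original source): if the preceding fragment p covers bytes
	 * [0, 24) and this fragment arrives with ip_off = 16, ip_len = 16,
	 * then i = 0 + 24 - 16 = 8, so the first 8 bytes are duplicates:
	 * m_adj(m, 8) drops them and the fragment is queued as [24, 32).
	 * Had i been >= 16, the fragment would have carried no new data
	 * and been dropped whole via dropfrag.
	 */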
2420 	if (p) {
2421 		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
2422 		if (i > 0) {
2423 			if (i >= ip->ip_len) {
2424 				goto dropfrag;
2425 			}
2426 			m_adj(m, i);
2427 			fp->ipq_csum_flags = 0;
2428 			ip->ip_off += i;
2429 			ip->ip_len -= i;
2430 		}
2431 		m->m_nextpkt = p->m_nextpkt;
2432 		p->m_nextpkt = m;
2433 	} else {
2434 		m->m_nextpkt = fp->ipq_frags;
2435 		fp->ipq_frags = m;
2436 	}
2437 
2438 	/*
2439 	 * While we overlap succeeding segments, trim them or,
2440 	 * if they are completely covered, dequeue them.
2441 	 */
2442 	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
2443 	    q = nq) {
2444 		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
2445 		if (i < GETIP(q)->ip_len) {
2446 			GETIP(q)->ip_len -= i;
2447 			GETIP(q)->ip_off += i;
2448 			m_adj(q, i);
2449 			fp->ipq_csum_flags = 0;
2450 			break;
2451 		}
2452 		nq = q->m_nextpkt;
2453 		m->m_nextpkt = nq;
2454 		ipstat.ips_fragdropped++;
2455 		fp->ipq_nfrags--;
2456 		/* defer freeing until after lock is dropped */
2457 		MBUFQ_ENQUEUE(&dfq, q);
2458 	}
2459 
2460 	/*
2461 	 * If this fragment carries the same checksum offload info
2462 	 * as the existing ones, accumulate the checksum.  Otherwise,
2463 	 * invalidate the checksum offload info for the entire datagram.
2464 	 */
2465 	if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) {
2466 		fp->ipq_csum += csum;
2467 	} else if (fp->ipq_csum_flags != 0) {
2468 		fp->ipq_csum_flags = 0;
2469 	}
2470 
2471 
2472 	/*
2473 	 * Check for complete reassembly and perform frag per packet
2474 	 * limiting.
2475 	 *
2476 	 * Frag limiting is performed here so that the nth frag has
2477 	 * a chance to complete the packet before we drop the packet.
2478 	 * As a result, n+1 frags are actually allowed per packet, but
2479 	 * only n will ever be stored. (n = maxfragsperpacket.)
2480 	 *
2481 	 */
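	/*
	 * Editor's example (not part of the original source): with
	 * maxfragsperpacket = 8, the 9th fragment of a datagram is still
	 * examined here -- if it completes the datagram, it is delivered;
	 * but if a gap remains, the whole queue (now with nfrags > 8) is
	 * torn down and counted in ips_fragdropped.
	 */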
2482 	next = 0;
2483 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2484 		if (GETIP(q)->ip_off != next) {
2485 			if (fp->ipq_nfrags > maxfragsperpacket) {
2486 				ipstat.ips_fragdropped += fp->ipq_nfrags;
2487 				frag_freef(head, fp);
2488 			}
2489 			m = NULL;       /* nothing to return */
2490 			goto done;
2491 		}
2492 		next += GETIP(q)->ip_len;
2493 	}
2494 	/* Make sure the last packet didn't have the IP_MF flag */
2495 	if (p->m_flags & M_FRAG) {
2496 		if (fp->ipq_nfrags > maxfragsperpacket) {
2497 			ipstat.ips_fragdropped += fp->ipq_nfrags;
2498 			frag_freef(head, fp);
2499 		}
2500 		m = NULL;               /* nothing to return */
2501 		goto done;
2502 	}
2503 
2504 	/*
2505 	 * Reassembly is complete.  Make sure the packet is a sane size.
2506 	 */
2507 	q = fp->ipq_frags;
2508 	ip = GETIP(q);
2509 	if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
2510 		ipstat.ips_toolong++;
2511 		ipstat.ips_fragdropped += fp->ipq_nfrags;
2512 		frag_freef(head, fp);
2513 		m = NULL;               /* nothing to return */
2514 		goto done;
2515 	}
2516 
2517 	/*
2518 	 * Concatenate fragments.
2519 	 */
2520 	m = q;
2521 	t = m->m_next;
2522 	m->m_next = NULL;
2523 	m_cat(m, t);
2524 	nq = q->m_nextpkt;
2525 	q->m_nextpkt = NULL;
2526 	for (q = nq; q != NULL; q = nq) {
2527 		nq = q->m_nextpkt;
2528 		q->m_nextpkt = NULL;
2529 		m_cat(m, q);
2530 	}
2531 
2532 	/*
2533 	 * Store partial hardware checksum info from the fragment queue;
2534 	 * the receive start offset is set to 20 bytes (see code at the
2535 	 * top of this routine.)
2536 	 */
2537 	if (fp->ipq_csum_flags != 0) {
2538 		csum = fp->ipq_csum;
2539 
2540 		ADDCARRY(csum);
2541 
2542 		m->m_pkthdr.csum_rx_val = (uint16_t)csum;
2543 		m->m_pkthdr.csum_rx_start = sizeof(struct ip);
2544 		m->m_pkthdr.csum_flags = fp->ipq_csum_flags;
2545 	} else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
2546 	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
2547 		/* loopback checksums are always OK */
2548 		m->m_pkthdr.csum_data = 0xffff;
2549 		m->m_pkthdr.csum_flags =
2550 		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
2551 		    CSUM_IP_CHECKED | CSUM_IP_VALID;
2552 	}
2553 
2554 	/*
2555 	 * Create header for new ip packet by modifying header of first
2556 	 * packet; dequeue and discard fragment reassembly header.
2557 	 * Make header visible.
2558 	 */
2559 	ip->ip_len = (u_short)((IP_VHL_HL(ip->ip_vhl) << 2) + next);
2560 	ip->ip_src = fp->ipq_src;
2561 	ip->ip_dst = fp->ipq_dst;
2562 
2563 	fp->ipq_frags = NULL;   /* return to caller as 'm' */
2564 	frag_freef(head, fp);
2565 	fp = NULL;
2566 
2567 	m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
2568 	m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
2569 	/* some debugging cruft by sklower, below, will go away soon */
2570 	if (m->m_flags & M_PKTHDR) {    /* XXX this should be done elsewhere */
2571 		m_fixhdr(m);
2572 	}
2573 	ipstat.ips_reassembled++;
2574 
2575 	/* arm the purge timer if not already and if there's work to do */
2576 	frag_sched_timeout();
2577 	lck_mtx_unlock(&ipqlock);
2578 	/* perform deferred free (if needed) now that lock is dropped */
2579 	if (!MBUFQ_EMPTY(&dfq)) {
2580 		MBUFQ_DRAIN(&dfq);
2581 	}
2582 	VERIFY(MBUFQ_EMPTY(&dfq));
2583 	return m;
2584 
2585 done:
2586 	VERIFY(m == NULL);
2587 	/* arm the purge timer if not already and if there's work to do */
2588 	frag_sched_timeout();
2589 	lck_mtx_unlock(&ipqlock);
2590 	/* perform deferred free (if needed) */
2591 	if (!MBUFQ_EMPTY(&dfq)) {
2592 		MBUFQ_DRAIN(&dfq);
2593 	}
2594 	VERIFY(MBUFQ_EMPTY(&dfq));
2595 	return NULL;
2596 
2597 dropfrag:
2598 	ipstat.ips_fragdropped++;
2599 	if (fp != NULL) {
2600 		fp->ipq_nfrags--;
2601 	}
2602 	/* arm the purge timer if not already and if there's work to do */
2603 	frag_sched_timeout();
2604 	lck_mtx_unlock(&ipqlock);
2605 	m_freem(m);
2606 	/* perform deferred free (if needed) */
2607 	if (!MBUFQ_EMPTY(&dfq)) {
2608 		MBUFQ_DRAIN(&dfq);
2609 	}
2610 	VERIFY(MBUFQ_EMPTY(&dfq));
2611 	return NULL;
2612 #undef GETIP
2613 }
2614 
2615 /*
2616  * Free a fragment reassembly header and all
2617  * associated datagrams.
2618  */
2619 static void
2620 frag_freef(struct ipqhead *fhp, struct ipq *fp)
2621 {
2622 	LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2623 
2624 	fp->ipq_nfrags = 0;
2625 	if (fp->ipq_frags != NULL) {
2626 		m_freem_list(fp->ipq_frags);
2627 		fp->ipq_frags = NULL;
2628 	}
2629 	TAILQ_REMOVE(fhp, fp, ipq_list);
2630 	nipq--;
2631 	ipq_free(fp);
2632 }
2633 
2634 /*
2635  * IP reassembly timer processing
2636  */
2637 static void
2638 frag_timeout(void *arg)
2639 {
2640 #pragma unused(arg)
2641 	struct ipq *fp;
2642 	int i;
2643 
2644 	/*
2645 	 * Update coarse-grained networking timestamp (in sec.); the idea
2646 	 * is to piggy-back on the timeout callout to update the counter
2647 	 * returnable via net_uptime().
2648 	 */
2649 	net_update_uptime();
2650 
2651 	lck_mtx_lock(&ipqlock);
2652 	for (i = 0; i < IPREASS_NHASH; i++) {
2653 		for (fp = TAILQ_FIRST(&ipq[i]); fp;) {
2654 			struct ipq *fpp;
2655 
2656 			fpp = fp;
2657 			fp = TAILQ_NEXT(fp, ipq_list);
2658 			if (--fpp->ipq_ttl == 0) {
2659 				ipstat.ips_fragtimeout += fpp->ipq_nfrags;
2660 				frag_freef(&ipq[i], fpp);
2661 			}
2662 		}
2663 	}
2664 	/*
2665 	 * If we are over the maximum number of fragments
2666 	 * (due to the limit being lowered), drain off
2667 	 * enough to get down to the new limit.
2668 	 */
2669 	if (maxnipq >= 0 && nipq > (unsigned)maxnipq) {
2670 		for (i = 0; i < IPREASS_NHASH; i++) {
2671 			while (nipq > (unsigned)maxnipq &&
2672 			    !TAILQ_EMPTY(&ipq[i])) {
2673 				ipstat.ips_fragdropped +=
2674 				    TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2675 				frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
2676 			}
2677 		}
2678 	}
2679 	/* re-arm the purge timer if there's work to do */
2680 	frag_timeout_run = 0;
2681 	frag_sched_timeout();
2682 	lck_mtx_unlock(&ipqlock);
2683 }
2684 
2685 static void
2686 frag_sched_timeout(void)
2687 {
2688 	LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2689 
2690 	if (!frag_timeout_run && nipq > 0) {
2691 		frag_timeout_run = 1;
2692 		timeout(frag_timeout, NULL, hz);
2693 	}
2694 }
2695 
2696 /*
2697  * Drain off all datagram fragments.
2698  */
2699 static void
2700 frag_drain(void)
2701 {
2702 	int i;
2703 
2704 	lck_mtx_lock(&ipqlock);
2705 	for (i = 0; i < IPREASS_NHASH; i++) {
2706 		while (!TAILQ_EMPTY(&ipq[i])) {
2707 			ipstat.ips_fragdropped +=
2708 			    TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2709 			frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
2710 		}
2711 	}
2712 	lck_mtx_unlock(&ipqlock);
2713 }
2714 
2715 static struct ipq *
2716 ipq_alloc(int how)
2717 {
2718 	struct mbuf *t;
2719 	struct ipq *fp;
2720 
2721 	/*
2722 	 * See comments in ipq_updateparams().  Keep the count separate
2723 	 * from nipq since the latter represents the elements already
2724 	 * in the reassembly queues.
2725 	 */
2726 	if (ipq_limit > 0 && ipq_count > ipq_limit) {
2727 		return NULL;
2728 	}
2729 
2730 	t = m_get(how, MT_FTABLE);
2731 	if (t != NULL) {
2732 		atomic_add_32(&ipq_count, 1);
2733 		fp = mtod(t, struct ipq *);
2734 		bzero(fp, sizeof(*fp));
2735 	} else {
2736 		fp = NULL;
2737 	}
2738 	return fp;
2739 }
2740 
2741 static void
2742 ipq_free(struct ipq *fp)
2743 {
2744 	(void) m_free(dtom(fp));
2745 	atomic_add_32(&ipq_count, -1);
2746 }
2747 
2748 /*
2749  * Drain callback
2750  */
2751 void
2752 ip_drain(void)
2753 {
2754 	frag_drain();           /* fragments */
2755 	in_rtqdrain();          /* protocol cloned routes */
2756 	in_arpdrain(NULL);      /* cloned routes: ARP */
2757 }
2758 
2759 /*
2760  * Do option processing on a datagram,
2761  * possibly discarding it if bad options are encountered,
2762  * or forwarding it if source-routed.
2763  * The pass argument is used when operating in the IPSTEALTH
2764  * mode to tell what options to process:
2765  * [LS]SRR (pass 0) or the others (pass 1).
2766  * The reason for as many as two passes is that when doing IPSTEALTH,
2767  * non-routing options should be processed only if the packet is for us.
2768  * Returns 1 if packet has been forwarded/freed,
2769  * 0 if the packet should be processed further.
2770  */
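/*
 * Editor's reference for the parsing below (standard RFC 791 encodings,
 * not part of the original source): each multi-byte option is laid out as
 *
 *	cp[IPOPT_OPTVAL] (0)	option type: LSRR = 131, SSRR = 137,
 *				RR = 7, TS = 68
 *	cp[IPOPT_OLEN]   (1)	total option length in bytes
 *	cp[IPOPT_OFFSET] (2)	1-based pointer into the option data,
 *				>= IPOPT_MINOFF (4) for the source-route
 *				and record-route options
 *
 * followed by the option payload (a list of struct in_addr for the
 * routing options).
 */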
2771 static int
2772 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
2773 {
2774 #pragma unused(pass)
2775 	struct ip *ip = mtod(m, struct ip *);
2776 	u_char *cp;
2777 	struct ip_timestamp *ipt;
2778 	struct in_ifaddr *ia;
2779 	int opt, optlen, cnt, off, type = ICMP_PARAMPROB, forward = 0;
2780 	uint8_t code = 0;
2781 	struct in_addr *sin, dst;
2782 	u_int32_t ntime;
2783 	struct sockaddr_in ipaddr = {
2784 		.sin_len = sizeof(ipaddr),
2785 		.sin_family = AF_INET,
2786 		.sin_port = 0,
2787 		.sin_addr = { .s_addr = 0 },
2788 		.sin_zero = { 0, }
2789 	};
2790 
2791 	/* Expect 32-bit aligned data pointer on strict-align platforms */
2792 	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2793 
2794 	dst = ip->ip_dst;
2795 	cp = (u_char *)(ip + 1);
2796 	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2797 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
2798 		opt = cp[IPOPT_OPTVAL];
2799 		if (opt == IPOPT_EOL) {
2800 			break;
2801 		}
2802 		if (opt == IPOPT_NOP) {
2803 			optlen = 1;
2804 		} else {
2805 			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2806 				code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2807 				goto bad;
2808 			}
2809 			optlen = cp[IPOPT_OLEN];
2810 			if (optlen < IPOPT_OLEN + sizeof(*cp) ||
2811 			    optlen > cnt) {
2812 				code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2813 				goto bad;
2814 			}
2815 		}
2816 		switch (opt) {
2817 		default:
2818 			break;
2819 
2820 		/*
2821 		 * Source routing with record.
2822 		 * Find interface with current destination address.
2823 		 * If none on this machine then drop if strictly routed,
2824 		 * or do nothing if loosely routed.
2825 		 * Record interface address and bring up next address
2826 		 * component.  If strictly routed make sure next
2827 		 * address is on directly accessible net.
2828 		 */
2829 		case IPOPT_LSRR:
2830 		case IPOPT_SSRR:
2831 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2832 				code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2833 				goto bad;
2834 			}
2835 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2836 				code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2837 				goto bad;
2838 			}
2839 			ipaddr.sin_addr = ip->ip_dst;
2840 			ia = (struct in_ifaddr *)ifa_ifwithaddr(SA(&ipaddr));
2841 			if (ia == NULL) {
2842 				if (opt == IPOPT_SSRR) {
2843 					type = ICMP_UNREACH;
2844 					code = ICMP_UNREACH_SRCFAIL;
2845 					goto bad;
2846 				}
2847 				if (!ip_dosourceroute) {
2848 					goto nosourcerouting;
2849 				}
2850 				/*
2851 				 * Loose routing, and not at next destination
2852 				 * yet; nothing to do except forward.
2853 				 */
2854 				break;
2855 			} else {
2856 				IFA_REMREF(&ia->ia_ifa);
2857 				ia = NULL;
2858 			}
2859 			off--;                  /* 0 origin */
2860 			if (off > optlen - (int)sizeof(struct in_addr)) {
2861 				/*
2862 				 * End of source route.  Should be for us.
2863 				 */
2864 				if (!ip_acceptsourceroute) {
2865 					goto nosourcerouting;
2866 				}
2867 				save_rte(cp, ip->ip_src);
2868 				break;
2869 			}
2870 
2871 			if (!ip_dosourceroute) {
2872 				if (ipforwarding) {
2873 					char buf[MAX_IPv4_STR_LEN];
2874 					char buf2[MAX_IPv4_STR_LEN];
2875 					/*
2876 					 * Acting as a router, so generate ICMP
2877 					 */
2878 nosourcerouting:
2879 					log(LOG_WARNING,
2880 					    "attempted source route from %s "
2881 					    "to %s\n",
2882 					    inet_ntop(AF_INET, &ip->ip_src,
2883 					    buf, sizeof(buf)),
2884 					    inet_ntop(AF_INET, &ip->ip_dst,
2885 					    buf2, sizeof(buf2)));
2886 					type = ICMP_UNREACH;
2887 					code = ICMP_UNREACH_SRCFAIL;
2888 					goto bad;
2889 				} else {
2890 					/*
2891 					 * Not acting as a router,
2892 					 * so silently drop.
2893 					 */
2894 					OSAddAtomic(1, &ipstat.ips_cantforward);
2895 					m_freem(m);
2896 					return 1;
2897 				}
2898 			}
2899 
2900 			/*
2901 			 * locate outgoing interface
2902 			 */
2903 			(void) memcpy(&ipaddr.sin_addr, cp + off,
2904 			    sizeof(ipaddr.sin_addr));
2905 
2906 			if (opt == IPOPT_SSRR) {
2907 #define INA     struct in_ifaddr *
2908 				if ((ia = (INA)ifa_ifwithdstaddr(
2909 					    SA(&ipaddr))) == NULL) {
2910 					ia = (INA)ifa_ifwithnet(SA(&ipaddr));
2911 				}
2912 			} else {
2913 				ia = ip_rtaddr(ipaddr.sin_addr);
2914 			}
2915 			if (ia == NULL) {
2916 				type = ICMP_UNREACH;
2917 				code = ICMP_UNREACH_SRCFAIL;
2918 				goto bad;
2919 			}
2920 			ip->ip_dst = ipaddr.sin_addr;
2921 			IFA_LOCK(&ia->ia_ifa);
2922 			(void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2923 			    sizeof(struct in_addr));
2924 			IFA_UNLOCK(&ia->ia_ifa);
2925 			IFA_REMREF(&ia->ia_ifa);
2926 			ia = NULL;
2927 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2928 			/*
2929 			 * Let ip_intr's mcast routing check handle mcast pkts
2930 			 */
2931 			forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
2932 			break;
2933 
2934 		case IPOPT_RR:
2935 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2936 				code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2937 				goto bad;
2938 			}
2939 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2940 				code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2941 				goto bad;
2942 			}
2943 			/*
2944 			 * If no space remains, ignore.
2945 			 */
2946 			off--;                  /* 0 origin */
2947 			if (off > optlen - (int)sizeof(struct in_addr)) {
2948 				break;
2949 			}
2950 			(void) memcpy(&ipaddr.sin_addr, &ip->ip_dst,
2951 			    sizeof(ipaddr.sin_addr));
2952 			/*
2953 			 * locate outgoing interface; if we're the destination,
2954 			 * use the incoming interface (should be same).
2955 			 */
2956 			if ((ia = (INA)ifa_ifwithaddr(SA(&ipaddr))) == NULL) {
2957 				if ((ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) {
2958 					type = ICMP_UNREACH;
2959 					code = ICMP_UNREACH_HOST;
2960 					goto bad;
2961 				}
2962 			}
2963 			IFA_LOCK(&ia->ia_ifa);
2964 			(void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2965 			    sizeof(struct in_addr));
2966 			IFA_UNLOCK(&ia->ia_ifa);
2967 			IFA_REMREF(&ia->ia_ifa);
2968 			ia = NULL;
2969 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2970 			break;
2971 
2972 		case IPOPT_TS:
2973 			code = (uint8_t)(cp - (u_char *)ip);
2974 			ipt = (struct ip_timestamp *)(void *)cp;
2975 			if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
2976 				code = (uint8_t)((u_char *)&ipt->ipt_len -
2977 				    (u_char *)ip);
2978 				goto bad;
2979 			}
2980 			if (ipt->ipt_ptr < 5) {
2981 				code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2982 				    (u_char *)ip);
2983 				goto bad;
2984 			}
2985 			if (ipt->ipt_ptr >
2986 			    ipt->ipt_len - (int)sizeof(int32_t)) {
2987 				if (++ipt->ipt_oflw == 0) {
2988 					code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2989 					    (u_char *)ip);
2990 					goto bad;
2991 				}
2992 				break;
2993 			}
2994 			sin = (struct in_addr *)(void *)(cp + ipt->ipt_ptr - 1);
2995 			switch (ipt->ipt_flg) {
2996 			case IPOPT_TS_TSONLY:
2997 				break;
2998 
2999 			case IPOPT_TS_TSANDADDR:
3000 				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
3001 				    sizeof(struct in_addr) > ipt->ipt_len) {
3002 					code = (uint8_t)((u_char *)&ipt->ipt_ptr -
3003 					    (u_char *)ip);
3004 					goto bad;
3005 				}
3006 				ipaddr.sin_addr = dst;
3007 				ia = (INA)ifaof_ifpforaddr(SA(&ipaddr),
3008 				    m->m_pkthdr.rcvif);
3009 				if (ia == NULL) {
3010 					continue;
3011 				}
3012 				IFA_LOCK(&ia->ia_ifa);
3013 				(void) memcpy(sin, &IA_SIN(ia)->sin_addr,
3014 				    sizeof(struct in_addr));
3015 				IFA_UNLOCK(&ia->ia_ifa);
3016 				ipt->ipt_ptr += sizeof(struct in_addr);
3017 				IFA_REMREF(&ia->ia_ifa);
3018 				ia = NULL;
3019 				break;
3020 
3021 			case IPOPT_TS_PRESPEC:
3022 				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
3023 				    sizeof(struct in_addr) > ipt->ipt_len) {
3024 					code = (uint8_t)((u_char *)&ipt->ipt_ptr -
3025 					    (u_char *)ip);
3026 					goto bad;
3027 				}
3028 				(void) memcpy(&ipaddr.sin_addr, sin,
3029 				    sizeof(struct in_addr));
3030 				if ((ia = (struct in_ifaddr *)ifa_ifwithaddr(
3031 					    SA(&ipaddr))) == NULL) {
3032 					continue;
3033 				}
3034 				IFA_REMREF(&ia->ia_ifa);
3035 				ia = NULL;
3036 				ipt->ipt_ptr += sizeof(struct in_addr);
3037 				break;
3038 
3039 			default:
3040 				/* XXX can't take &ipt->ipt_flg */
3041 				code = (uint8_t)((u_char *)&ipt->ipt_ptr -
3042 				    (u_char *)ip + 1);
3043 				goto bad;
3044 			}
3045 			ntime = iptime();
3046 			(void) memcpy(cp + ipt->ipt_ptr - 1, &ntime,
3047 			    sizeof(n_time));
3048 			ipt->ipt_ptr += sizeof(n_time);
3049 		}
3050 	}
3051 	if (forward && ipforwarding) {
3052 		ip_forward(m, 1, next_hop);
3053 		return 1;
3054 	}
3055 	return 0;
3056 bad:
3057 	icmp_error(m, type, code, 0, 0);
3058 	OSAddAtomic(1, &ipstat.ips_badoptions);
3059 	return 1;
3060 }
3061 
3062 /*
3063  * Check for the presence of the IP Router Alert option [RFC2113]
3064  * in the header of an IPv4 datagram.
3065  *
3066  * This call is not intended for use from the forwarding path; it is here
3067  * so that protocol domains may check for the presence of the option.
3068  * Given how FreeBSD's IPv4 stack is currently structured, the Router Alert
3069  * option does not have much relevance to the implementation, though this
3070  * may change in future.
3071  * Router alert options SHOULD be passed if running in IPSTEALTH mode and
3072  * we are not the endpoint.
3073  * Length checks on individual options should already have been performed
3074  * by ip_dooptions(); therefore they are folded under DIAGNOSTIC here.
3075  *
3076  * Return zero if not present or options are invalid, non-zero if present.
3077  */
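/*
 * Editor's note (not part of the original source): on the wire a Router
 * Alert option is exactly four bytes, per RFC 2113:
 *
 *	0x94 0x04 0x00 0x00
 *	type 148 (copied bit set, option 20), length 4, 16-bit value 0
 *	("routers should examine this packet")
 *
 * which matches the DIAGNOSTIC length/value check in the IPOPT_RA case
 * below.
 */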
3078 int
3079 ip_checkrouteralert(struct mbuf *m)
3080 {
3081 	struct ip *ip = mtod(m, struct ip *);
3082 	u_char *cp;
3083 	int opt, optlen, cnt, found_ra;
3084 
3085 	found_ra = 0;
3086 	cp = (u_char *)(ip + 1);
3087 	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
3088 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
3089 		opt = cp[IPOPT_OPTVAL];
3090 		if (opt == IPOPT_EOL) {
3091 			break;
3092 		}
3093 		if (opt == IPOPT_NOP) {
3094 			optlen = 1;
3095 		} else {
3096 #ifdef DIAGNOSTIC
3097 			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
3098 				break;
3099 			}
3100 #endif
3101 			optlen = cp[IPOPT_OLEN];
3102 #ifdef DIAGNOSTIC
3103 			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
3104 				break;
3105 			}
3106 #endif
3107 		}
3108 		switch (opt) {
3109 		case IPOPT_RA:
3110 #ifdef DIAGNOSTIC
3111 			if (optlen != IPOPT_OFFSET + sizeof(uint16_t) ||
3112 			    (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) {
3113 				break;
3114 			} else
3115 #endif
3116 			found_ra = 1;
3117 			break;
3118 		default:
3119 			break;
3120 		}
3121 	}
3122 
3123 	return found_ra;
3124 }
3125 
3126 /*
3127  * Given address of next destination (final or next hop),
3128  * return internet address info of interface to be used to get there.
3129  */
3130 struct in_ifaddr *
3131 ip_rtaddr(struct in_addr dst)
3132 {
3133 	struct sockaddr_in *sin;
3134 	struct ifaddr *rt_ifa;
3135 	struct route ro;
3136 
3137 	bzero(&ro, sizeof(ro));
3138 	sin = SIN(&ro.ro_dst);
3139 	sin->sin_family = AF_INET;
3140 	sin->sin_len = sizeof(*sin);
3141 	sin->sin_addr = dst;
3142 
3143 	rtalloc_ign(&ro, RTF_PRCLONING);
3144 	if (ro.ro_rt == NULL) {
3145 		ROUTE_RELEASE(&ro);
3146 		return NULL;
3147 	}
3148 
3149 	RT_LOCK(ro.ro_rt);
3150 	if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) {
3151 		IFA_ADDREF(rt_ifa);
3152 	}
3153 	RT_UNLOCK(ro.ro_rt);
3154 	ROUTE_RELEASE(&ro);
3155 
3156 	return (struct in_ifaddr *)rt_ifa;
3157 }
3158 
3159 /*
3160  * Save incoming source route for use in replies,
3161  * to be picked up later by ip_srcroute if the receiver is interested.
3162  */
3163 void
3164 save_rte(u_char *option, struct in_addr dst)
3165 {
3166 	unsigned olen;
3167 
3168 	olen = option[IPOPT_OLEN];
3169 #if DIAGNOSTIC
3170 	if (ipprintfs) {
3171 		printf("save_rte: olen %d\n", olen);
3172 	}
3173 #endif
3174 	if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) {
3175 		return;
3176 	}
3177 	bcopy(option, ip_srcrt.srcopt, olen);
3178 	ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
3179 	ip_srcrt.dst = dst;
3180 }
3181 
3182 /*
3183  * Retrieve incoming source route for use in replies,
3184  * in the same form used by setsockopt.
3185  * The first hop is placed before the options; it will be removed later.
3186  */
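/*
 * Editor's sketch of the mbuf built below (descriptive only, not part of
 * the original source), for recorded hops A, B, C and saved destination D
 * (route[0] = A, route[1] = B, route[2] = C, dst = D):
 *
 *	[ C ][ NOP ][ opt hdr ][ B ][ A ][ D ]
 *	first hop               reversed hops  final dst
 *
 * The hops are replayed in reverse so a reply retraces the incoming
 * route; the leading first-hop address sits before the option bytes and
 * is stripped before the options are attached to an outgoing packet.
 */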
3187 struct mbuf *
3188 ip_srcroute(void)
3189 {
3190 	struct in_addr *p, *q;
3191 	struct mbuf *m;
3192 
3193 	if (ip_nhops == 0) {
3194 		return NULL;
3195 	}
3196 
3197 	m = m_get(M_DONTWAIT, MT_HEADER);
3198 	if (m == NULL) {
3199 		return NULL;
3200 	}
3201 
3202 #define OPTSIZ  (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt))
3203 
3204 	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
3205 	m->m_len = ip_nhops * sizeof(struct in_addr) +
3206 	    sizeof(struct in_addr) + OPTSIZ;
3207 #if DIAGNOSTIC
3208 	if (ipprintfs) {
3209 		printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
3210 	}
3211 #endif
3212 
3213 	/*
3214 	 * First save first hop for return route
3215 	 */
3216 	p = &ip_srcrt.route[ip_nhops - 1];
3217 	*(mtod(m, struct in_addr *)) = *p--;
3218 #if DIAGNOSTIC
3219 	if (ipprintfs) {
3220 		printf(" hops %lx",
3221 		    (u_int32_t)ntohl(mtod(m, struct in_addr *)->s_addr));
3222 	}
3223 #endif
3224 
3225 	/*
3226 	 * Copy option fields and padding (nop) to mbuf.
3227 	 */
3228 	ip_srcrt.nop = IPOPT_NOP;
3229 	ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
3230 	(void) memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
3231 	    &ip_srcrt.nop, OPTSIZ);
3232 	q = (struct in_addr *)(void *)(mtod(m, caddr_t) +
3233 	    sizeof(struct in_addr) + OPTSIZ);
3234 #undef OPTSIZ
3235 	/*
3236 	 * Record return path as an IP source route,
3237 	 * reversing the path (pointers are now aligned).
3238 	 */
3239 	while (p >= ip_srcrt.route) {
3240 #if DIAGNOSTIC
3241 		if (ipprintfs) {
3242 			printf(" %lx", (u_int32_t)ntohl(q->s_addr));
3243 		}
3244 #endif
3245 		*q++ = *p--;
3246 	}
3247 	/*
3248 	 * Last hop goes to final destination.
3249 	 */
3250 	*q = ip_srcrt.dst;
3251 #if DIAGNOSTIC
3252 	if (ipprintfs) {
3253 		printf(" %lx\n", (u_int32_t)ntohl(q->s_addr));
3254 	}
3255 #endif
3256 	return m;
3257 }
3258 
3259 /*
3260  * Strip out IP options, at higher level protocol in the kernel.
3261  */
3262 void
3263 ip_stripoptions(struct mbuf *m)
3264 {
3265 	int i;
3266 	struct ip *ip = mtod(m, struct ip *);
3267 	caddr_t opts;
3268 	int olen;
3269 
3270 	/* Expect 32-bit aligned data pointer on strict-align platforms */
3271 	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
3272 
3273 	/* use bcopy() since it supports overlapping ranges */
3274 	olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
3275 	opts = (caddr_t)(ip + 1);
3276 	i = m->m_len - (sizeof(struct ip) + olen);
3277 	bcopy(opts + olen, opts, (unsigned)i);
3278 	m->m_len -= olen;
3279 	if (m->m_flags & M_PKTHDR) {
3280 		m->m_pkthdr.len -= olen;
3281 	}
3282 	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
3283 
3284 	/*
3285 	 * We expect ip_{off,len} to be in host order by now, and
3286 	 * that the original IP header length has been subtracted
3287 	 * out from ip_len.  Temporarily adjust ip_len for checksum
3288 	 * recalculation, and restore it afterwards.
3289 	 */
3290 	ip->ip_len += sizeof(struct ip);
3291 
3292 	/* recompute checksum now that IP header is smaller */
3293 #if BYTE_ORDER != BIG_ENDIAN
3294 	HTONS(ip->ip_len);
3295 	HTONS(ip->ip_off);
3296 #endif /* BYTE_ORDER != BIG_ENDIAN */
3297 	ip->ip_sum = in_cksum_hdr(ip);
3298 #if BYTE_ORDER != BIG_ENDIAN
3299 	NTOHS(ip->ip_off);
3300 	NTOHS(ip->ip_len);
3301 #endif /* BYTE_ORDER != BIG_ENDIAN */
3302 
3303 	ip->ip_len -= sizeof(struct ip);
3304 
3305 	/*
3306 	 * Given that we've just stripped IP options from the header,
3307 	 * we need to adjust the start offset accordingly if this
3308 	 * packet had gone thru partial checksum offload.
3309 	 */
3310 	if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
3311 	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
3312 		if (m->m_pkthdr.csum_rx_start >= (sizeof(struct ip) + olen)) {
3313 			/* most common case */
3314 			m->m_pkthdr.csum_rx_start -= olen;
3315 		} else {
3316 			/* compute checksum in software instead */
3317 			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
3318 			m->m_pkthdr.csum_data = 0;
3319 			ipstat.ips_adj_hwcsum_clr++;
3320 		}
3321 	}
3322 }
3323 
3324 u_char inetctlerrmap[PRC_NCMDS] = {
3325 	0, 0, 0, 0,
3326 	0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
3327 	ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
3328 	EMSGSIZE, EHOSTUNREACH, 0, 0,
3329 	0, 0, EHOSTUNREACH, 0,
3330 	ENOPROTOOPT, ECONNREFUSED
3331 };
3332 
3333 static int
3334 sysctl_ipforwarding SYSCTL_HANDLER_ARGS
3335 {
3336 #pragma unused(arg1, arg2)
3337 	int i, was_ipforwarding = ipforwarding;
3338 
3339 	i = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
3340 	if (i != 0 || req->newptr == USER_ADDR_NULL) {
3341 		return i;
3342 	}
3343 
3344 	if (was_ipforwarding && !ipforwarding) {
3345 		/* clean up IPv4 forwarding cached routes */
3346 		ifnet_head_lock_shared();
3347 		for (i = 0; i <= if_index; i++) {
3348 			struct ifnet *ifp = ifindex2ifnet[i];
3349 			if (ifp != NULL) {
3350 				lck_mtx_lock(&ifp->if_cached_route_lock);
3351 				ROUTE_RELEASE(&ifp->if_fwd_route);
3352 				bzero(&ifp->if_fwd_route,
3353 				    sizeof(ifp->if_fwd_route));
3354 				lck_mtx_unlock(&ifp->if_cached_route_lock);
3355 			}
3356 		}
3357 		ifnet_head_done();
3358 	}
3359 
3360 	return 0;
3361 }
3362 
3363 /*
3364  * Similar to inp_route_{copyout,copyin} routines except that these copy
3365  * out the cached IPv4 forwarding route from struct ifnet instead of the
3366  * inpcb.  See comments for those routines for explanations.
3367  */
3368 static void
3369 ip_fwd_route_copyout(struct ifnet *ifp, struct route *dst)
3370 {
3371 	struct route *src = &ifp->if_fwd_route;
3372 
3373 	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3374 	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3375 
3376 	/* Minor sanity check */
3377 	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3378 		panic("%s: wrong or corrupted route: %p", __func__, src);
3379 	}
3380 
3381 	route_copyout(dst, src, sizeof(*dst));
3382 
3383 	lck_mtx_unlock(&ifp->if_cached_route_lock);
3384 }
3385 
3386 static void
3387 ip_fwd_route_copyin(struct ifnet *ifp, struct route *src)
3388 {
3389 	struct route *dst = &ifp->if_fwd_route;
3390 
3391 	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3392 	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3393 
3394 	/* Minor sanity check */
3395 	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3396 		panic("%s: wrong or corrupted route: %p", __func__, src);
3397 	}
3398 
3399 	if (ifp->if_fwd_cacheok) {
3400 		route_copyin(src, dst, sizeof(*src));
3401 	}
3402 
3403 	lck_mtx_unlock(&ifp->if_cached_route_lock);
3404 }
3405 
3406 /*
3407  * Forward a packet.  If some error occurs, return the sender
3408  * an ICMP packet.  Note we can't always generate a meaningful
3409  * ICMP message because ICMP doesn't have a large enough repertoire
3410  * of codes and types.
3411  *
3412  * If not forwarding, just drop the packet.  This could be confusing
3413  * if ipforwarding was zero but some routing protocol was advancing
3414  * us as a gateway to somewhere.  However, we must let the routing
3415  * protocol deal with that.
3416  *
3417  * The srcrt parameter indicates whether the packet is being forwarded
3418  * via a source route.
3419  */
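/*
 * Editor's note (not part of the original source): IPTTLDEC is the
 * per-hop TTL decrement (1 in the BSD lineage), so the check below
 * rejects packets arriving with ttl <= 1 by sending an ICMP time-exceeded
 * before any forwarding work is done; this is also what makes
 * traceroute's incrementing-TTL probes elicit a reply from each hop.
 */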
3420 static void
3421 ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop)
3422 {
3423 #pragma unused(next_hop)
3424 	struct ip *ip = mtod(m, struct ip *);
3425 	struct sockaddr_in *sin;
3426 	struct rtentry *rt;
3427 	struct route fwd_rt;
3428 	int error, type = 0, code = 0;
3429 	struct mbuf *mcopy;
3430 	n_long dest;
3431 	struct in_addr pkt_dst;
3432 	u_int32_t nextmtu = 0, len;
3433 	struct ip_out_args ipoa;
3434 	struct ifnet *rcvifp = m->m_pkthdr.rcvif;
3435 
3436 	bzero(&ipoa, sizeof(ipoa));
3437 	ipoa.ipoa_boundif = IFSCOPE_NONE;
3438 	ipoa.ipoa_sotc = SO_TC_UNSPEC;
3439 	ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3440 
3441 #if IPSEC
3442 	struct secpolicy *sp = NULL;
3443 	int ipsecerror;
3444 #endif /* IPSEC */
3445 #if PF
3446 	struct pf_mtag *pf_mtag;
3447 #endif /* PF */
3448 
3449 	dest = 0;
3450 	pkt_dst = ip->ip_dst;
3451 
3452 #if DIAGNOSTIC
3453 	if (ipprintfs) {
3454 		printf("forward: src %lx dst %lx ttl %x\n",
3455 		    (u_int32_t)ip->ip_src.s_addr, (u_int32_t)pkt_dst.s_addr,
3456 		    ip->ip_ttl);
3457 	}
3458 #endif
3459 
3460 	if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
3461 		OSAddAtomic(1, &ipstat.ips_cantforward);
3462 		m_freem(m);
3463 		return;
3464 	}
3465 #if IPSTEALTH
3466 	if (!ipstealth) {
3467 #endif /* IPSTEALTH */
3468 	if (ip->ip_ttl <= IPTTLDEC) {
3469 		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
3470 		    dest, 0);
3471 		return;
3472 	}
3473 #if IPSTEALTH
3474 }
3475 #endif /* IPSTEALTH */
3476 
3477 #if PF
3478 	pf_mtag = pf_find_mtag(m);
3479 	if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) {
3480 		ipoa.ipoa_boundif = pf_mtag->pftag_rtableid;
3481 		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
3482 	}
3483 #endif /* PF */
3484 
3485 	ip_fwd_route_copyout(rcvifp, &fwd_rt);
3486 
3487 	sin = SIN(&fwd_rt.ro_dst);
3488 	if (ROUTE_UNUSABLE(&fwd_rt) || pkt_dst.s_addr != sin->sin_addr.s_addr) {
3489 		ROUTE_RELEASE(&fwd_rt);
3490 
3491 		sin->sin_family = AF_INET;
3492 		sin->sin_len = sizeof(*sin);
3493 		sin->sin_addr = pkt_dst;
3494 
3495 		rtalloc_scoped_ign(&fwd_rt, RTF_PRCLONING, ipoa.ipoa_boundif);
3496 		if (fwd_rt.ro_rt == NULL) {
3497 			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
3498 			goto done;
3499 		}
3500 	}
3501 	rt = fwd_rt.ro_rt;
3502 
3503 	/*
3504 	 * Save the IP header and at most 8 bytes of the payload,
3505 	 * in case we need to generate an ICMP message to the src.
3506 	 *
3507 	 * We don't use m_copy() because it might return a reference
3508 	 * to a shared cluster. Both this function and ip_output()
3509 	 * assume exclusive access to the IP header in `m', so any
3510 	 * data in a cluster may change before we reach icmp_error().
3511 	 */
3512 	MGET(mcopy, M_DONTWAIT, m->m_type);
3513 	if (mcopy != NULL && m_dup_pkthdr(mcopy, m, M_DONTWAIT) == 0) {
3514 		mcopy->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
3515 		    (int)ip->ip_len);
3516 		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
3517 	}
3518 
3519 #if IPSTEALTH
3520 	if (!ipstealth) {
3521 #endif /* IPSTEALTH */
3522 	ip->ip_ttl -= IPTTLDEC;
3523 #if IPSTEALTH
3524 	}
3525 #endif /* IPSTEALTH */
3526 
3527 	/*
3528 	 * If we are forwarding the packet out the same interface it came
3529 	 * in on, perhaps we should send a redirect to the sender to
3530 	 * shortcut a hop.  Only send a redirect if the source is sending
3531 	 * directly to us and the packet was not source routed (and has no options).
3532 	 * Also, don't send redirect if forwarding using a default route
3533 	 * or a route modified by a redirect.
3534 	 */
3535 	RT_LOCK_SPIN(rt);
3536 	if (rt->rt_ifp == m->m_pkthdr.rcvif &&
3537 	    !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
3538 	    satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
3539 	    ipsendredirects && !srcrt && rt->rt_ifa != NULL) {
3540 		struct in_ifaddr *ia = (struct in_ifaddr *)rt->rt_ifa;
3541 		u_int32_t src = ntohl(ip->ip_src.s_addr);
3542 
3543 		/* Become a regular mutex */
3544 		RT_CONVERT_LOCK(rt);
3545 		IFA_LOCK_SPIN(&ia->ia_ifa);
3546 		if ((src & ia->ia_subnetmask) == ia->ia_subnet) {
3547 			if (rt->rt_flags & RTF_GATEWAY) {
3548 				dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
3549 			} else {
3550 				dest = pkt_dst.s_addr;
3551 			}
3552 			/*
3553 			 * The Router Requirements RFC says to send
3554 			 * only host redirects.
3555 			 */
3556 			type = ICMP_REDIRECT;
3557 			code = ICMP_REDIRECT_HOST;
3558 #if DIAGNOSTIC
3559 			if (ipprintfs) {
3560 				printf("redirect (%d) to %x\n", code,
3561 				    (u_int32_t)dest);
3562 			}
3563 #endif
3564 		}
3565 		IFA_UNLOCK(&ia->ia_ifa);
3566 	}
3567 	RT_UNLOCK(rt);
3568 
3569 
3570 	/* Mark this packet as being forwarded from another interface */
3571 	m->m_pkthdr.pkt_flags |= PKTF_FORWARDED;
3572 	len = m_pktlen(m);
3573 
3574 	error = ip_output(m, NULL, &fwd_rt, IP_FORWARDING | IP_OUTARGS,
3575 	    NULL, &ipoa);
3576 
3577 	/* Refresh rt since the route could have changed while in IP */
3578 	rt = fwd_rt.ro_rt;
3579 
3580 	if (error != 0) {
3581 		OSAddAtomic(1, &ipstat.ips_cantforward);
3582 	} else {
3583 		/*
3584 		 * Increment stats on the source interface; the ones
3585 		 * for the destination interface have been taken care of
3586 		 * during output above by virtue of PKTF_FORWARDED.
3587 		 */
3588 		rcvifp->if_fpackets++;
3589 		rcvifp->if_fbytes += len;
3590 
3591 		OSAddAtomic(1, &ipstat.ips_forward);
3592 		if (type != 0) {
3593 			OSAddAtomic(1, &ipstat.ips_redirectsent);
3594 		} else {
3595 			if (mcopy != NULL) {
3596 				/*
3597 				 * If we didn't have to go through ipflow and
3598 				 * the packet was successfully consumed by
3599 				 * ip_output, the mcopy is rather a waste;
3600 				 * this could be further optimized.
3601 				 */
3602 				m_freem(mcopy);
3603 			}
3604 			goto done;
3605 		}
3606 	}
3607 	if (mcopy == NULL) {
3608 		goto done;
3609 	}
3610 
3611 	switch (error) {
3612 	case 0:                         /* forwarded, but need redirect */
3613 		/* type, code set above */
3614 		break;
3615 
3616 	case ENETUNREACH:               /* shouldn't happen, checked above */
3617 	case EHOSTUNREACH:
3618 	case ENETDOWN:
3619 	case EHOSTDOWN:
3620 	default:
3621 		type = ICMP_UNREACH;
3622 		code = ICMP_UNREACH_HOST;
3623 		break;
3624 
3625 	case EMSGSIZE:
3626 		type = ICMP_UNREACH;
3627 		code = ICMP_UNREACH_NEEDFRAG;
3628 
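		/*
		 * Path MTU discovery (RFC 1191): the nextmtu computed
		 * below is reported back to the sender in the needfrag
		 * ICMP error generated at the end of this function.
		 */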
3629 		if (rt == NULL) {
3630 			break;
3631 		} else {
3632 			RT_LOCK_SPIN(rt);
3633 			if (rt->rt_ifp != NULL) {
3634 				nextmtu = rt->rt_ifp->if_mtu;
3635 			}
3636 			RT_UNLOCK(rt);
3637 		}
3638 #if IPSEC
3639 		if (ipsec_bypass) {
3640 			break;
3641 		}
3642 
3643 		/*
3644 		 * If the packet is routed over IPsec tunnel, tell the
3645 		 * originator the tunnel MTU.
3646 		 *	tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
3647 		 * XXX quickhack!!!
3648 		 */
3649 		sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND,
3650 		    IP_FORWARDING, &ipsecerror);
3651 
3652 		if (sp == NULL) {
3653 			break;
3654 		}
3655 
3656 		/*
3657 		 * find the correct route for outer IPv4
3658 		 * header, compute tunnel MTU.
3659 		 */
3660 		nextmtu = 0;
3661 
3662 		if (sp->req != NULL &&
3663 		    sp->req->saidx.mode == IPSEC_MODE_TUNNEL) {
3664 			struct secasindex saidx;
3665 			struct secasvar *sav;
3666 			struct route *ro;
3667 			struct ip *ipm;
3668 			size_t ipsechdr;
3669 
3670 			/* count IPsec header size */
3671 			ipsechdr = ipsec_hdrsiz(sp);
3672 
3673 			ipm = mtod(mcopy, struct ip *);
3674 			bcopy(&sp->req->saidx, &saidx, sizeof(saidx));
3675 			saidx.mode = sp->req->saidx.mode;
3676 			saidx.reqid = sp->req->saidx.reqid;
3677 			sin = SIN(&saidx.src);
3678 			if (sin->sin_len == 0) {
3679 				sin->sin_len = sizeof(*sin);
3680 				sin->sin_family = AF_INET;
3681 				sin->sin_port = IPSEC_PORT_ANY;
3682 				bcopy(&ipm->ip_src, &sin->sin_addr,
3683 				    sizeof(sin->sin_addr));
3684 			}
3685 			sin = SIN(&saidx.dst);
3686 			if (sin->sin_len == 0) {
3687 				sin->sin_len = sizeof(*sin);
3688 				sin->sin_family = AF_INET;
3689 				sin->sin_port = IPSEC_PORT_ANY;
3690 				bcopy(&ipm->ip_dst, &sin->sin_addr,
3691 				    sizeof(sin->sin_addr));
3692 			}
3693 			sav = key_allocsa_policy(&saidx);
3694 			if (sav != NULL) {
3695 				lck_mtx_lock(sadb_mutex);
3696 				if (sav->sah != NULL) {
3697 					ro = (struct route *)&sav->sah->sa_route;
3698 					if (ro->ro_rt != NULL) {
3699 						RT_LOCK(ro->ro_rt);
3700 						if (ro->ro_rt->rt_ifp != NULL) {
3701 							nextmtu = ro->ro_rt->
3702 							    rt_ifp->if_mtu;
3703 							nextmtu -= ipsechdr;
3704 						}
3705 						RT_UNLOCK(ro->ro_rt);
3706 					}
3707 				}
3708 				key_freesav(sav, KEY_SADB_LOCKED);
3709 				lck_mtx_unlock(sadb_mutex);
3710 			}
3711 		}
3712 		key_freesp(sp, KEY_SADB_UNLOCKED);
3713 #endif /* IPSEC */
3714 		break;
3715 
3716 	case ENOBUFS:
3717 		/*
3718 		 * Per RFC 1812 (Requirements for IP Version 4 Routers),
3719 		 * a router should not generate ICMP_SOURCEQUENCH.
3720 		 * Source quench could be a big problem under DoS attacks,
3721 		 * or if the underlying interface is rate-limited.
3722 		 * Those who need source quench packets may re-enable them
3723 		 * via the net.inet.ip.sendsourcequench sysctl.
3724 		 */
3725 		if (ip_sendsourcequench == 0) {
3726 			m_freem(mcopy);
3727 			goto done;
3728 		} else {
3729 			type = ICMP_SOURCEQUENCH;
3730 			code = 0;
3731 		}
3732 		break;
3733 
3734 	case EACCES:
3735 		m_freem(mcopy);
3736 		goto done;
3737 	}
3738 
3739 	if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) {
3740 		OSAddAtomic(1, &ipstat.ips_cantfrag);
3741 	}
3742 
3743 	icmp_error(mcopy, type, code, dest, nextmtu);
3744 done:
3745 	ip_fwd_route_copyin(rcvifp, &fwd_rt);
3746 }
3747 
3748 int
3749 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
3750     struct mbuf *m)
3751 {
3752 	*mp = NULL;
3753 	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
3754 		struct timeval tv;
3755 
3756 		getmicrotime(&tv);
3757 		mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv),
3758 		    SCM_TIMESTAMP, SOL_SOCKET, mp);
3759 		if (*mp == NULL) {
3760 			goto no_mbufs;
3761 		}
3762 	}
3763 	if (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) {
3764 		uint64_t time;
3765 
3766 		time = mach_absolute_time();
3767 		mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
3768 		    SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
3769 		if (*mp == NULL) {
3770 			goto no_mbufs;
3771 		}
3772 	}
3773 	if (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) {
3774 		uint64_t time;
3775 
3776 		time = mach_continuous_time();
3777 		mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
3778 		    SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
3779 		if (*mp == NULL) {
3780 			goto no_mbufs;
3781 		}
3782 	}
3783 	if (inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) {
3784 		int tc = m_get_traffic_class(m);
3785 
3786 		mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc),
3787 		    SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
3788 		if (*mp == NULL) {
3789 			goto no_mbufs;
3790 		}
3791 	}
3792 	if ((inp->inp_socket->so_flags & SOF_RECV_WAKE_PKT) &&
3793 	    (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
3794 		int flag = 1;
3795 
3796 		mp = sbcreatecontrol_mbuf((caddr_t)&flag, sizeof(flag),
3797 		    SO_RECV_WAKE_PKT, SOL_SOCKET, mp);
3798 		if (*mp == NULL) {
3799 			goto no_mbufs;
3800 		}
3801 	}
3802 
3803 	if (inp->inp_flags & INP_RECVDSTADDR || SOFLOW_ENABLED(inp->inp_socket)) {
3804 		mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst,
3805 		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp);
3806 		if (*mp == NULL) {
3807 			goto no_mbufs;
3808 		}
3809 	}
3810 #ifdef notyet
3811 	/*
3812 	 * XXX
3813 	 * Moving these out of udp_input() made them even more broken
3814 	 * than they already were.
3815 	 */
3816 	/* options were tossed already */
3817 	if (inp->inp_flags & INP_RECVOPTS) {
3818 		mp = sbcreatecontrol_mbuf((caddr_t)opts_deleted_above,
3819 		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp);
3820 		if (*mp == NULL) {
3821 			goto no_mbufs;
3822 		}
3823 	}
3824 	/* ip_srcroute doesn't do what we want here, need to fix */
3825 	if (inp->inp_flags & INP_RECVRETOPTS) {
3826 		mp = sbcreatecontrol_mbuf((caddr_t)ip_srcroute(),
3827 		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp);
3828 		if (*mp == NULL) {
3829 			goto no_mbufs;
3830 		}
3831 	}
3832 #endif /* notyet */
3833 	if (inp->inp_flags & INP_RECVIF) {
3834 		struct ifnet *ifp;
3835 		uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
3836 		struct sockaddr_dl *sdl2 = SDL(&sdlbuf);
3837 
3838 		/*
3839 		 * Make sure to accommodate the largest possible
3840 		 * size of SA(if_lladdr)->sa_len.
3841 		 */
3842 		_CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
3843 
3844 		ifnet_head_lock_shared();
3845 		if ((ifp = m->m_pkthdr.rcvif) != NULL &&
3846 		    ifp->if_index && (ifp->if_index <= if_index)) {
3847 			struct ifaddr *ifa = ifnet_addrs[ifp->if_index - 1];
3848 			struct sockaddr_dl *sdp;
3849 
3850 			if (!ifa || !ifa->ifa_addr) {
3851 				goto makedummy;
3852 			}
3853 
3854 			IFA_LOCK_SPIN(ifa);
3855 			sdp = SDL(ifa->ifa_addr);
3856 			/*
3857 			 * Change our mind and don't try to copy.
3858 			 */
3859 			if (sdp->sdl_family != AF_LINK) {
3860 				IFA_UNLOCK(ifa);
3861 				goto makedummy;
3862 			}
3863 			/* the above _CASSERT ensures sdl_len fits in sdlbuf */
3864 			bcopy(sdp, sdl2, sdp->sdl_len);
3865 			IFA_UNLOCK(ifa);
3866 		} else {
3867 makedummy:
3868 			sdl2->sdl_len =
3869 			    offsetof(struct sockaddr_dl, sdl_data[0]);
3870 			sdl2->sdl_family = AF_LINK;
3871 			sdl2->sdl_index = 0;
3872 			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
3873 		}
3874 		ifnet_head_done();
3875 		mp = sbcreatecontrol_mbuf((caddr_t)sdl2, sdl2->sdl_len,
3876 		    IP_RECVIF, IPPROTO_IP, mp);
3877 		if (*mp == NULL) {
3878 			goto no_mbufs;
3879 		}
3880 	}
3881 	if (inp->inp_flags & INP_RECVTTL) {
3882 		mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_ttl,
3883 		    sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp);
3884 		if (*mp == NULL) {
3885 			goto no_mbufs;
3886 		}
3887 	}
3888 	if (inp->inp_flags & INP_PKTINFO) {
3889 		struct in_pktinfo pi;
3890 
3891 		bzero(&pi, sizeof(struct in_pktinfo));
3892 		bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof(struct in_addr));
3893 		pi.ipi_ifindex = (m != NULL && m->m_pkthdr.rcvif != NULL) ?
3894 		    m->m_pkthdr.rcvif->if_index : 0;
3895 
3896 		mp = sbcreatecontrol_mbuf((caddr_t)&pi,
3897 		    sizeof(struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp);
3898 		if (*mp == NULL) {
3899 			goto no_mbufs;
3900 		}
3901 	}
3902 	if (inp->inp_flags & INP_RECVTOS) {
3903 		mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_tos,
3904 		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP, mp);
3905 		if (*mp == NULL) {
3906 			goto no_mbufs;
3907 		}
3908 	}
3909 	return 0;
3910 
3911 no_mbufs:
3912 	ipstat.ips_pktdropcntrl++;
3913 	return ENOBUFS;
3914 }
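
/*
 * Userland view (illustrative sketch only; the socket calls below are
 * the standard BSD API, not part of this file): each option handled
 * above surfaces as an ancillary data object from recvmsg(2), e.g.:
 *
 *	int on = 1;
 *	struct msghdr msg;	(msg_control/msg_controllen set up by caller)
 *	struct cmsghdr *cm;
 *
 *	setsockopt(s, IPPROTO_IP, IP_RECVPKTINFO, &on, sizeof(on));
 *	recvmsg(s, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *	    cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == IPPROTO_IP &&
 *		    cm->cmsg_type == IP_RECVPKTINFO)
 *			use(CMSG_DATA(cm));	(hypothetical consumer of
 *						 the struct in_pktinfo)
 *	}
 */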
3915 
3916 static inline u_short
3917 ip_cksum(struct mbuf *m, int hlen)
3918 {
3919 	u_short sum;
3920 
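	/*
	 * Three cases: the driver has already checked the IP header
	 * checksum (trust its verdict), the packet arrived on a real
	 * interface without that offload (verify in software below),
	 * or it was looped back locally and is implicitly valid.
	 */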
3921 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3922 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3923 	} else if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
3924 	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
3925 		/*
3926 		 * The packet arrived on an interface which isn't capable
3927 		 * of verifying the IP header checksum; compute it now.
3928 		 */
3929 		sum = ip_cksum_hdr_in(m, hlen);
3930 	} else {
3931 		sum = 0;
3932 		m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
3933 		    CSUM_IP_CHECKED | CSUM_IP_VALID);
3934 		m->m_pkthdr.csum_data = 0xffff;
3935 	}
3936 
3937 	if (sum != 0) {
3938 		OSAddAtomic(1, &ipstat.ips_badsum);
3939 	}
3940 
3941 	return sum;
3942 }
3943 
3944 static int
3945 ip_getstat SYSCTL_HANDLER_ARGS
3946 {
3947 #pragma unused(oidp, arg1, arg2)
3948 	if (req->oldptr == USER_ADDR_NULL) {
3949 		req->oldlen = (size_t)sizeof(struct ipstat);
3950 	}
3951 
3952 	return SYSCTL_OUT(req, &ipstat, MIN(sizeof(ipstat), req->oldlen));
3953 }
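
/*
 * Sizing convention (sketch; assumes the standard sysctl probe idiom):
 * when called with a NULL old pointer, the handler reports the full
 * struct size so userland can allocate before fetching, e.g.:
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.ip.stats", NULL, &len, NULL, 0);
 *	struct ipstat *st = malloc(len);
 *	sysctlbyname("net.inet.ip.stats", st, &len, NULL, 0);
 *
 * (The OID name is the conventional one for this handler and is an
 * assumption here, as its registration is outside this excerpt.)
 */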
3954 
3955 void
3956 ip_setsrcifaddr_info(struct mbuf *m, uint16_t src_idx, struct in_ifaddr *ia)
3957 {
3958 	VERIFY(m->m_flags & M_PKTHDR);
3959 
3960 	/*
3961 	 * If the source ifaddr is specified, pick up the information
3962 	 * from there; otherwise just grab the passed-in ifindex as the
3963 	 * caller may not have the ifaddr available.
3964 	 */
3965 	if (ia != NULL) {
3966 		m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3967 		m->m_pkthdr.src_ifindex = ia->ia_ifp->if_index;
3968 	} else {
3969 		m->m_pkthdr.src_ifindex = src_idx;
3970 		if (src_idx != 0) {
3971 			m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3972 		}
3973 	}
3974 }
3975 
3976 void
3977 ip_setdstifaddr_info(struct mbuf *m, uint16_t dst_idx, struct in_ifaddr *ia)
3978 {
3979 	VERIFY(m->m_flags & M_PKTHDR);
3980 
3981 	/*
3982 	 * If the destination ifaddr is specified, pick up the information
3983 	 * from there; otherwise just grab the passed-in ifindex as the
3984 	 * caller may not have the ifaddr available.
3985 	 */
3986 	if (ia != NULL) {
3987 		m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3988 		m->m_pkthdr.dst_ifindex = ia->ia_ifp->if_index;
3989 	} else {
3990 		m->m_pkthdr.dst_ifindex = dst_idx;
3991 		if (dst_idx != 0) {
3992 			m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3993 		}
3994 	}
3995 }
3996 
3997 int
3998 ip_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *iaf)
3999 {
4000 	VERIFY(m->m_flags & M_PKTHDR);
4001 
4002 	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
4003 		return -1;
4004 	}
4005 
4006 	if (src_idx != NULL) {
4007 		*src_idx = m->m_pkthdr.src_ifindex;
4008 	}
4009 
4010 	if (iaf != NULL) {
4011 		*iaf = 0;
4012 	}
4013 
4014 	return 0;
4015 }
4016 
4017 int
4018 ip_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *iaf)
4019 {
4020 	VERIFY(m->m_flags & M_PKTHDR);
4021 
4022 	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
4023 		return -1;
4024 	}
4025 
4026 	if (dst_idx != NULL) {
4027 		*dst_idx = m->m_pkthdr.dst_ifindex;
4028 	}
4029 
4030 	if (iaf != NULL) {
4031 		*iaf = 0;
4032 	}
4033 
4034 	return 0;
4035 }
4036 
4037 /*
4038  * Protocol input handler for IPPROTO_GRE.
4039  */
4040 void
4041 gre_input(struct mbuf *m, int off)
4042 {
4043 	gre_input_func_t fn = gre_input_func;
4044 
4045 	/*
4046 	 * If there is a registered GRE input handler, pass mbuf to it.
4047 	 */
4048 	if (fn != NULL) {
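		/*
		 * Drop the domain mutex across the callback so the
		 * handler doesn't run under our lock (it may, for
		 * instance, hand the packet back into the stack).
		 */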
4049 		lck_mtx_unlock(inet_domain_mutex);
4050 		m = fn(m, off, (mtod(m, struct ip *))->ip_p);
4051 		lck_mtx_lock(inet_domain_mutex);
4052 	}
4053 
4054 	/*
4055 	 * If no matching tunnel that is up is found, we inject the
4056 	 * mbuf into the raw IP socket to see if anyone picks it up.
4057 	 */
4058 	if (m != NULL) {
4059 		rip_input(m, off);
4060 	}
4061 }
4062 
4063 /*
4064  * Private KPI for PPP/PPTP.
4065  */
4066 int
4067 ip_gre_register_input(gre_input_func_t fn)
4068 {
4069 	lck_mtx_lock(inet_domain_mutex);
4070 	gre_input_func = fn;
4071 	lck_mtx_unlock(inet_domain_mutex);
4072 
4073 	return 0;
4074 }
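
/*
 * Usage sketch (hypothetical caller, not part of this file): a GRE
 * handler consumes the mbuf and returns NULL, or returns it untouched
 * to fall through to rip_input() in gre_input() above.
 *
 *	static struct mbuf *
 *	my_gre_input(struct mbuf *m, int off, int proto)
 *	{
 *		if (is_my_tunnel(m, off, proto)) {	(hypothetical)
 *			consume(m);			(hypothetical)
 *			return NULL;
 *		}
 *		return m;
 *	}
 *
 *	ip_gre_register_input(my_gre_input);	(attach)
 *	ip_gre_register_input(NULL);		(detach)
 */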
4075 
4076 #if (DEBUG || DEVELOPMENT)
4077 static int
4078 sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS
4079 {
4080 #pragma unused(arg1, arg2)
4081 	int error, i;
4082 
4083 	i = ip_input_measure;
4084 	error = sysctl_handle_int(oidp, &i, 0, req);
4085 	if (error || req->newptr == USER_ADDR_NULL) {
4086 		goto done;
4087 	}
4088 	/* impose bounds */
4089 	if (i < 0 || i > 1) {
4090 		error = EINVAL;
4091 		goto done;
4092 	}
4093 	if (ip_input_measure != i && i == 1) {
4094 		net_perf_initialize(&net_perf, ip_input_measure_bins);
4095 	}
4096 	ip_input_measure = i;
4097 done:
4098 	return error;
4099 }
4100 
4101 static int
4102 sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS
4103 {
4104 #pragma unused(arg1, arg2)
4105 	int error;
4106 	uint64_t i;
4107 
4108 	i = ip_input_measure_bins;
4109 	error = sysctl_handle_quad(oidp, &i, 0, req);
4110 	if (error || req->newptr == USER_ADDR_NULL) {
4111 		goto done;
4112 	}
4113 	/* validate data */
4114 	if (!net_perf_validate_bins(i)) {
4115 		error = EINVAL;
4116 		goto done;
4117 	}
4118 	ip_input_measure_bins = i;
4119 done:
4120 	return error;
4121 }
4122 
4123 static int
4124 sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS
4125 {
4126 #pragma unused(oidp, arg1, arg2)
4127 	if (req->oldptr == USER_ADDR_NULL) {
4128 		req->oldlen = (size_t)sizeof(net_perf);
4129 	}
4130 
4131 	return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
4132 }
4133 #endif /* (DEBUG || DEVELOPMENT) */
4134 
4135 static int
4136 sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS
4137 {
4138 #pragma unused(arg1, arg2)
4139 	int error, i;
4140 
4141 	i = ip_checkinterface;
4142 	error = sysctl_handle_int(oidp, &i, 0, req);
4143 	if (error != 0 || req->newptr == USER_ADDR_NULL) {
4144 		return error;
4145 	}
4146 
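	/*
	 * Accept only the known enforcement modes (by their names, the
	 * weak and strong end-system models of RFC 1122 plus a hybrid
	 * of the two); anything else is rejected with EINVAL.
	 */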
4147 	switch (i) {
4148 	case IP_CHECKINTERFACE_WEAK_ES:
4149 	case IP_CHECKINTERFACE_HYBRID_ES:
4150 	case IP_CHECKINTERFACE_STRONG_ES:
4151 		if (ip_checkinterface != i) {
4152 			ip_checkinterface = i;
4153 			os_log(OS_LOG_DEFAULT, "%s: ip_checkinterface is now %d\n",
4154 			    __func__, ip_checkinterface);
4155 		}
4156 		break;
4157 	default:
4158 		error = EINVAL;
4159 		break;
4160 	}
4161 	return error;
4162 }
4163