/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2007 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define _IP_VHL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/kdebug.h>
#include <mach/mach_time.h>
#include <mach/sdt.h>

#include <machine/endian.h>
#include <dev/random/randomdev.h>

#include <kern/queue.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#include <net/ntstat.h>
#include <net/dlil.h>
#include <net/classq/classq.h>
#include <net/net_perf.h>
#include <net/init.h>
#if PF
#include <net/pfvar.h>
#endif /* PF */
#include <net/if_ports_used.h>
#include <net/droptap.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_arp.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/kpi_ipfilter_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/bootp.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#if IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif /* IPSEC */

#include <net/sockaddr_utils.h>

#include <os/log.h>

extern struct inpcbinfo ripcbinfo;

#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETIP, 0)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETIP, 2)
#define DBG_FNC_IP_INPUT	NETDBG_CODE(DBG_NETIP, (2 << 8))

#if IPSEC
extern int ipsec_bypass;
#endif /* IPSEC */

MBUFQ_HEAD(fq_head);

static int frag_timeout_run;	/* frag timer is scheduled to run */
static void frag_timeout(void *);
static void frag_sched_timeout(void);

static struct ipq *ipq_alloc(void);
static void ipq_free(struct ipq *);
static void ipq_updateparams(void);
static void ip_input_second_pass(struct mbuf *, struct ifnet *,
    int, int, struct ip_fw_in_args *);

static LCK_GRP_DECLARE(ipqlock_grp, "ipqlock");
static LCK_MTX_DECLARE(ipqlock, &ipqlock_grp);


/* Packet reassembly stuff */
#define IPREASS_NHASH_LOG2	6
#define IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define IPREASS_HMASK		(IPREASS_NHASH - 1)
#define IPREASS_HASH(x, y) \
	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
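/*
 * Worked example of the hash above (illustrative values): with
 * x = 0x12345678 (ip_src.s_addr) and y = 0x00AB (ip_id),
 * (x & 0xF) = 0x8 and ((x >> 8) & 0xF) << 4 = 0x60, which combine to 0x68;
 * 0x68 ^ 0xAB = 0xC3, and 0xC3 & IPREASS_HMASK (0x3F) = 0x03, so the
 * fragment lands in reassembly bucket 3.
 */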

/* IP fragment reassembly queues (protected by ipqlock) */
static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH];	/* ip reassembly queues */
static int maxnipq;			/* max packets in reass queues */
static u_int32_t maxfragsperpacket;	/* max frags/packet in reass queues */
static u_int32_t nipq;			/* # of packets in reass queues */
static u_int32_t ipq_limit;		/* ipq allocation limit */
static u_int32_t ipq_count;		/* current # of allocated ipq's */

static int sysctl_ipforwarding SYSCTL_HANDLER_ARGS;
static int sysctl_maxnipq SYSCTL_HANDLER_ARGS;
static int sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS;

#if (DEBUG || DEVELOPMENT)
static int sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS;
static int sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS;
static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS;
#endif /* (DEBUG || DEVELOPMENT) */

int ipforwarding = 0;
SYSCTL_PROC(_net_inet_ip, IPCTL_FORWARDING, forwarding,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0,
    sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces");

static int ipsendredirects = 1; /* XXX */
SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0,
    "Enable sending IP redirects");

int ip_defttl = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_defttl, 0, "Maximum TTL on IP packets");

static int ip_dosourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0,
    "Enable forwarding source routed IP packets");

static int ip_acceptsourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0,
    "Enable accepting source routed IP packets");

static int ip_sendsourcequench = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0,
    "Enable the transmission of source quench packets");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq,
    "I", "Maximum number of IPv4 fragment reassembly queue entries");

SYSCTL_UINT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nipq, 0, "Current number of IPv4 fragment reassembly queue entries");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragsperpacket,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0,
    sysctl_maxfragsperpacket, "I",
    "Maximum number of IPv4 fragments allowed per packet");

static uint32_t ip_adj_clear_hwcksum = 0;
SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_clear_hwcksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0,
    "Invalidate hwcksum info when adjusting length");

static uint32_t ip_adj_partial_sum = 1;
SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0,
    "Perform partial sum adjustment of trailing bytes at IP layer");
/*
 * ip_checkinterface controls the receive side of the models for multihoming
 * that are discussed in RFC 1122.
 *
 * ip_checkinterface values are:
 * IP_CHECKINTERFACE_WEAK_ES:
 *	This corresponds to the Weak End-System model where incoming packets from
 *	any interface are accepted provided the destination address of the incoming packet
 *	is assigned to some interface.
 *
 * IP_CHECKINTERFACE_HYBRID_ES:
 *	The Hybrid End-System model uses the Strong End-System model for tunnel
 *	interfaces (ipsec and utun) and the Weak End-System model for other
 *	interface families. This prevents a rogue middlebox from probing for
 *	signs of TCP connections that use the tunnel interface.
 *
 * IP_CHECKINTERFACE_STRONG_ES:
 *	The Strong End-System model requires that the packet arrive on an
 *	interface that is assigned the destination address of the packet.
 *
 * Since the routing table and transmit implementation do not implement the Strong ES model,
 * setting this to a value different from IP_CHECKINTERFACE_WEAK_ES may lead to unexpected results.
 *
 * When forwarding is enabled, the system reverts to the Weak ES model as a router
 * is expected by design to receive packets from several interfaces to the same address.
 *
 * XXX - ip_checkinterface currently must be set to IP_CHECKINTERFACE_WEAK_ES if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be set to IP_CHECKINTERFACE_WEAK_ES if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
#define IP_CHECKINTERFACE_WEAK_ES	0
#define IP_CHECKINTERFACE_HYBRID_ES	1
#define IP_CHECKINTERFACE_STRONG_ES	2
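
/*
 * Selecting the Strong ES model from userspace might look like the
 * following (a hypothetical sketch using the standard sysctlbyname(3)
 * interface, equivalent to `sysctl net.inet.ip.check_interface=2`):
 *
 *	int v = IP_CHECKINTERFACE_STRONG_ES;
 *	sysctlbyname("net.inet.ip.check_interface", NULL, NULL, &v, sizeof(v));
 *
 * Note the caveats above: with forwarding enabled, or with ipnat or
 * loopback aliases in play, only the Weak ES model behaves as expected.
 */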

static int ip_checkinterface = IP_CHECKINTERFACE_HYBRID_ES;

static int sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, check_interface,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_checkinterface, "I", "Verify packet arrives on correct interface");

#if (DEBUG || DEVELOPMENT)
#define IP_CHECK_IF_DEBUG 1
#else
#define IP_CHECK_IF_DEBUG 0
#endif /* (DEBUG || DEVELOPMENT) */
static int ip_checkinterface_debug = IP_CHECK_IF_DEBUG;
SYSCTL_INT(_net_inet_ip, OID_AUTO, checkinterface_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_checkinterface_debug, IP_CHECK_IF_DEBUG, "");

static int ip_chainsz = 6;
SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chainsz, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_chainsz, 1, "IP receive side max chaining");

#if (DEBUG || DEVELOPMENT)
static int ip_input_measure = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement");

static uint64_t ip_input_measure_bins = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_bins,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0,
    sysctl_ip_input_measure_bins, "I",
    "bins for chaining performance data histogram");

static net_perf_t net_perf;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_data,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_input_getperf, "S,net_perf",
    "IP input performance data (struct net_perf, net/net_perf.h)");
#endif /* (DEBUG || DEVELOPMENT) */

#if DIAGNOSTIC
static int ipprintfs = 0;
#endif

struct protosw *ip_protox[IPPROTO_MAX];

static LCK_GRP_DECLARE(in_ifaddr_rwlock_grp, "in_ifaddr_rwlock");
LCK_RW_DECLARE(in_ifaddr_rwlock, &in_ifaddr_rwlock_grp);

#define INADDR_NHASH	61
static uint32_t inaddr_nhash;	/* hash table size */
static uint32_t inaddr_hashp;	/* next largest prime */

/* Protected by in_ifaddr_rwlock */
struct in_ifaddrhead in_ifaddrhead;	/* first inet address */
static struct in_ifaddrhashhead *__counted_by(inaddr_nhash) in_ifaddrhashtbl = NULL;	/* inet addr hash table */

static int ip_getstat SYSCTL_HANDLER_ARGS;
struct ipstat ipstat;
SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, ip_getstat, "S,ipstat",
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#if IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_mtu, 0, "Default MTU");
#endif /* IPCTL_DEFMTU */

#if IPSTEALTH
static int ipstealth = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipstealth, 0, "");
#endif /* IPSTEALTH */

#if DUMMYNET
ip_dn_io_t *ip_dn_io_ptr;
#endif /* DUMMYNET */

SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local");

struct ip_linklocal_stat ip_linklocal_stat;
SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat,
    "Number of link local packets with TTL less than 255");

SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input");

int ip_linklocal_in_allowbadttl = 1;
SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0,
    "Allow incoming link local packets with TTL less than 255");


/*
 * We need to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing. This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
static int ip_nhops = 0;
static struct ip_srcrt {
	struct in_addr dst;			/* final destination */
	char nop;				/* one NOP to align */
	char srcopt[IPOPT_OFFSET + 1];		/* OPTVAL, OLEN and OFFSET */
	struct in_addr route[MAX_IPOPTLEN / sizeof(struct in_addr)];
} ip_srcrt;
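
/*
 * Illustrative layout (inferred from the structure above, not a byte-exact
 * dump): after a source-route option is saved, the buffer reads
 *
 *	dst | NOP | option kind, len, offset | hop1, hop2, ... hopN
 *
 * so that a reply can traverse the recorded hops in reverse order.
 */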

static void in_ifaddrhashtbl_init(void);
static void save_rte(u_char *__indexable, struct in_addr);
static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *);
static void ip_forward(struct mbuf *, int, struct sockaddr_in *);
static void frag_freef(struct ipqhead *, struct ipq *, drop_reason_t);
static struct mbuf *ip_reass(struct mbuf *);
static void ip_fwd_route_copyout(struct ifnet *, struct route *);
static void ip_fwd_route_copyin(struct ifnet *, struct route *);
static inline u_short ip_cksum(struct mbuf *, int);

/*
 * On platforms which require strict alignment (currently for anything but
 * i386 or x86_64 or arm64), check if the IP header pointer is 32-bit aligned; if not,
 * copy the contents of the mbuf chain into a new chain, and free the original
 * one. Create some head room in the first mbuf of the new chain, in case
 * it's needed later on.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
#else /* !__i386__ && !__x86_64__ && !__arm64__ */
#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do {			\
	if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) {			\
		struct mbuf *_n;					\
		struct ifnet *__ifp = (_ifp);				\
		os_atomic_inc(&(__ifp)->if_alignerrs, relaxed);		\
		if (((_m)->m_flags & M_PKTHDR) &&			\
		    (_m)->m_pkthdr.pkt_hdr != NULL)			\
			(_m)->m_pkthdr.pkt_hdr = NULL;			\
		_n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT);	\
		if (_n == NULL) {					\
			os_atomic_inc(&ipstat.ips_toosmall, relaxed);	\
			m_drop(_m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_TOO_SMALL, NULL, 0); \
			(_m) = NULL;					\
			_action;					\
		} else {						\
			VERIFY(_n != (_m));				\
			(_m) = _n;					\
		}							\
	}								\
} while (0)
#endif /* !__i386__ && !__x86_64__ && !__arm64__ */
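
/*
 * Usage sketch: the _action argument is the statement to execute when the
 * fixup fails and the mbuf has already been dropped, e.g.
 *
 *	IP_HDR_ALIGNMENT_FIXUP(m, inifp, return IPINPUT_FREED);
 *
 * as in ip_input_first_pass() below. On i386, x86_64 and arm64 the macro
 * compiles away entirely, since those architectures tolerate unaligned
 * 32-bit loads of the IP header.
 */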


typedef enum ip_check_if_result {
	IP_CHECK_IF_NONE = 0,
	IP_CHECK_IF_OURS = 1,
	IP_CHECK_IF_DROP = 2,
	IP_CHECK_IF_FORWARD = 3
} ip_check_if_result_t;

static ip_check_if_result_t ip_input_check_interface(struct mbuf **, struct ip *, struct ifnet *);

/*
 * GRE input handler function, settable via ip_gre_register_input() for PPTP.
 */
static gre_input_func_t gre_input_func;

static void
ip_init_delayed(void)
{
	struct ifreq ifr;
	int error;
	struct sockaddr_in *__single sin;

	bzero(&ifr, sizeof(ifr));
	strlcpy(ifr.ifr_name, "lo0", sizeof(ifr.ifr_name));
	sin = SIN(&ifr.ifr_addr);
	sin->sin_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	error = in_control(NULL, SIOCSIFADDR, (caddr_t)&ifr, lo_ifp, kernproc);
	if (error) {
		printf("%s: failed to initialise lo0's address, error=%d\n",
		    __func__, error);
	}
}

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(struct protosw *pp, struct domain *dp)
{
	static int ip_initialized = 0;
	struct protosw *__single pr;
	struct timeval tv;
	int i;

	domain_proto_mtx_lock_assert_held();
	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

	/*
	 * Some ioctls (e.g. SIOCAIFADDR) use ifaliasreq struct, which is
	 * interchangeable with in_aliasreq; they must have the same size.
	 */
	static_assert(sizeof(struct ifaliasreq) == sizeof(struct in_aliasreq));

	if (!os_atomic_cmpxchg(&ip_initialized, 0, 1, relaxed)) {
		return;
	}

	TAILQ_INIT(&in_ifaddrhead);
	in_ifaddrhashtbl_init();

	ip_moptions_init();

	pr = pffindproto_locked(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL) {
		panic("%s: Unable to find [PF_INET,IPPROTO_RAW,SOCK_RAW]",
		    __func__);
		/* NOTREACHED */
	}

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++) {
		ip_protox[i] = pr;
	}
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[], skipping protocols IPPROTO_{IP,RAW}.
	 */
	VERIFY(dp == inetdomain && dp->dom_family == PF_INET);
	TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
		VERIFY(pr->pr_domain == dp);
		if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr;
			}
		}
	}

	lck_mtx_lock(&ipqlock);
	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&ipq[i]);
	}

	maxnipq = 8192;
	maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */
	ipq_updateparams();
	lck_mtx_unlock(&ipqlock);

	getmicrotime(&tv);
	ip_id = (u_short)(RandomULong() ^ tv.tv_usec);

	PE_parse_boot_argn("ip_checkinterface", &i, sizeof(i));
	switch (i) {
	case IP_CHECKINTERFACE_WEAK_ES:
	case IP_CHECKINTERFACE_HYBRID_ES:
	case IP_CHECKINTERFACE_STRONG_ES:
		ip_checkinterface = i;
		break;
	default:
		break;
	}

	net_init_add(ip_init_delayed);
}

/*
 * Initialize IPv4 source address hash table.
 */
static void
in_ifaddrhashtbl_init(void)
{
	int i, k, p;
	uint32_t nhash = 0;
	uint32_t hash_size;

	if (in_ifaddrhashtbl != NULL) {
		return;
	}

	PE_parse_boot_argn("inaddr_nhash", &nhash,
	    sizeof(inaddr_nhash));
	if (nhash == 0) {
		nhash = INADDR_NHASH;
	}

	hash_size = nhash * sizeof(*in_ifaddrhashtbl);

	in_ifaddrhashtbl = zalloc_permanent(
		hash_size,
		ZALIGN_PTR);
	inaddr_nhash = nhash;

	/*
	 * Generate the next largest prime greater than inaddr_nhash.
	 */
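	/*
	 * For the default inaddr_nhash of 61, this tests 63 (3 * 21) and
	 * 65 (5 * 13) before settling on inaddr_hashp = 67.
	 */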
	k = (inaddr_nhash % 2 == 0) ? inaddr_nhash + 1 : inaddr_nhash + 2;
	for (;;) {
		p = 1;
		for (i = 3; i * i <= k; i += 2) {
			if (k % i == 0) {
				p = 0;
			}
		}
		if (p == 1) {
			break;
		}
		k += 2;
	}
	inaddr_hashp = k;
}

uint32_t
inaddr_hashval(uint32_t key)
{
	/*
	 * The hash index is the computed prime times the key modulo
	 * the hash size, as documented in "Introduction to Algorithms"
	 * (Cormen, Leiserson, Rivest).
	 */
	if (inaddr_nhash > 1) {
		return (key * inaddr_hashp) % inaddr_nhash;
	} else {
		return 0;
	}
}

struct in_ifaddrhashhead *
inaddr_hashlookup(uint32_t key)
{
	return &in_ifaddrhashtbl[inaddr_hashval(key)];
}
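
/*
 * A minimal lookup sketch built on the helper above (an illustrative
 * fragment; assumes the caller holds in_ifaddr_rwlock shared, mirroring
 * ip_input_check_interface() below):
 *
 *	struct in_ifaddr *ia;
 *	TAILQ_FOREACH(ia, inaddr_hashlookup(addr.s_addr), ia_hash) {
 *		if (IA_SIN(ia)->sin_addr.s_addr == addr.s_addr)
 *			break;
 *	}
 */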

static void
ip_proto_process_wake_packet(struct mbuf *m)
{
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	if (if_is_lpw_enabled(ifp)) {
		if_exit_lpw(ifp, "IP packet");
	}
}

__private_extern__ void
ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto,
    ipfilter_t inject_ipfref)
{
	struct ipfilter *__single filter;
	int seen = (inject_ipfref == NULL);
	int changed_header = 0;
	struct ip *ip;
	void (*pr_input)(struct mbuf *, int len);

	if (!TAILQ_EMPTY(&ipv4_filters)) {
		ipf_ref();
		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
			if (seen == 0) {
				if ((struct ipfilter *)inject_ipfref == filter) {
					seen = 1;
				}
			} else if (filter->ipf_filter.ipf_input) {
				errno_t result;

				if (changed_header == 0) {
					/*
					 * Perform IP header alignment fixup,
					 * if needed, before passing packet
					 * into filter(s).
					 */
					IP_HDR_ALIGNMENT_FIXUP(m,
					    m->m_pkthdr.rcvif, ipf_unref());

					/* ipf_unref() already called */
					if (m == NULL) {
						return;
					}

					changed_header = 1;
					ip = mtod(m, struct ip *);
					ip->ip_len = htons(ip->ip_len + (uint16_t)hlen);
					ip->ip_off = htons(ip->ip_off);
					ip->ip_sum = 0;
					ip->ip_sum = ip_cksum_hdr_in(m, hlen);
				}
				result = filter->ipf_filter.ipf_input(
					filter->ipf_filter.cookie, (mbuf_t *)&m,
					hlen, proto);
				if (result == EJUSTRETURN) {
					ipf_unref();
					return;
				}
				if (result != 0) {
					ipstat.ips_input_ipf_drop++;
					ipf_unref();
					m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FILTER_DROP,
					    NULL, 0);
					return;
				}
			}
		}
		ipf_unref();
	}

	/* Perform IP header alignment fixup (post-filters), if needed */
	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );

	ip = mtod(m, struct ip *);

	if (changed_header) {
		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
		ip->ip_off = ntohs(ip->ip_off);
	}

	/*
	 * Check if we need to switch to full wake mode -- TCP knows about idle connections
	 */
	if (__improbable(ip->ip_p != IPPROTO_TCP && (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) != 0)) {
		ip_proto_process_wake_packet(m);
	}

	/*
	 * If there isn't a specific lock for the protocol
	 * we're about to call, use the generic lock for AF_INET;
	 * otherwise let the protocol deal with its own locking.
	 */
	if ((pr_input = ip_protox[ip->ip_p]->pr_input) == NULL) {
		ipstat.ips_input_no_proto++;
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_NO_PROTO,
		    NULL, 0);
	} else if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
		lck_mtx_lock(inet_domain_mutex);
		pr_input(m, hlen);
		lck_mtx_unlock(inet_domain_mutex);
	} else {
		pr_input(m, hlen);
	}
}

struct pktchain_elm {
	struct mbuf	*pkte_head;
	struct mbuf	*pkte_tail;
	struct in_addr	pkte_saddr;
	struct in_addr	pkte_daddr;
	uint16_t	pkte_npkts;
	uint16_t	pkte_proto;
	uint32_t	pkte_nbytes;
};

typedef struct pktchain_elm pktchain_elm_t;

/* Store up to PKTTBL_SZ unique flows on the stack */
#define PKTTBL_SZ	7
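
/*
 * Usage sketch for ip_chain_insert() below (mirroring ip_proto_input()):
 * the caller owns a zeroed table on its stack and treats a non-NULL return
 * as a hash-bucket collision that ends the current chaining run.
 *
 *	pktchain_elm_t tbl[PKTTBL_SZ];
 *	bzero(tbl, sizeof(tbl));
 *	if (ip_chain_insert(pkt, tbl) == NULL) {
 *		// pkt was appended to its flow's chain
 *	} else {
 *		// bucket taken by another flow; process pkt unchained
 *	}
 */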

static struct mbuf *
ip_chain_insert(struct mbuf *packet, pktchain_elm_t *__counted_by(PKTTBL_SZ) tbl)
{
	struct ip *ip;
	int pkttbl_idx = 0;

	ip = mtod(packet, struct ip *);

	/* reusing the hash function from inaddr_hashval */
	pkttbl_idx = inaddr_hashval(ntohl(ip->ip_src.s_addr)) % PKTTBL_SZ;
	if (tbl[pkttbl_idx].pkte_head == NULL) {
		tbl[pkttbl_idx].pkte_head = packet;
		tbl[pkttbl_idx].pkte_saddr.s_addr = ip->ip_src.s_addr;
		tbl[pkttbl_idx].pkte_daddr.s_addr = ip->ip_dst.s_addr;
		tbl[pkttbl_idx].pkte_proto = ip->ip_p;
	} else {
		if ((ip->ip_dst.s_addr == tbl[pkttbl_idx].pkte_daddr.s_addr) &&
		    (ip->ip_src.s_addr == tbl[pkttbl_idx].pkte_saddr.s_addr) &&
		    (ip->ip_p == tbl[pkttbl_idx].pkte_proto)) {
		} else {
			return packet;
		}
	}
	if (tbl[pkttbl_idx].pkte_tail != NULL) {
		mbuf_setnextpkt(tbl[pkttbl_idx].pkte_tail, packet);
	}

	tbl[pkttbl_idx].pkte_tail = packet;
	tbl[pkttbl_idx].pkte_npkts += 1;
	tbl[pkttbl_idx].pkte_nbytes += packet->m_pkthdr.len;
	return NULL;
}

/* args is a dummy variable here for backward compatibility */
static void
ip_input_second_pass_loop_tbl(pktchain_elm_t *__counted_by(PKTTBL_SZ) tbl, struct ip_fw_in_args *args)
{
	int i = 0;

	for (i = 0; i < PKTTBL_SZ; i++) {
		if (tbl[i].pkte_head != NULL) {
			struct mbuf *m = tbl[i].pkte_head;
			ip_input_second_pass(m, m->m_pkthdr.rcvif,
			    tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args);

			if (tbl[i].pkte_npkts > 2) {
				ipstat.ips_rxc_chainsz_gt2++;
			}
			if (tbl[i].pkte_npkts > 4) {
				ipstat.ips_rxc_chainsz_gt4++;
			}
#if (DEBUG || DEVELOPMENT)
			if (ip_input_measure) {
				net_perf_histogram(&net_perf, tbl[i].pkte_npkts);
			}
#endif /* (DEBUG || DEVELOPMENT) */
			tbl[i].pkte_head = tbl[i].pkte_tail = NULL;
			tbl[i].pkte_npkts = 0;
			tbl[i].pkte_nbytes = 0;
			/* no need to initialize address and protocol in tbl */
		}
	}
}

static void
ip_input_cpout_args(struct ip_fw_in_args *args, struct ip_fw_args *args1,
    boolean_t *done_init)
{
	if (*done_init == FALSE) {
		bzero(args1, sizeof(struct ip_fw_args));
		*done_init = TRUE;
	}
	args1->fwa_pf_rule = args->fwai_pf_rule;
}

static void
ip_input_cpin_args(struct ip_fw_args *args1, struct ip_fw_in_args *args)
{
	args->fwai_pf_rule = args1->fwa_pf_rule;
}

typedef enum {
	IPINPUT_DOCHAIN = 0,
	IPINPUT_DONTCHAIN,
	IPINPUT_FREED,
	IPINPUT_DONE
} ipinput_chain_ret_t;

static void
ip_input_update_nstat(struct ifnet *ifp, struct in_addr src_ip,
    u_int32_t packets, u_int32_t bytes)
{
	if (nstat_collect) {
		struct rtentry *rt = ifnet_cached_rtlookup_inet(ifp,
		    src_ip);
		if (rt != NULL) {
			nstat_route_rx(rt, packets, bytes, 0);
			rtfree(rt);
		}
	}
}

static void
ip_input_dispatch_chain(struct mbuf *m)
{
	mbuf_ref_t tmp_mbuf = m;
	mbuf_ref_t nxt_mbuf = NULL;
	struct ip *__single ip = NULL;
	unsigned int hlen;

	ip = mtod(tmp_mbuf, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	while (tmp_mbuf != NULL) {
		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
		mbuf_setnextpkt(tmp_mbuf, NULL);
		ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0);
		tmp_mbuf = nxt_mbuf;
		if (tmp_mbuf) {
			ip = mtod(tmp_mbuf, struct ip *);
			/* first mbuf of chain already has adjusted ip_len */
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_len -= hlen;
		}
	}
}

static void
ip_input_setdst_chain(struct mbuf *m, uint16_t ifindex, struct in_ifaddr *ia)
{
	mbuf_ref_t tmp_mbuf = m;

	while (tmp_mbuf != NULL) {
		ip_setdstifaddr_info(tmp_mbuf, ifindex, ia);
		tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
	}
}

static void
ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp)
{
	boolean_t adjust = TRUE;

	ASSERT(m_pktlen(m) > ip->ip_len);

	/*
	 * Invalidate hardware checksum info if ip_adj_clear_hwcksum
	 * is set; useful to handle buggy drivers. Note that this
	 * should not be enabled by default, as we may get here due
	 * to link-layer padding.
	 */
	if (ip_adj_clear_hwcksum &&
	    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
	    !(inifp->if_flags & IFF_LOOPBACK) &&
	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
		m->m_pkthdr.csum_data = 0;
		ipstat.ips_adj_hwcsum_clr++;
	}

	/*
	 * If partial checksum information is available, subtract
	 * out the partial sum of postpended extraneous bytes, and
	 * update the checksum metadata accordingly. By doing it
	 * here, the upper layer transport only needs to adjust any
	 * prepended extraneous bytes (else it will do both.)
	 */
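	/*
	 * Illustrative case (assumed numbers): a 40-byte IP datagram padded
	 * by the link layer to Ethernet's 46-byte minimum payload carries 6
	 * trailing pad bytes that were folded into the partial sum; the
	 * m_adj_sum16() call below backs them out so upper layers checksum
	 * only the ip_len bytes the header actually covers.
	 */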
	if (ip_adj_partial_sum &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
		    m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
		    (ip->ip_len - m->m_pkthdr.csum_rx_start),
		    m->m_pkthdr.csum_rx_val);
	} else if ((m->m_pkthdr.csum_flags &
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		/*
		 * If the packet has partial checksum info and we decided not
		 * to subtract the partial sum of postpended extraneous
		 * bytes here (not the default case), leave that work to
		 * be handled by the other layers. For now, only the TCP and
		 * UDP layers are capable of dealing with this. For all other
		 * protocols (including fragments), trim and ditch the
		 * partial sum as those layers might not implement partial
		 * checksumming (or adjustment) at all.
		 */
		if ((ip->ip_off & (IP_MF | IP_OFFMASK)) == 0 &&
		    (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_UDP)) {
			adjust = FALSE;
		} else {
			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
			m->m_pkthdr.csum_data = 0;
			ipstat.ips_adj_hwcsum_clr++;
		}
	}

	if (adjust) {
		ipstat.ips_adj++;
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else {
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
		}
	}
}

/*
 * The first pass does all essential packet validation and places the packet
 * on a per-flow queue for operations that have the same outcome for all
 * packets of a flow.
 */
static ipinput_chain_ret_t
ip_input_first_pass(struct mbuf *m, struct ip_fw_in_args *args, struct mbuf **modm)
{
	struct ip *__single ip;
	ifnet_ref_t inifp;
	unsigned int hlen;
	int retval = IPINPUT_DOCHAIN;
	int len = 0;
	struct in_addr src_ip;
#if DUMMYNET
	struct m_tag *copy;
	struct m_tag *p;
	boolean_t delete = FALSE;
	struct ip_fw_args args1;
	boolean_t init = FALSE;
#endif /* DUMMYNET */
	ipfilter_t __single inject_filter_ref = NULL;

	/* Check if the mbuf is still valid after interface filter processing */
	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
	inifp = mbuf_pkthdr_rcvif(m);
	VERIFY(inifp != NULL);

	/* Perform IP header alignment fixup, if needed */
	IP_HDR_ALIGNMENT_FIXUP(m, inifp, return IPINPUT_FREED);

	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;

#if DUMMYNET
	/*
	 * Don't bother searching for tag(s) if there's none.
	 */
	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
		goto ipfw_tags_done;
	}

	/* Grab info from mtags prepended to the chain */
	p = m_tag_first(m);
	while (p) {
		if (p->m_tag_id == KERNEL_MODULE_TAG_ID) {
			if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) {
				struct dn_pkt_tag *dn_tag;

				dn_tag = (struct dn_pkt_tag *)(p->m_tag_data);
				args->fwai_pf_rule = dn_tag->dn_pf_rule;
				delete = TRUE;
			}

			if (delete) {
				copy = p;
				p = m_tag_next(m, p);
				m_tag_delete(m, copy);
			} else {
				p = m_tag_next(m, p);
			}
		} else {
			p = m_tag_next(m, p);
		}
	}

#if DIAGNOSTIC
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		panic("ip_input no HDR");
	}
#endif

	if (args->fwai_pf_rule) {
		/* dummynet already filtered us */
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		inject_filter_ref = ipf_get_inject_filter(m);
		if (args->fwai_pf_rule) {
			goto check_with_pf;
		}
	}
ipfw_tags_done:
#endif /* DUMMYNET */

	/*
	 * No need to process packet twice if we've already seen it.
	 */
	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
		inject_filter_ref = ipf_get_inject_filter(m);
	}
	if (inject_filter_ref != NULL) {
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;

		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
		    struct ip *, ip, struct ifnet *, inifp,
		    struct ip *, ip, struct ip6_hdr *, NULL);

		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
		ip->ip_off = ntohs(ip->ip_off);
		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
		return IPINPUT_DONE;
	}

	if (__improbable(m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
		if_ports_used_match_mbuf(inifp, PF_INET, m);
	}

	if (m->m_pkthdr.len < sizeof(struct ip)) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_tooshort);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_TOO_SHORT,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_toosmall);
		return IPINPUT_FREED;
	}

	ip = mtod(m, struct ip *);
	*modm = m;

	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
	    ip->ip_p, ip->ip_off, ip->ip_len);

	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badvers);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_VERSION,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	if (hlen < sizeof(struct ip)) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badhlen);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_HDR_LENGTH,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			OSAddAtomic(1, &ipstat.ips_total);
			OSAddAtomic(1, &ipstat.ips_badhlen);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *);
		*modm = m;
	}

	if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1) {
		m->m_pkthdr.pkt_ext_flags |= PKTF_EXT_L4S;
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		/*
		 * Allow for the following exceptions:
		 *
		 * 1. If the packet was sent to loopback (i.e. rcvif
		 *    would have been set earlier at output time.)
		 *
		 * 2. If the packet was sent out on loopback from a local
		 *    source address which belongs to a non-loopback
		 *    interface (i.e. rcvif may not necessarily be a
		 *    loopback interface, hence the test for PKTF_LOOP.)
		 *    Unlike IPv6, there is no interface scope ID, and
		 *    therefore we don't care so much about PKTF_IFINFO.
		 */
		if (!(inifp->if_flags & IFF_LOOPBACK) &&
		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
			OSAddAtomic(1, &ipstat.ips_total);
			OSAddAtomic(1, &ipstat.ips_badaddr);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_INVALID_ADDR,
			    NULL, 0);
			return IPINPUT_FREED;
		}
	}

	/* IPv4 Link-Local Addresses as defined in RFC3927 */
	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
		ip_linklocal_stat.iplls_in_total++;
		if (ip->ip_ttl != MAXTTL) {
			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
			/* Silently drop link local traffic with bad TTL */
			if (!ip_linklocal_in_allowbadttl) {
				OSAddAtomic(1, &ipstat.ips_total);
				KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
				m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_TTL,
				    NULL, 0);
				return IPINPUT_FREED;
			}
		}
	}

	if (ip_cksum(m, hlen)) {
		OSAddAtomic(1, &ipstat.ips_total);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_CHECKSUM,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
	    struct ip *, ip, struct ifnet *, inifp,
	    struct ip *, ip, struct ip6_hdr *, NULL);

	/*
	 * Convert fields to host representation.
	 */
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_len);
#endif

	if (ip->ip_len < hlen) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badlen);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_LENGTH,
		    NULL, 0);
		return IPINPUT_FREED;
	}

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_off);
#endif

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_tooshort);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_TOO_SHORT,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	if (m->m_pkthdr.len > ip->ip_len) {
		ip_input_adjust(m, ip, inifp);
	}

	/* for netstat route statistics */
	src_ip = ip->ip_src;
	len = m->m_pkthdr.len;

#if DUMMYNET
check_with_pf:
#endif /* DUMMYNET */
#if PF
	/* Invoke inbound packet filter */
	if (PF_IS_ENABLED) {
		int error;
		ip_input_cpout_args(args, &args1, &init);
		ip = mtod(m, struct ip *);
		src_ip = ip->ip_src;

#if DUMMYNET
		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args1);
#else
		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
#endif /* DUMMYNET */
		if (error != 0 || m == NULL) {
			if (m != NULL) {
				panic("%s: unexpected packet %p",
				    __func__, m);
				/* NOTREACHED */
			}
			/* Already freed by callee */
			ip_input_update_nstat(inifp, src_ip, 1, len);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		*modm = m;
		ip_input_cpin_args(&args1, args);
	}
#endif /* PF */

#if IPSEC
	if (ipsec_bypass == 0 && ipsec_get_history_count(m)) {
		retval = IPINPUT_DONTCHAIN; /* XXX scope for chaining here? */
		goto pass;
	}
#endif

#if IPSEC
pass:
#endif
	/*
	 * Process options and, if not destined for us,
	 * ship it on. ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	ip_nhops = 0; /* for source routed packets */
	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
		src_ip = ip->ip_src;
		ip_input_update_nstat(inifp, src_ip, 1, len);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		OSAddAtomic(1, &ipstat.ips_total);
		return IPINPUT_FREED;
	}

	/*
	 * Don't chain fragmented packets
	 */
	if (ip->ip_off & ~(IP_DF | IP_RF)) {
		return IPINPUT_DONTCHAIN;
	}

	/* Allow DHCP/BootP responses through */
	if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
	    hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) {
		struct udpiphdr *__single ui;

		if (m->m_len < sizeof(struct udpiphdr) &&
		    (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
			OSAddAtomic(1, &udpstat.udps_hdrops);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		*modm = m;
		ui = mtod(m, struct udpiphdr *);
		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
			ip_setdstifaddr_info(m, inifp->if_index, NULL);
			return IPINPUT_DONTCHAIN;
		}
	}

	/* Avoid chaining raw sockets as ipsec checks occur later for them */
	if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) {
		return IPINPUT_DONTCHAIN;
	}

	return retval;
}

/*
 * Because the call to m_pullup() may free the mbuf, this function frees the
 * mbuf packet chain before it returns IP_CHECK_IF_DROP.
 */
static ip_check_if_result_t
ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp)
{
	mbuf_ref_t m = *mp;
	struct in_ifaddr *__single ia = NULL;
	struct in_ifaddr *__single best_ia = NULL;
	ifnet_ref_t match_ifp = NULL;
	ip_check_if_result_t result = IP_CHECK_IF_NONE;

	/*
	 * Host broadcast and all network broadcast addresses are always a match
	 */
	if (ip->ip_dst.s_addr == (u_int32_t)INADDR_BROADCAST ||
	    ip->ip_dst.s_addr == INADDR_ANY) {
		ip_input_setdst_chain(m, inifp->if_index, NULL);
		return IP_CHECK_IF_OURS;
	}

	/*
	 * Check for a match in the hash bucket.
	 */
	lck_rw_lock_shared(&in_ifaddr_rwlock);
	TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) {
			best_ia = ia;
			match_ifp = best_ia->ia_ifp;

			if (ia->ia_ifp == inifp || (inifp->if_flags & IFF_LOOPBACK) ||
			    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
				/*
				 * A locally originated packet or a packet from the loopback
				 * interface is always an exact interface address match
				 */
				match_ifp = inifp;
				break;
			}
			/*
			 * Continue the loop in case there's an exact match with another
			 * interface
			 */
		}
	}
	if (best_ia != NULL) {
		if (match_ifp != inifp && ipforwarding == 0 &&
		    ((ip_checkinterface == IP_CHECKINTERFACE_HYBRID_ES &&
		    (match_ifp->if_family == IFNET_FAMILY_IPSEC ||
		    match_ifp->if_family == IFNET_FAMILY_UTUN)) ||
		    ip_checkinterface == IP_CHECKINTERFACE_STRONG_ES)) {
			/*
			 * Drop when the interface address check is strict and forwarding
			 * is disabled
			 */
			result = IP_CHECK_IF_DROP;
		} else {
			result = IP_CHECK_IF_OURS;
			ip_input_setdst_chain(m, 0, best_ia);
		}
	}
	lck_rw_done(&in_ifaddr_rwlock);

	if (result == IP_CHECK_IF_NONE && (inifp->if_flags & IFF_BROADCAST)) {
		/*
		 * Check for broadcast addresses.
		 *
		 * Only accept broadcast packets that arrive via the matching
		 * interface. Reception of forwarded directed broadcasts would be
		 * handled via ip_forward() and ether_frameout() with the loopback
		 * into the stack for SIMPLEX interfaces handled by ether_frameout().
		 */
		struct ifaddr *__single ifa;

		ifnet_lock_shared(inifp);
		TAILQ_FOREACH(ifa, &inifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET) {
				continue;
			}
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == ip->ip_dst.s_addr ||
			    ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
				ip_input_setdst_chain(m, 0, ia);
				result = IP_CHECK_IF_OURS;
				match_ifp = inifp;
				break;
			}
		}
		ifnet_lock_done(inifp);
	}

	/* Allow DHCP/BootP responses through */
	if (result == IP_CHECK_IF_NONE && (inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
	    ip->ip_p == IPPROTO_UDP && (IP_VHL_HL(ip->ip_vhl) << 2) == sizeof(struct ip)) {
		struct udpiphdr *__single ui;

		if (m->m_len < sizeof(struct udpiphdr)) {
			if ((m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
				OSAddAtomic(1, &udpstat.udps_hdrops);
				*mp = NULL;
				return IP_CHECK_IF_DROP;
			}
			/*
			 * m_pullup can return a different mbuf
			 */
			*mp = m;
			ip = mtod(m, struct ip *);
		}
		ui = mtod(m, struct udpiphdr *);
		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
			ip_input_setdst_chain(m, inifp->if_index, NULL);
			result = IP_CHECK_IF_OURS;
			match_ifp = inifp;
		}
	}

	if (result == IP_CHECK_IF_NONE) {
		if (ipforwarding == 0) {
			result = IP_CHECK_IF_DROP;
		} else {
			result = IP_CHECK_IF_FORWARD;
			ip_input_setdst_chain(m, inifp->if_index, NULL);
		}
	}

	if (result == IP_CHECK_IF_OURS && match_ifp != inifp) {
		ipstat.ips_rcv_if_weak_match++;

		/* Logging is too noisy when forwarding is enabled */
		if (ip_checkinterface_debug != 0 && ipforwarding == 0) {
			char src_str[MAX_IPv4_STR_LEN];
			char dst_str[MAX_IPv4_STR_LEN];

			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
			os_log_info(OS_LOG_DEFAULT,
			    "%s: weak ES interface match to %s for packet from %s to %s proto %u received via %s",
			    __func__, best_ia->ia_ifp->if_xname, src_str, dst_str, ip->ip_p, inifp->if_xname);
		}
	} else if (result == IP_CHECK_IF_DROP) {
		if (ip_checkinterface_debug > 0) {
			char src_str[MAX_IPv4_STR_LEN];
			char dst_str[MAX_IPv4_STR_LEN];

			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
			os_log(OS_LOG_DEFAULT,
			    "%s: no interface match for packet from %s to %s proto %u received via %s",
			    __func__, src_str, dst_str, ip->ip_p, inifp->if_xname);
		}
		mbuf_ref_t tmp_mbuf = m;
		while (tmp_mbuf != NULL) {
			ipstat.ips_rcv_if_no_match++;
			tmp_mbuf = tmp_mbuf->m_nextpkt;
		}
		m_drop_list(m, NULL, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_RCV_IF_NO_MATCH, NULL, 0);
		*mp = NULL;
	}

	return result;
}

static void
ip_input_second_pass(struct mbuf *m, struct ifnet *inifp,
    int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args)
{
	struct mbuf *tmp_mbuf = NULL;
	unsigned int hlen;

#pragma unused (args)

	struct ip *__single ip = mtod(m, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;

	OSAddAtomic(npkts_in_chain, &ipstat.ips_total);

	/*
	 * Naively assume we can attribute inbound data to the route we would
	 * use to send to this destination. Asymmetric routing breaks this
	 * assumption, but it still allows us to account for traffic from
	 * a remote node in the routing table.
	 * This has a very significant performance impact, so we bypass it
	 * if nstat_collect is disabled. We may also bypass it if the
	 * protocol is TCP in the future, because TCP will have a route that
	 * we can use to attribute the data to. That does mean we would not
	 * account for forwarded TCP traffic.
	 */
	ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain,
	    bytes_in_chain);

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	tmp_mbuf = m;
	if (TAILQ_EMPTY(&in_ifaddrhead)) {
		while (tmp_mbuf != NULL) {
			if (!(tmp_mbuf->m_flags & (M_MCAST | M_BCAST))) {
				ip_setdstifaddr_info(tmp_mbuf, inifp->if_index,
				    NULL);
			}
			tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
		}
		goto ours;
	}

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 */
	if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		ip_check_if_result_t ip_check_if_result = IP_CHECK_IF_NONE;

		ip_check_if_result = ip_input_check_interface(&m, ip, inifp);
		ASSERT(ip_check_if_result != IP_CHECK_IF_NONE);
		if (ip_check_if_result == IP_CHECK_IF_OURS) {
			goto ours;
		} else if (ip_check_if_result == IP_CHECK_IF_DROP) {
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
	} else {
		struct in_multi *__single inm;
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		in_multihead_lock_shared();
		IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
		in_multihead_lock_done();
		if (inm == NULL) {
			OSAddAtomic(npkts_in_chain, &ipstat.ips_notmember);
			m_drop_list(m, NULL, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_UNKNOWN_MULTICAST_GROUP,
			    NULL, 0);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
		ip_input_setdst_chain(m, inifp->if_index, NULL);
		INM_REMREF(inm);
		goto ours;
	}

	tmp_mbuf = m;
	struct mbuf *__single nxt_mbuf = NULL;
	while (tmp_mbuf != NULL) {
		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
		/*
		 * Not for us; forward if possible and desirable.
		 */
		mbuf_setnextpkt(tmp_mbuf, NULL);
		if (ipforwarding == 0) {
			OSAddAtomic(1, &ipstat.ips_cantforward);
			m_drop(tmp_mbuf, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_CANNOT_FORWARD,
			    NULL, 0);
		} else {
			ip_forward(tmp_mbuf, 0, NULL);
		}
		tmp_mbuf = nxt_mbuf;
	}
	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
	return;
ours:
	ip = mtod(m, struct ip *); /* in case it changed */
	/*
	 * If offset is set, must reassemble.
	 */
	if (ip->ip_off & ~(IP_DF | IP_RF)) {
		VERIFY(npkts_in_chain == 1);
		m = ip_reass(m);
		if (m == NULL) {
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#if IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
		VERIFY(npkts_in_chain == 1);
		if (ipsec4_in_reject(m, NULL)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
			m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IPSEC_REJECT,
			    NULL, 0);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
	}
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered);

	ip_input_dispatch_chain(m);

	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
	return;
}
1622
1623 void
ip_proto_input(protocol_family_t protocol,mbuf_t packet_list)1624 ip_proto_input(protocol_family_t protocol, mbuf_t packet_list)
1625 {
1626 #pragma unused(protocol)
1627 pktchain_elm_t pktchain_tbl[PKTTBL_SZ];
1628
1629 mbuf_ref_t packet = NULL;
1630 mbuf_ref_t modm = NULL; /* modified mbuf */
1631 int retval = 0;
1632 #if (DEBUG || DEVELOPMENT)
1633 struct timeval start_tv;
1634 #endif /* (DEBUG || DEVELOPMENT) */
1635 int num_pkts = 0;
1636 int chain = 0;
1637 struct ip_fw_in_args args;
1638
1639 #if (DEBUG || DEVELOPMENT)
1640 if (ip_input_measure) {
1641 net_perf_start_time(&net_perf, &start_tv);
1642 }
1643 #endif /* (DEBUG || DEVELOPMENT) */
1644
1645 bzero(&pktchain_tbl, sizeof(pktchain_tbl));
1646 restart_list_process:
1647 chain = 0;
1648 for (packet = packet_list; packet; packet = packet_list) {
1649 m_add_crumb(packet, PKT_CRUMB_IP_INPUT);
1650
1651 packet_list = mbuf_nextpkt(packet);
1652 mbuf_setnextpkt(packet, NULL);
1653
1654 num_pkts++;
1655 modm = NULL;
1656 bzero(&args, sizeof(args));
1657
1658 retval = ip_input_first_pass(packet, &args, &modm);
1659
1660 if (retval == IPINPUT_DOCHAIN) {
1661 if (modm) {
1662 packet = modm;
1663 }
1664 packet = ip_chain_insert(packet, &pktchain_tbl[0]);
1665 if (packet == NULL) {
1666 ipstat.ips_rxc_chained++;
1667 chain++;
1668 if (chain > ip_chainsz) {
1669 break;
1670 }
1671 } else {
1672 ipstat.ips_rxc_collisions++;
1673 break;
1674 }
1675 } else if (retval == IPINPUT_DONTCHAIN) {
1676 /* in order to preserve order, exit from chaining */
1677 if (modm) {
1678 packet = modm;
1679 }
1680 ipstat.ips_rxc_notchain++;
1681 break;
1682 } else {
1683 /* packet was freed or delivered, do nothing. */
1684 }
1685 }
1686
1687 /* do second pass here for pktchain_tbl */
1688 if (chain) {
1689 ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args);
1690 }
1691
1692 if (packet) {
1693 /*
1694 * equivalent update in chaining case if performed in
1695 * ip_input_second_pass_loop_tbl().
1696 */
1697 #if (DEBUG || DEVELOPMENT)
1698 if (ip_input_measure) {
1699 net_perf_histogram(&net_perf, 1);
1700 }
1701 #endif /* (DEBUG || DEVELOPMENT) */
1702 ip_input_second_pass(packet, packet->m_pkthdr.rcvif,
1703 1, packet->m_pkthdr.len, &args);
1704 }
1705
1706 if (packet_list) {
1707 goto restart_list_process;
1708 }
1709
1710 #if (DEBUG || DEVELOPMENT)
1711 if (ip_input_measure) {
1712 net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1713 }
1714 #endif /* (DEBUG || DEVELOPMENT) */
1715 }
1716
1717 static void
1718 ipq_updateparams(void)
1719 {
1720 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
1721 /*
1722 * -1 for unlimited allocation.
1723 */
1724 if (maxnipq < 0) {
1725 ipq_limit = 0;
1726 }
1727 /*
1728 * Positive number for specific bound.
1729 */
1730 if (maxnipq > 0) {
1731 ipq_limit = maxnipq;
1732 }
1733 /*
1734 * Zero specifies no further fragment queue allocation -- set the
1735 * bound very low, but rely on implementation elsewhere to actually
1736 * prevent allocation and reclaim current queues.
1737 */
1738 if (maxnipq == 0) {
1739 ipq_limit = 1;
1740 }
1741 /*
1742 * Arm the purge timer if not already and if there's work to do
1743 */
1744 frag_sched_timeout();
1745 }
1746
1747 static int
1748 sysctl_maxnipq SYSCTL_HANDLER_ARGS
1749 {
1750 #pragma unused(arg1, arg2)
1751 int error, i;
1752
1753 lck_mtx_lock(&ipqlock);
1754 i = maxnipq;
1755 error = sysctl_handle_int(oidp, &i, 0, req);
1756 if (error || req->newptr == USER_ADDR_NULL) {
1757 goto done;
1758 }
1759 /* impose bounds */
1760 if (i < -1) {
1761 error = EINVAL;
1762 goto done;
1763 }
1764 maxnipq = i;
1765 ipq_updateparams();
1766 done:
1767 lck_mtx_unlock(&ipqlock);
1768 return error;
1769 }
1770
1771 static int
1772 sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS
1773 {
1774 #pragma unused(arg1, arg2)
1775 int error, i;
1776
1777 lck_mtx_lock(&ipqlock);
1778 i = maxfragsperpacket;
1779 error = sysctl_handle_int(oidp, &i, 0, req);
1780 if (error || req->newptr == USER_ADDR_NULL) {
1781 goto done;
1782 }
1783 maxfragsperpacket = i;
1784 ipq_updateparams(); /* see if we need to arm timer */
1785 done:
1786 lck_mtx_unlock(&ipqlock);
1787 return error;
1788 }
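
/*
 * Usage sketch for the two handlers above (hedged: assuming they are
 * wired to the usual BSD-derived OIDs net.inet.ip.maxfragpackets and
 * net.inet.ip.maxfragsperpacket):
 *
 *   sysctl -w net.inet.ip.maxfragpackets=1024   # bound the queue count
 *   sysctl -w net.inet.ip.maxfragpackets=0      # refuse new frag queues
 *   sysctl -w net.inet.ip.maxfragpackets=-1     # unlimited allocation
 */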
1789
1790 /*
1791 * Take incoming datagram fragment and try to reassemble it into
1792 * whole datagram. If a chain for reassembly of this datagram already
1793 * exists, then it is given as fp; otherwise have to make a chain.
1794 *
1795	 * The IP header is *NOT* yet subtracted from ip_len (which is in host byte order).
1796 */
1797 static struct mbuf *
1798 ip_reass(struct mbuf *m)
1799 {
1800 struct ip *__single ip;
1801 mbuf_ref_t p, q, nq, t;
1802 struct ipq *__single fp = NULL;
1803 struct ipqhead *__single head;
1804 int i, hlen, next;
1805 u_int8_t ecn, ecn0;
1806 uint32_t csum, csum_flags;
1807 uint16_t hash;
1808 struct fq_head dfq;
1809
1810 MBUFQ_INIT(&dfq); /* for deferred frees */
1811
1812 /* If maxnipq or maxfragsperpacket is 0, never accept fragments. */
1813 if (maxnipq == 0 || maxfragsperpacket == 0) {
1814 ipstat.ips_fragments++;
1815 ipstat.ips_fragdropped++;
1816 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_NOT_ACCEPTED,
1817 NULL, 0);
1818 if (nipq > 0) {
1819 lck_mtx_lock(&ipqlock);
1820 frag_sched_timeout(); /* purge stale fragments */
1821 lck_mtx_unlock(&ipqlock);
1822 }
1823 return NULL;
1824 }
1825
1826 ip = mtod(m, struct ip *);
1827 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1828
1829 lck_mtx_lock(&ipqlock);
1830
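	/*
	 * Reassembly queues are hashed on (source address, IP ID) into
	 * IPREASS_NHASH buckets; the exact (id, src, dst, protocol)
	 * 4-tuple is then matched below, since distinct datagrams may
	 * share a bucket.
	 */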
1831 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
1832 head = &ipq[hash];
1833
1834 /*
1835 * Look for queue of fragments
1836 * of this datagram.
1837 */
1838 TAILQ_FOREACH(fp, head, ipq_list) {
1839 if (ip->ip_id == fp->ipq_id &&
1840 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
1841 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
1842 ip->ip_p == fp->ipq_p) {
1843 goto found;
1844 }
1845 }
1846
1847 fp = NULL;
1848
1849 /*
1850 * Attempt to trim the number of allocated fragment queues if it
1851 * exceeds the administrative limit.
1852 */
1853 if ((nipq > (unsigned)maxnipq) && (maxnipq > 0)) {
1854 /*
1855 * drop something from the tail of the current queue
1856 * before proceeding further
1857 */
1858 struct ipq *__single fq = TAILQ_LAST(head, ipqhead);
1859 if (fq == NULL) { /* gak */
1860 for (i = 0; i < IPREASS_NHASH; i++) {
1861 struct ipq *__single r = TAILQ_LAST(&ipq[i], ipqhead);
1862 if (r) {
1863 ipstat.ips_fragdropped += r->ipq_nfrags;
1864 frag_freef(&ipq[i], r, DROP_REASON_IP_FRAG_TOO_MANY);
1865 break;
1866 }
1867 }
1868 } else {
1869 ipstat.ips_fragdropped += fq->ipq_nfrags;
1870 frag_freef(head, fq, DROP_REASON_IP_FRAG_TOO_MANY);
1871 }
1872 }
1873
1874 found:
1875 /*
1876 * Leverage partial checksum offload for IP fragments. Narrow down
1877 * the scope to cover only UDP without IP options, as that is the
1878 * most common case.
1879 *
1880 * Perform 1's complement adjustment of octets that got included/
1881 * excluded in the hardware-calculated checksum value. Ignore cases
1882 * where the value includes the entire IPv4 header span, as the sum
1883 * for those octets would already be 0 by the time we get here; IP
1884 * has already performed its header checksum validation. Also take
1885 * care of any trailing bytes and subtract out their partial sum.
1886 */
1887 if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) &&
1888 (m->m_pkthdr.csum_flags &
1889 (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
1890 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
1891 uint32_t start = m->m_pkthdr.csum_rx_start;
1892 int32_t trailer = (m_pktlen(m) - ip->ip_len);
1893 uint32_t swbytes = (uint32_t)trailer;
1894
1895 csum = m->m_pkthdr.csum_rx_val;
1896
1897 ASSERT(trailer >= 0);
1898 if ((start != 0 && start != hlen) || trailer != 0) {
1899 uint32_t datalen = ip->ip_len - hlen;
1900
1901 #if BYTE_ORDER != BIG_ENDIAN
1902 if (start < hlen) {
1903 HTONS(ip->ip_len);
1904 HTONS(ip->ip_off);
1905 }
1906 #endif /* BYTE_ORDER != BIG_ENDIAN */
1907 /* callee folds in sum */
1908 csum = m_adj_sum16(m, start, hlen, datalen, csum);
1909 if (hlen > start) {
1910 swbytes += (hlen - start);
1911 } else {
1912 swbytes += (start - hlen);
1913 }
1914 #if BYTE_ORDER != BIG_ENDIAN
1915 if (start < hlen) {
1916 NTOHS(ip->ip_off);
1917 NTOHS(ip->ip_len);
1918 }
1919 #endif /* BYTE_ORDER != BIG_ENDIAN */
1920 }
1921 csum_flags = m->m_pkthdr.csum_flags;
1922
1923 if (swbytes != 0) {
1924 udp_in_cksum_stats(swbytes);
1925 }
1926 if (trailer != 0) {
1927 m_adj(m, -trailer);
1928 }
1929 } else {
1930 csum = 0;
1931 csum_flags = 0;
1932 }
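
	/*
	 * Net effect of the block above: csum now holds a 1's-complement
	 * sum over exactly the transport payload [hlen, hlen + datalen).
	 * Sums that began at offset 0 need no fixup because a valid IPv4
	 * header sums to ~0; sums that began elsewhere are corrected by
	 * m_adj_sum16(), and trailing link-layer padding is subtracted
	 * and then physically trimmed with m_adj().
	 */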
1933
1934 /* Invalidate checksum */
1935 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
1936
1937 ipstat.ips_fragments++;
1938
1939 /*
1940 * Adjust ip_len to not reflect header,
1941 * convert offset of this to bytes.
1942 */
1943 ip->ip_len -= hlen;
1944 if (ip->ip_off & IP_MF) {
1945 /*
1946 * Make sure that fragments have a data length
1947 * that's a non-zero multiple of 8 bytes.
1948 */
1949 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
1950 OSAddAtomic(1, &ipstat.ips_toosmall);
1951 /*
1952 * Reassembly queue may have been found if previous
1953 * fragments were valid; given that this one is bad,
1954 * we need to drop it. Make sure to set fp to NULL
1955 * if not already, since we don't want to decrement
1956 * ipq_nfrags as it doesn't include this packet.
1957 */
1958 fp = NULL;
1959 goto dropfrag;
1960 }
1961 m->m_flags |= M_FRAG;
1962 } else {
1963 /* Clear the flag in case packet comes from loopback */
1964 m->m_flags &= ~M_FRAG;
1965 }
1966 ip->ip_off = (u_short)(ip->ip_off << 3);
1967
1968 m->m_pkthdr.pkt_hdr = ip;
1969
1970 /* Previous ip_reass() started here. */
1971 /*
1972 * Presence of header sizes in mbufs
1973 * would confuse code below.
1974 */
1975 m->m_data += hlen;
1976 m->m_len -= hlen;
1977
1978 /*
1979 * If first fragment to arrive, create a reassembly queue.
1980 */
1981 if (fp == NULL) {
1982 fp = ipq_alloc();
1983 if (fp == NULL) {
1984 goto dropfrag;
1985 }
1986 TAILQ_INSERT_HEAD(head, fp, ipq_list);
1987 nipq++;
1988 fp->ipq_nfrags = 1;
1989 fp->ipq_ttl = IPFRAGTTL;
1990 fp->ipq_p = ip->ip_p;
1991 fp->ipq_id = ip->ip_id;
1992 fp->ipq_src = ip->ip_src;
1993 fp->ipq_dst = ip->ip_dst;
1994 fp->ipq_frags = m;
1995 m->m_nextpkt = NULL;
1996 /*
1997 * If the first fragment has valid checksum offload
1998 * info, the rest of fragments are eligible as well.
1999 */
2000 if (csum_flags != 0) {
2001 fp->ipq_csum = csum;
2002 fp->ipq_csum_flags = csum_flags;
2003 }
2004 m = NULL; /* nothing to return */
2005 goto done;
2006 } else {
2007 fp->ipq_nfrags++;
2008 }
2009
2010 #define GETIP(m) ((struct ip *)((m)->m_pkthdr.pkt_hdr))
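
/*
 * Each fragment stashes a pointer to its own IP header in
 * m_pkthdr.pkt_hdr (set above) before m_data is advanced past the
 * header; GETIP() recovers that per-fragment header even though the
 * mbuf data pointer now starts at the payload.
 */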
2011
2012 /*
2013 * Handle ECN by comparing this segment with the first one;
2014	 * if CE is set anywhere, do not lose CE.  Drop if CE and
2015	 * not-ECT are mixed for the same packet (RFC 3168, sec. 5.3).
2016 */
2017 ecn = ip->ip_tos & IPTOS_ECN_MASK;
2018 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
2019 if (ecn == IPTOS_ECN_CE) {
2020 if (ecn0 == IPTOS_ECN_NOTECT) {
2021 goto dropfrag;
2022 }
2023 if (ecn0 != IPTOS_ECN_CE) {
2024 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
2025 }
2026 }
2027 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
2028 goto dropfrag;
2029 }
2030
2031 /*
2032 * Find a segment which begins after this one does.
2033 */
2034 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2035 if (GETIP(q)->ip_off > ip->ip_off) {
2036 break;
2037 }
2038 }
2039
2040 /*
2041 * If there is a preceding segment, it may provide some of
2042 * our data already. If so, drop the data from the incoming
2043 * segment. If it provides all of our data, drop us, otherwise
2044 * stick new segment in the proper place.
2045 *
2046 * If some of the data is dropped from the preceding
2047	 * segment, then its checksum is invalidated.
2048 */
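	/*
	 * Worked example (hypothetical offsets): if the preceding
	 * fragment covers bytes [0, 24) and this one arrives at
	 * ip_off 16 with ip_len 16, then i == 8: the first 8 bytes
	 * of the new fragment are duplicates, so m_adj() drops them
	 * and ip_off/ip_len become 24/8.  Had i been >= ip_len, the
	 * new fragment would be wholly redundant and dropped.
	 */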
2049 if (p) {
2050 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
2051 if (i > 0) {
2052 if (i >= ip->ip_len) {
2053 goto dropfrag;
2054 }
2055 m_adj(m, i);
2056 fp->ipq_csum_flags = 0;
2057 ip->ip_off += i;
2058 ip->ip_len -= i;
2059 }
2060 m->m_nextpkt = p->m_nextpkt;
2061 p->m_nextpkt = m;
2062 } else {
2063 m->m_nextpkt = fp->ipq_frags;
2064 fp->ipq_frags = m;
2065 }
2066
2067 /*
2068 * While we overlap succeeding segments trim them or,
2069 * if they are completely covered, dequeue them.
2070 */
2071 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
2072 q = nq) {
2073 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
2074 if (i < GETIP(q)->ip_len) {
2075 GETIP(q)->ip_len -= i;
2076 GETIP(q)->ip_off += i;
2077 m_adj(q, i);
2078 fp->ipq_csum_flags = 0;
2079 break;
2080 }
2081 nq = q->m_nextpkt;
2082 m->m_nextpkt = nq;
2083 ipstat.ips_fragdropped++;
2084 fp->ipq_nfrags--;
2085 /* defer freeing until after lock is dropped */
2086 MBUFQ_ENQUEUE(&dfq, q);
2087 }
2088
2089 /*
2090 * If this fragment contains similar checksum offload info
2091 * as that of the existing ones, accumulate checksum. Otherwise,
2092 * invalidate checksum offload info for the entire datagram.
2093 */
2094 if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) {
2095 fp->ipq_csum += csum;
2096 } else if (fp->ipq_csum_flags != 0) {
2097 fp->ipq_csum_flags = 0;
2098 }
2099
2100
2101 /*
2102 * Check for complete reassembly and perform frag per packet
2103 * limiting.
2104 *
2105 * Frag limiting is performed here so that the nth frag has
2106 * a chance to complete the packet before we drop the packet.
2107 * As a result, n+1 frags are actually allowed per packet, but
2108 * only n will ever be stored. (n = maxfragsperpacket.)
2109 *
2110 */
2111 next = 0;
2112 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2113 if (GETIP(q)->ip_off != next) {
2114 if (fp->ipq_nfrags > maxfragsperpacket) {
2115 ipstat.ips_fragdropped += fp->ipq_nfrags;
2116 frag_freef(head, fp, DROP_REASON_IP_FRAG_TOO_MANY);
2117 }
2118 m = NULL; /* nothing to return */
2119 goto done;
2120 }
2121 next += GETIP(q)->ip_len;
2122 }
2123 /* Make sure the last packet didn't have the IP_MF flag */
2124 if (p->m_flags & M_FRAG) {
2125 if (fp->ipq_nfrags > maxfragsperpacket) {
2126 ipstat.ips_fragdropped += fp->ipq_nfrags;
2127 frag_freef(head, fp, DROP_REASON_IP_FRAG_TOO_MANY);
2128 }
2129 m = NULL; /* nothing to return */
2130 goto done;
2131 }
2132
2133 /*
2134 * Reassembly is complete. Make sure the packet is a sane size.
2135 */
2136 q = fp->ipq_frags;
2137 ip = GETIP(q);
2138 if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
2139 ipstat.ips_toolong++;
2140 ipstat.ips_fragdropped += fp->ipq_nfrags;
2141 frag_freef(head, fp, DROP_REASON_IP_FRAG_TOO_LONG);
2142 m = NULL; /* nothing to return */
2143 goto done;
2144 }
2145
2146 /*
2147 * Concatenate fragments.
2148 */
2149 m = q;
2150 t = m->m_next;
2151 m->m_next = NULL;
2152 m_cat(m, t);
2153 nq = q->m_nextpkt;
2154 q->m_nextpkt = NULL;
2155 for (q = nq; q != NULL; q = nq) {
2156 nq = q->m_nextpkt;
2157 q->m_nextpkt = NULL;
2158 m_cat(m, q);
2159 }
2160
2161 /*
2162 * Store partial hardware checksum info from the fragment queue;
2163 * the receive start offset is set to 20 bytes (see code at the
2164 * top of this routine.)
2165 */
2166 if (fp->ipq_csum_flags != 0) {
2167 csum = fp->ipq_csum;
2168
2169 ADDCARRY(csum);
2170
2171 m->m_pkthdr.csum_rx_val = (uint16_t)csum;
2172 m->m_pkthdr.csum_rx_start = sizeof(struct ip);
2173 m->m_pkthdr.csum_flags = fp->ipq_csum_flags;
2174 } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
2175 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
2176 /* loopback checksums are always OK */
2177 m->m_pkthdr.csum_data = 0xffff;
2178 m->m_pkthdr.csum_flags =
2179 CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
2180 CSUM_IP_CHECKED | CSUM_IP_VALID;
2181 }
2182
2183 /*
2184 * Create header for new ip packet by modifying header of first
2185 * packet; dequeue and discard fragment reassembly header.
2186 * Make header visible.
2187 */
2188 ip->ip_len = (u_short)((IP_VHL_HL(ip->ip_vhl) << 2) + next);
2189 ip->ip_src = fp->ipq_src;
2190 ip->ip_dst = fp->ipq_dst;
2191
2192 fp->ipq_frags = NULL; /* return to caller as 'm' */
2193 frag_freef(head, fp, DROP_REASON_UNSPECIFIED);
2194 fp = NULL;
2195
2196 m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
2197 m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
2198 /* some debugging cruft by sklower, below, will go away soon */
2199 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
2200 m_fixhdr(m);
2201 }
2202 ipstat.ips_reassembled++;
2203
2204 /* arm the purge timer if not already and if there's work to do */
2205 frag_sched_timeout();
2206 lck_mtx_unlock(&ipqlock);
2207 /* perform deferred free (if needed) now that lock is dropped */
2208 if (!MBUFQ_EMPTY(&dfq)) {
2209 MBUFQ_DRAIN(&dfq);
2210 }
2211 VERIFY(MBUFQ_EMPTY(&dfq));
2212 return m;
2213
2214 done:
2215 VERIFY(m == NULL);
2216 /* arm the purge timer if not already and if there's work to do */
2217 frag_sched_timeout();
2218 lck_mtx_unlock(&ipqlock);
2219 /* perform deferred free (if needed) */
2220 if (!MBUFQ_EMPTY(&dfq)) {
2221 MBUFQ_DRAIN(&dfq);
2222 }
2223 VERIFY(MBUFQ_EMPTY(&dfq));
2224 return NULL;
2225
2226 dropfrag:
2227 ipstat.ips_fragdropped++;
2228 if (fp != NULL) {
2229 fp->ipq_nfrags--;
2230 }
2231 /* arm the purge timer if not already and if there's work to do */
2232 frag_sched_timeout();
2233 lck_mtx_unlock(&ipqlock);
2234 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_DROPPED,
2235 NULL, 0);
2236 /* perform deferred free (if needed) */
2237 if (!MBUFQ_EMPTY(&dfq)) {
2238 MBUFQ_DRAIN(&dfq);
2239 }
2240 VERIFY(MBUFQ_EMPTY(&dfq));
2241 return NULL;
2242 #undef GETIP
2243 }
2244
2245 /*
2246 * Free a fragment reassembly header and all
2247 * associated datagrams.
2248 */
2249 static void
2250 frag_freef(struct ipqhead *fhp, struct ipq *fp, drop_reason_t drop_reason)
2251 {
2252 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2253
2254 fp->ipq_nfrags = 0;
2255 if (fp->ipq_frags != NULL) {
2256 if (drop_reason == DROP_REASON_UNSPECIFIED) {
2257 m_freem_list(fp->ipq_frags);
2258 } else {
2259 m_drop_list(fp->ipq_frags, NULL, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, drop_reason, NULL, 0);
2260 }
2261 fp->ipq_frags = NULL;
2262 }
2263 TAILQ_REMOVE(fhp, fp, ipq_list);
2264 nipq--;
2265 ipq_free(fp);
2266 }
2267
2268 /*
2269 * IP reassembly timer processing
2270 */
2271 static void
2272 frag_timeout(void *arg)
2273 {
2274 #pragma unused(arg)
2275 struct ipq *__single fp;
2276 int i;
2277
2278 /*
2279 * Update coarse-grained networking timestamp (in sec.); the idea
2280 * is to piggy-back on the timeout callout to update the counter
2281 * returnable via net_uptime().
2282 */
2283 net_update_uptime();
2284
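	/*
	 * Each pass through this timeout (scheduled at hz ticks, i.e.
	 * roughly once per second) decrements every queue's ipq_ttl;
	 * queues that started at IPFRAGTTL thus expire after about
	 * IPFRAGTTL seconds without completing reassembly.
	 */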
2285 lck_mtx_lock(&ipqlock);
2286 for (i = 0; i < IPREASS_NHASH; i++) {
2287 for (fp = TAILQ_FIRST(&ipq[i]); fp;) {
2288 struct ipq *__single fpp;
2289
2290 fpp = fp;
2291 fp = TAILQ_NEXT(fp, ipq_list);
2292 if (--fpp->ipq_ttl == 0) {
2293 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
2294 frag_freef(&ipq[i], fpp, DROP_REASON_IP_FRAG_TIMEOUT);
2295 }
2296 }
2297 }
2298 /*
2299 * If we are over the maximum number of fragments
2300 * (due to the limit being lowered), drain off
2301 * enough to get down to the new limit.
2302 */
2303 if (maxnipq >= 0 && nipq > (unsigned)maxnipq) {
2304 for (i = 0; i < IPREASS_NHASH; i++) {
2305 while (nipq > (unsigned)maxnipq &&
2306 !TAILQ_EMPTY(&ipq[i])) {
2307 ipstat.ips_fragdropped +=
2308 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2309 frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]), DROP_REASON_IP_FRAG_DROPPED);
2310 }
2311 }
2312 }
2313 /* re-arm the purge timer if there's work to do */
2314 frag_timeout_run = 0;
2315 frag_sched_timeout();
2316 lck_mtx_unlock(&ipqlock);
2317 }
2318
2319 static void
2320 frag_sched_timeout(void)
2321 {
2322 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2323
2324 if (!frag_timeout_run && nipq > 0) {
2325 frag_timeout_run = 1;
2326 timeout(frag_timeout, NULL, hz);
2327 }
2328 }
2329
2330 /*
2331 * Drain off all datagram fragments.
2332 */
2333 static void
2334 frag_drain(void)
2335 {
2336 int i;
2337
2338 lck_mtx_lock(&ipqlock);
2339 for (i = 0; i < IPREASS_NHASH; i++) {
2340 while (!TAILQ_EMPTY(&ipq[i])) {
2341 ipstat.ips_fragdropped +=
2342 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2343 frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]), DROP_REASON_IP_FRAG_DRAINED);
2344 }
2345 }
2346 lck_mtx_unlock(&ipqlock);
2347 }
2348
2349 static struct ipq *
2350 ipq_alloc(void)
2351 {
2352 struct ipq *__single fp;
2353
2354 /*
2355 * See comments in ipq_updateparams(). Keep the count separate
2356 * from nipq since the latter represents the elements already
2357 * in the reassembly queues.
2358 */
2359 if (ipq_limit > 0 && ipq_count > ipq_limit) {
2360 return NULL;
2361 }
2362
2363 fp = kalloc_type(struct ipq, Z_NOWAIT | Z_ZERO);
2364 if (fp != NULL) {
2365 os_atomic_inc(&ipq_count, relaxed);
2366 }
2367 return fp;
2368 }
2369
2370 static void
2371 ipq_free(struct ipq *fp)
2372 {
2373 kfree_type(struct ipq, fp);
2374 os_atomic_dec(&ipq_count, relaxed);
2375 }
2376
2377 /*
2378 * Drain callback
2379 */
2380 void
2381 ip_drain(void)
2382 {
2383 frag_drain(); /* fragments */
2384 in_rtqdrain(); /* protocol cloned routes */
2385 in_arpdrain(NULL); /* cloned routes: ARP */
2386 }
2387
2388 /*
2389 * Do option processing on a datagram,
2390 * possibly discarding it if bad options are encountered,
2391 * or forwarding it if source-routed.
2392 * The pass argument is used when operating in the IPSTEALTH
2393 * mode to tell what options to process:
2394 * [LS]SRR (pass 0) or the others (pass 1).
2395 * The reason for as many as two passes is that when doing IPSTEALTH,
2396 * non-routing options should be processed only if the packet is for us.
2397 * Returns 1 if packet has been forwarded/freed,
2398 * 0 if the packet should be processed further.
2399 */
2400 static int
2401 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
2402 {
2403 #pragma unused(pass)
2404 struct ip *ip = mtod(m, struct ip *);
2405 u_char *cp;
2406 struct ip_timestamp *__single ipt;
2407 struct in_ifaddr *__single ia;
2408 int opt, optlen, cnt, off, type = ICMP_PARAMPROB, forward = 0;
2409 uint8_t code = 0;
2410 struct in_addr *__single sin, dst;
2411 u_int32_t ntime;
2412 struct sockaddr_in ipaddr = {
2413 .sin_len = sizeof(ipaddr),
2414 .sin_family = AF_INET,
2415 .sin_port = 0,
2416 .sin_addr = { .s_addr = 0 },
2417 .sin_zero = { 0, }
2418 };
2419
2420 /* Expect 32-bit aligned data pointer on strict-align platforms */
2421 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2422
2423 dst = ip->ip_dst;
2424 cp = (u_char *)(ip + 1);
2425 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2426 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2427 opt = cp[IPOPT_OPTVAL];
2428 if (opt == IPOPT_EOL) {
2429 break;
2430 }
2431 if (opt == IPOPT_NOP) {
2432 optlen = 1;
2433 } else {
2434 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2435 code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2436 goto bad;
2437 }
2438 optlen = cp[IPOPT_OLEN];
2439 if (optlen < IPOPT_OLEN + sizeof(*cp) ||
2440 optlen > cnt) {
2441 code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2442 goto bad;
2443 }
2444 }
2445 switch (opt) {
2446 default:
2447 break;
2448
2449 /*
2450 * Source routing with record.
2451 * Find interface with current destination address.
2452 * If none on this machine then drop if strictly routed,
2453 * or do nothing if loosely routed.
2454 * Record interface address and bring up next address
2455 * component. If strictly routed make sure next
2456 * address is on directly accessible net.
2457 */
2458 case IPOPT_LSRR:
2459 case IPOPT_SSRR:
2460 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2461 code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2462 goto bad;
2463 }
2464 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2465 code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2466 goto bad;
2467 }
2468 ipaddr.sin_addr = ip->ip_dst;
2469 ia = ifatoia(ifa_ifwithaddr(SA(&ipaddr)));
2470 if (ia == NULL) {
2471 if (opt == IPOPT_SSRR) {
2472 type = ICMP_UNREACH;
2473 code = ICMP_UNREACH_SRCFAIL;
2474 goto bad;
2475 }
2476 if (!ip_dosourceroute) {
2477 goto nosourcerouting;
2478 }
2479 /*
2480 * Loose routing, and not at next destination
2481 * yet; nothing to do except forward.
2482 */
2483 break;
2484 } else {
2485 ifa_remref(&ia->ia_ifa);
2486 ia = NULL;
2487 }
2488 off--; /* 0 origin */
2489 if (off > optlen - (int)sizeof(struct in_addr)) {
2490 /*
2491 * End of source route. Should be for us.
2492 */
2493 if (!ip_acceptsourceroute) {
2494 goto nosourcerouting;
2495 }
2496 save_rte(cp, ip->ip_src);
2497 break;
2498 }
2499
2500 if (!ip_dosourceroute) {
2501 if (ipforwarding) {
2502 char buf[MAX_IPv4_STR_LEN];
2503 char buf2[MAX_IPv4_STR_LEN];
2504 /*
2505 * Acting as a router, so generate ICMP
2506 */
2507 nosourcerouting:
2508 log(LOG_WARNING,
2509 "attempted source route from %s "
2510 "to %s\n",
2511 inet_ntop(AF_INET, &ip->ip_src,
2512 buf, sizeof(buf)),
2513 inet_ntop(AF_INET, &ip->ip_dst,
2514 buf2, sizeof(buf2)));
2515 type = ICMP_UNREACH;
2516 code = ICMP_UNREACH_SRCFAIL;
2517 goto bad;
2518 } else {
2519 /*
2520 * Not acting as a router,
2521 * so silently drop.
2522 */
2523 OSAddAtomic(1, &ipstat.ips_cantforward);
2524 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_CANNOT_FORWARD,
2525 NULL, 0);
2526 return 1;
2527 }
2528 }
2529
2530 /*
2531 * locate outgoing interface
2532 */
2533 (void) memcpy(&ipaddr.sin_addr, cp + off,
2534 sizeof(ipaddr.sin_addr));
2535
2536 if (opt == IPOPT_SSRR) {
2537 #define INA struct in_ifaddr *
2538 if ((ia = (INA)ifa_ifwithdstaddr(
2539 SA(&ipaddr))) == NULL) {
2540 ia = (INA)ifa_ifwithnet(SA(&ipaddr));
2541 }
2542 } else {
2543 ia = ip_rtaddr(ipaddr.sin_addr);
2544 }
2545 if (ia == NULL) {
2546 type = ICMP_UNREACH;
2547 code = ICMP_UNREACH_SRCFAIL;
2548 goto bad;
2549 }
2550 ip->ip_dst = ipaddr.sin_addr;
2551 IFA_LOCK(&ia->ia_ifa);
2552 (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2553 sizeof(struct in_addr));
2554 IFA_UNLOCK(&ia->ia_ifa);
2555 ifa_remref(&ia->ia_ifa);
2556 ia = NULL;
2557 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2558 /*
2559 * Let ip_intr's mcast routing check handle mcast pkts
2560 */
2561 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
2562 break;
2563
2564 case IPOPT_RR:
2565 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2566 code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2567 goto bad;
2568 }
2569 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2570 code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2571 goto bad;
2572 }
2573 /*
2574 * If no space remains, ignore.
2575 */
2576 off--; /* 0 origin */
2577 if (off > optlen - (int)sizeof(struct in_addr)) {
2578 break;
2579 }
2580 (void) memcpy(&ipaddr.sin_addr, &ip->ip_dst,
2581 sizeof(ipaddr.sin_addr));
2582 /*
2583 * locate outgoing interface; if we're the destination,
2584 * use the incoming interface (should be same).
2585 */
2586 if ((ia = (INA)ifa_ifwithaddr(SA(&ipaddr))) == NULL) {
2587 if ((ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) {
2588 type = ICMP_UNREACH;
2589 code = ICMP_UNREACH_HOST;
2590 goto bad;
2591 }
2592 }
2593 IFA_LOCK(&ia->ia_ifa);
2594 (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2595 sizeof(struct in_addr));
2596 IFA_UNLOCK(&ia->ia_ifa);
2597 ifa_remref(&ia->ia_ifa);
2598 ia = NULL;
2599 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2600 break;
2601
2602 case IPOPT_TS:
2603 code = (uint8_t)(cp - (u_char *)ip);
2604 ipt = (struct ip_timestamp *)(void *)cp;
2605 if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
2606 code = (uint8_t)((u_char *)&ipt->ipt_len -
2607 (u_char *)ip);
2608 goto bad;
2609 }
2610 if (ipt->ipt_ptr < 5) {
2611 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2612 (u_char *)ip);
2613 goto bad;
2614 }
2615 if (ipt->ipt_ptr >
2616 ipt->ipt_len - (int)sizeof(int32_t)) {
2617 if (++ipt->ipt_oflw == 0) {
2618 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2619 (u_char *)ip);
2620 goto bad;
2621 }
2622 break;
2623 }
2624 sin = (struct in_addr *)(void *)(cp + ipt->ipt_ptr - 1);
2625 switch (ipt->ipt_flg) {
2626 case IPOPT_TS_TSONLY:
2627 break;
2628
2629 case IPOPT_TS_TSANDADDR:
2630 if (ipt->ipt_ptr - 1 + sizeof(n_time) +
2631 sizeof(struct in_addr) > ipt->ipt_len) {
2632 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2633 (u_char *)ip);
2634 goto bad;
2635 }
2636 ipaddr.sin_addr = dst;
2637 ia = (INA)ifaof_ifpforaddr(SA(&ipaddr),
2638 m->m_pkthdr.rcvif);
2639 if (ia == NULL) {
2640 continue;
2641 }
2642 IFA_LOCK(&ia->ia_ifa);
2643 (void) memcpy(sin, &IA_SIN(ia)->sin_addr,
2644 sizeof(struct in_addr));
2645 IFA_UNLOCK(&ia->ia_ifa);
2646 ipt->ipt_ptr += sizeof(struct in_addr);
2647 ifa_remref(&ia->ia_ifa);
2648 ia = NULL;
2649 break;
2650
2651 case IPOPT_TS_PRESPEC:
2652 if (ipt->ipt_ptr - 1 + sizeof(n_time) +
2653 sizeof(struct in_addr) > ipt->ipt_len) {
2654 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2655 (u_char *)ip);
2656 goto bad;
2657 }
2658 (void) memcpy(&ipaddr.sin_addr, sin,
2659 sizeof(struct in_addr));
2660 if ((ia = ifatoia(ifa_ifwithaddr(
2661 SA(&ipaddr)))) == NULL) {
2662 continue;
2663 }
2664 ifa_remref(&ia->ia_ifa);
2665 ia = NULL;
2666 ipt->ipt_ptr += sizeof(struct in_addr);
2667 break;
2668
2669 default:
2670 /* XXX can't take &ipt->ipt_flg */
2671 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2672 (u_char *)ip + 1);
2673 goto bad;
2674 }
2675 ntime = iptime();
2676 (void) memcpy(cp + ipt->ipt_ptr - 1, &ntime,
2677 sizeof(n_time));
2678 ipt->ipt_ptr += sizeof(n_time);
2679 }
2680 }
2681 if (forward && ipforwarding) {
2682 ip_forward(m, 1, next_hop);
2683 return 1;
2684 }
2685 return 0;
2686 bad:
2687 icmp_error(m, type, code, 0, 0);
2688 OSAddAtomic(1, &ipstat.ips_badoptions);
2689 return 1;
2690 }
2691
2692 /*
2693 * Check for the presence of the IP Router Alert option [RFC2113]
2694 * in the header of an IPv4 datagram.
2695 *
2696 * This call is not intended for use from the forwarding path; it is here
2697 * so that protocol domains may check for the presence of the option.
2698 * Given how FreeBSD's IPv4 stack is currently structured, the Router Alert
2699 * option does not have much relevance to the implementation, though this
2700 * may change in future.
2701 * Router alert options SHOULD be passed if running in IPSTEALTH mode and
2702 * we are not the endpoint.
2703	 * Length checks on individual options should already have been performed
2704 * by ip_dooptions() therefore they are folded under DIAGNOSTIC here.
2705 *
2706 * Return zero if not present or options are invalid, non-zero if present.
2707 */
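/*
 * For reference (RFC 2113), the option is four bytes on the wire,
 * e.g. 0x94 0x04 0x00 0x00: type IPOPT_RA (148, copy flag set),
 * length 4, and a 16-bit value of zero meaning "router shall examine
 * packet"; the check below accepts only a zero value.
 */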
2708 int
2709 ip_checkrouteralert(struct mbuf *m)
2710 {
2711 struct ip *ip = mtod(m, struct ip *);
2712 u_char *cp;
2713 int opt, optlen, cnt, found_ra;
2714
2715 found_ra = 0;
2716 cp = (u_char *)(ip + 1);
2717 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2718 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2719 opt = cp[IPOPT_OPTVAL];
2720 if (opt == IPOPT_EOL) {
2721 break;
2722 }
2723 if (opt == IPOPT_NOP) {
2724 optlen = 1;
2725 } else {
2726 #ifdef DIAGNOSTIC
2727 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2728 break;
2729 }
2730 #endif
2731 optlen = cp[IPOPT_OLEN];
2732 #ifdef DIAGNOSTIC
2733 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
2734 break;
2735 }
2736 #endif
2737 }
2738 switch (opt) {
2739 case IPOPT_RA:
2740 #ifdef DIAGNOSTIC
2741 if (optlen != IPOPT_OFFSET + sizeof(uint16_t) ||
2742 (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) {
2743 break;
2744 } else
2745 #endif
2746 found_ra = 1;
2747 break;
2748 default:
2749 break;
2750 }
2751 }
2752
2753 return found_ra;
2754 }
2755
2756 /*
2757 * Given address of next destination (final or next hop),
2758 * return internet address info of interface to be used to get there.
2759 */
2760 struct in_ifaddr *
2761 ip_rtaddr(struct in_addr dst)
2762 {
2763 struct sockaddr_in *__single sin;
2764 struct ifaddr *__single rt_ifa;
2765 struct route ro;
2766
2767 bzero(&ro, sizeof(ro));
2768 sin = SIN(&ro.ro_dst);
2769 sin->sin_family = AF_INET;
2770 sin->sin_len = sizeof(*sin);
2771 sin->sin_addr = dst;
2772
2773 rtalloc_ign(&ro, RTF_PRCLONING);
2774 if (ro.ro_rt == NULL) {
2775 ROUTE_RELEASE(&ro);
2776 return NULL;
2777 }
2778
2779 RT_LOCK(ro.ro_rt);
2780 if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) {
2781 ifa_addref(rt_ifa);
2782 }
2783 RT_UNLOCK(ro.ro_rt);
2784 ROUTE_RELEASE(&ro);
2785
2786 return ifatoia(rt_ifa);
2787 }
2788
2789 /*
2790 * Save incoming source route for use in replies,
2791 * to be picked up later by ip_srcroute if the receiver is interested.
2792 */
2793 static void
2794 save_rte(u_char *__indexable option, struct in_addr dst)
2795 {
2796 unsigned olen;
2797
2798 olen = option[IPOPT_OLEN];
2799 #if DIAGNOSTIC
2800 if (ipprintfs) {
2801 printf("save_rte: olen %d\n", olen);
2802 }
2803 #endif
2804 if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) {
2805 return;
2806 }
2807 bcopy(option, ip_srcrt.srcopt, olen);
2808 ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
2809 ip_srcrt.dst = dst;
2810 }
2811
2812 /*
2813 * Retrieve incoming source route for use in replies,
2814 * in the same form used by setsockopt.
2815	 * The first hop is placed before the options and will be removed later.
2816 */
2817 struct mbuf *
2818 ip_srcroute(void)
2819 {
2820 struct in_addr *p, *q;
2821 struct mbuf *m;
2822
2823 if (ip_nhops == 0) {
2824 return NULL;
2825 }
2826
2827 m = m_get(M_DONTWAIT, MT_HEADER);
2828 if (m == NULL) {
2829 return NULL;
2830 }
2831
2832 #define OPTSIZ (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt))
2833
2834 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
2835 m->m_len = ip_nhops * sizeof(struct in_addr) +
2836 sizeof(struct in_addr) + OPTSIZ;
2837 #if DIAGNOSTIC
2838 if (ipprintfs) {
2839 printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
2840 }
2841 #endif
2842
2843 /*
2844	 * Notes to the astute reader:
2845 * 1. The code is sequenced in the order
2846 * of writing to the mbuf contents.
2847	 * 2. The addresses in `ip_srcrt.route` are stored in wire order
2848	 *    and consumed back-to-front below, reversing the path.
2849 */
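	/*
	 * Worked example (hypothetical hops): if the incoming packet's
	 * recorded route was R1, R2, R3 and the original source was S
	 * (saved as ip_srcrt.dst), the reply mbuf is built with
	 * first-hop R3 followed by option addresses R2, R1, S -- the
	 * recorded path reversed, terminating at the original sender.
	 */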
2850 /*
2851 * First save first hop for return route
2852 */
2853 p = &ip_srcrt.route[ip_nhops - 1];
2854 *(mtod(m, struct in_addr *)) = *p;
2855 #if DIAGNOSTIC
2856 if (ipprintfs) {
2857 printf(" hops %lx",
2858 (u_int32_t)ntohl(mtod(m, struct in_addr *)->s_addr));
2859 }
2860 #endif
2861
2862 /*
2863 * Copy option fields and padding (nop) to mbuf.
2864 */
2865 ip_srcrt.nop = IPOPT_NOP;
2866 ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
2867 (void) __nochk_memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
2868 (caddr_t)&ip_srcrt + sizeof(struct in_addr), OPTSIZ);
2869 q = (struct in_addr *)(void *)(mtod(m, caddr_t) +
2870 sizeof(struct in_addr) + OPTSIZ);
2871 #undef OPTSIZ
2872 /*
2873 * If multiple return addresses were provided,
2874 * record the return path as an IP source route,
2875 * reversing the path.
2876 */
2877 for (int i = 0; i < (ip_nhops - 1); i++) {
2878 q[i] = ip_srcrt.route[ip_nhops - (i + 2)];
2879 #if DIAGNOSTIC
2880 if (ipprintfs) {
2881 printf(" %lx", (u_int32_t)ntohl(q[i].s_addr));
2882 }
2883 #endif
2884 }
2885 /*
2886 * Last hop goes to final destination.
2887 */
2888 q[ip_nhops - 1] = ip_srcrt.dst;
2889 #if DIAGNOSTIC
2890 if (ipprintfs) {
2891 printf(" %lx\n", (u_int32_t)ntohl(q[ip_nhops - 1].s_addr));
2892 }
2893 #endif
2894 return m;
2895 }
2896
2897 /*
2898 * Strip out IP options, at higher level protocol in the kernel.
2899 */
2900 void
2901 ip_stripoptions(struct mbuf *m)
2902 {
2903 int i;
2904 struct ip *ip = mtod(m, struct ip *);
2905 caddr_t opts;
2906 int olen;
2907
2908 /* Expect 32-bit aligned data pointer on strict-align platforms */
2909 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2910
2911 /* use bcopy() since it supports overlapping range */
2912 olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2913 opts = (caddr_t)(ip + 1);
2914 i = m->m_len - (sizeof(struct ip) + olen);
2915 bcopy(opts + olen, opts, (unsigned)i);
2916 m->m_len -= olen;
2917 if (m->m_flags & M_PKTHDR) {
2918 m->m_pkthdr.len -= olen;
2919 }
2920 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2921
2922 /*
2923 * We expect ip_{off,len} to be in host order by now, and
2924 * that the original IP header length has been subtracted
2925 * out from ip_len. Temporarily adjust ip_len for checksum
2926 * recalculation, and restore it afterwards.
2927 */
2928 ip->ip_len += sizeof(struct ip);
2929
2930 /* recompute checksum now that IP header is smaller */
2931 #if BYTE_ORDER != BIG_ENDIAN
2932 HTONS(ip->ip_len);
2933 HTONS(ip->ip_off);
2934 #endif /* BYTE_ORDER != BIG_ENDIAN */
2935 ip->ip_sum = in_cksum_hdr(ip);
2936 #if BYTE_ORDER != BIG_ENDIAN
2937 NTOHS(ip->ip_off);
2938 NTOHS(ip->ip_len);
2939 #endif /* BYTE_ORDER != BIG_ENDIAN */
2940
2941 ip->ip_len -= sizeof(struct ip);
2942
2943 /*
2944 * Given that we've just stripped IP options from the header,
2945 * we need to adjust the start offset accordingly if this
2946 * packet had gone thru partial checksum offload.
2947 */
2948 if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
2949 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
2950 if (m->m_pkthdr.csum_rx_start >= (sizeof(struct ip) + olen)) {
2951 /* most common case */
2952 m->m_pkthdr.csum_rx_start -= olen;
2953 } else {
2954 /* compute checksum in software instead */
2955 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
2956 m->m_pkthdr.csum_data = 0;
2957 ipstat.ips_adj_hwcsum_clr++;
2958 }
2959 }
2960 }
2961
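/*
 * Indexed by PRC_* protocol-control codes: e.g. PRC_MSGSIZE (5) and
 * PRC_UNREACH_NEEDFRAG (12) map to EMSGSIZE, PRC_UNREACH_PORT (11)
 * to ECONNREFUSED, and PRC_PARAMPROB (20) to ENOPROTOOPT; a zero
 * entry means the event is not reported as an error.
 */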
2962 u_char inetctlerrmap[PRC_NCMDS] = {
2963 0, 0, 0, 0,
2964 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
2965 ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
2966 EMSGSIZE, EHOSTUNREACH, 0, 0,
2967 0, 0, EHOSTUNREACH, 0,
2968 ENOPROTOOPT, ECONNREFUSED
2969 };
2970
2971 static int
2972 sysctl_ipforwarding SYSCTL_HANDLER_ARGS
2973 {
2974 #pragma unused(arg1, arg2)
2975 int i, was_ipforwarding = ipforwarding;
2976
2977 i = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
2978 if (i != 0 || req->newptr == USER_ADDR_NULL) {
2979 return i;
2980 }
2981
2982 if (was_ipforwarding && !ipforwarding) {
2983 /* clean up IPv4 forwarding cached routes */
2984 ifnet_head_lock_shared();
2985 for (i = 0; i <= if_index; i++) {
2986 ifnet_ref_t ifp = ifindex2ifnet[i];
2987 if (ifp != NULL) {
2988 lck_mtx_lock(&ifp->if_cached_route_lock);
2989 ROUTE_RELEASE(&ifp->if_fwd_route);
2990 bzero(&ifp->if_fwd_route,
2991 sizeof(ifp->if_fwd_route));
2992 lck_mtx_unlock(&ifp->if_cached_route_lock);
2993 }
2994 }
2995 ifnet_head_done();
2996 }
2997
2998 return 0;
2999 }
3000
3001 /*
3002 * Similar to inp_route_{copyout,copyin} routines except that these copy
3003 * out the cached IPv4 forwarding route from struct ifnet instead of the
3004 * inpcb. See comments for those routines for explanations.
3005 */
3006 static void
3007 ip_fwd_route_copyout(struct ifnet *ifp, struct route *dst)
3008 {
3009 struct route *src = &ifp->if_fwd_route;
3010
3011 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3012 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3013
3014 /* Minor sanity check */
3015 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3016 panic("%s: wrong or corrupted route: %p", __func__, src);
3017 }
3018
3019 route_copyout(dst, src, sizeof(*dst));
3020
3021 lck_mtx_unlock(&ifp->if_cached_route_lock);
3022 }
3023
3024 static void
3025 ip_fwd_route_copyin(struct ifnet *ifp, struct route *src)
3026 {
3027 struct route *dst = &ifp->if_fwd_route;
3028
3029 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3030 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3031
3032 /* Minor sanity check */
3033 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3034 panic("%s: wrong or corrupted route: %p", __func__, src);
3035 }
3036
3037 if (ifp->if_fwd_cacheok) {
3038 route_copyin(src, dst, sizeof(*src));
3039 }
3040
3041 lck_mtx_unlock(&ifp->if_cached_route_lock);
3042 }
3043
3044 /*
3045 * Forward a packet. If some error occurs return the sender
3046 * an icmp packet. Note we can't always generate a meaningful
3047 * icmp message because icmp doesn't have a large enough repertoire
3048 * of codes and types.
3049 *
3050 * If not forwarding, just drop the packet. This could be confusing
3051 * if ipforwarding was zero but some routing protocol was advancing
3052 * us as a gateway to somewhere. However, we must let the routing
3053 * protocol deal with that.
3054 *
3055 * The srcrt parameter indicates whether the packet is being forwarded
3056 * via a source route.
3057 */
3058 static void
3059 ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop)
3060 {
3061 #pragma unused(next_hop)
3062 struct ip *__single ip = mtod(m, struct ip *);
3063 struct sockaddr_in *__single sin;
3064 rtentry_ref_t rt;
3065 struct route fwd_rt;
3066 int error, type = 0, code = 0;
3067 mbuf_ref_t mcopy;
3068 n_long dest;
3069 struct in_addr pkt_dst;
3070 u_int32_t nextmtu = 0, len;
3071 struct ip_out_args ipoa;
3072 struct ifnet *__single rcvifp = m->m_pkthdr.rcvif;
3073
3074 bzero(&ipoa, sizeof(ipoa));
3075 ipoa.ipoa_boundif = IFSCOPE_NONE;
3076 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3077 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3078
3079 #if IPSEC
3080 struct secpolicy *sp = NULL;
3081 int ipsecerror;
3082 #endif /* IPSEC */
3083 #if PF
3084 struct pf_mtag *pf_mtag;
3085 #endif /* PF */
3086
3087 dest = 0;
3088 pkt_dst = ip->ip_dst;
3089
3090 #if DIAGNOSTIC
3091 if (ipprintfs) {
3092 printf("forward: src %lx dst %lx ttl %x\n",
3093 (u_int32_t)ip->ip_src.s_addr, (u_int32_t)pkt_dst.s_addr,
3094 ip->ip_ttl);
3095 }
3096 #endif
3097
3098 if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
3099 OSAddAtomic(1, &ipstat.ips_cantforward);
3100 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_CANNOT_FORWARD,
3101 NULL, 0);
3102 return;
3103 }
3104 #if IPSTEALTH
3105 if (!ipstealth) {
3106 #endif /* IPSTEALTH */
3107 if (ip->ip_ttl <= IPTTLDEC) {
3108 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
3109 dest, 0);
3110 return;
3111 }
3112 #if IPSTEALTH
3113 }
3114 #endif /* IPSTEALTH */
3115
3116 #if PF
3117 pf_mtag = pf_find_mtag(m);
3118 if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) {
3119 ipoa.ipoa_boundif = pf_mtag->pftag_rtableid;
3120 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
3121 }
3122 #endif /* PF */
3123
3124 ip_fwd_route_copyout(rcvifp, &fwd_rt);
3125
3126 sin = SIN(&fwd_rt.ro_dst);
3127 if (ROUTE_UNUSABLE(&fwd_rt) || pkt_dst.s_addr != sin->sin_addr.s_addr) {
3128 ROUTE_RELEASE(&fwd_rt);
3129
3130 sin->sin_family = AF_INET;
3131 sin->sin_len = sizeof(*sin);
3132 sin->sin_addr = pkt_dst;
3133
3134 rtalloc_scoped_ign(&fwd_rt, RTF_PRCLONING, ipoa.ipoa_boundif);
3135 if (fwd_rt.ro_rt == NULL) {
3136 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
3137 goto done;
3138 }
3139 }
3140 rt = fwd_rt.ro_rt;
3141
3142 /*
3143 * Save the IP header and at most 8 bytes of the payload,
3144 * in case we need to generate an ICMP message to the src.
3145 *
3146 * We don't use m_copy() because it might return a reference
3147 * to a shared cluster. Both this function and ip_output()
3148 * assume exclusive access to the IP header in `m', so any
3149 * data in a cluster may change before we reach icmp_error().
3150 */
3151 MGET(mcopy, M_DONTWAIT, m->m_type);
3152 if (mcopy != NULL && m_dup_pkthdr(mcopy, m, M_DONTWAIT) == 0) {
3153 mcopy->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
3154 (int)ip->ip_len);
3155 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
3156 }
3157
3158 #if IPSTEALTH
3159 if (!ipstealth) {
3160 #endif /* IPSTEALTH */
3161 ip->ip_ttl -= IPTTLDEC;
3162 #if IPSTEALTH
3163 }
3164 #endif /* IPSTEALTH */
3165
3166 /*
3167 * If forwarding packet using same interface that it came in on,
3168 * perhaps should send a redirect to sender to shortcut a hop.
3169 * Only send redirect if source is sending directly to us,
3170 * and if packet was not source routed (or has any options).
3171 * Also, don't send redirect if forwarding using a default route
3172 * or a route modified by a redirect.
3173 */
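	/*
	 * Example (hypothetical addresses): a host on 192.0.2.0/24 sends
	 * us a packet whose best next hop is another router on that same
	 * subnet.  We still forward the packet, but also send an ICMP
	 * host redirect so the host can shortcut us next time.
	 */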
3174 RT_LOCK_SPIN(rt);
3175 if (rt->rt_ifp == m->m_pkthdr.rcvif &&
3176 !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
3177 satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
3178 ipsendredirects && !srcrt && rt->rt_ifa != NULL) {
3179 struct in_ifaddr *ia = ifatoia(rt->rt_ifa);
3180 u_int32_t src = ntohl(ip->ip_src.s_addr);
3181
3182 /* Become a regular mutex */
3183 RT_CONVERT_LOCK(rt);
3184 IFA_LOCK_SPIN(&ia->ia_ifa);
3185 if ((src & ia->ia_subnetmask) == ia->ia_subnet) {
3186 if (rt->rt_flags & RTF_GATEWAY) {
3187 dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
3188 } else {
3189 dest = pkt_dst.s_addr;
3190 }
3191 /*
3192			 * The Router Requirements RFC (1812) says to send only
3193 * host redirects.
3194 */
3195 type = ICMP_REDIRECT;
3196 code = ICMP_REDIRECT_HOST;
3197 #if DIAGNOSTIC
3198 if (ipprintfs) {
3199 printf("redirect (%d) to %lx\n", code,
3200 (u_int32_t)dest);
3201 }
3202 #endif
3203 }
3204 IFA_UNLOCK(&ia->ia_ifa);
3205 }
3206 RT_UNLOCK(rt);
3207
3208
3209 /* Mark this packet as being forwarded from another interface */
3210 m->m_pkthdr.pkt_flags |= PKTF_FORWARDED;
3211 len = m_pktlen(m);
3212
3213 error = ip_output(m, NULL, &fwd_rt, IP_FORWARDING | IP_OUTARGS,
3214 NULL, &ipoa);
3215
3216 /* Refresh rt since the route could have changed while in IP */
3217 rt = fwd_rt.ro_rt;
3218
3219 if (error != 0) {
3220 OSAddAtomic(1, &ipstat.ips_cantforward);
3221 } else {
3222 /*
3223		 * Increment stats on the source interface; those
3224		 * for the destination interface have been taken care of
3225 * during output above by virtue of PKTF_FORWARDED.
3226 */
3227 rcvifp->if_fpackets++;
3228 rcvifp->if_fbytes += len;
3229
3230 OSAddAtomic(1, &ipstat.ips_forward);
3231 if (type != 0) {
3232 OSAddAtomic(1, &ipstat.ips_redirectsent);
3233 } else {
3234 if (mcopy != NULL) {
3235 /*
3236 * If we didn't have to go thru ipflow and
3237 * the packet was successfully consumed by
3238 * ip_output, the mcopy is rather a waste;
3239 * this could be further optimized.
3240 */
3241 m_freem(mcopy);
3242 }
3243 goto done;
3244 }
3245 }
3246 if (mcopy == NULL) {
3247 goto done;
3248 }
3249
3250 switch (error) {
3251 case 0: /* forwarded, but need redirect */
3252 /* type, code set above */
3253 break;
3254
3255 case ENETUNREACH: /* shouldn't happen, checked above */
3256 case EHOSTUNREACH:
3257 case ENETDOWN:
3258 case EHOSTDOWN:
3259 default:
3260 type = ICMP_UNREACH;
3261 code = ICMP_UNREACH_HOST;
3262 break;
3263
3264 case EMSGSIZE:
3265 type = ICMP_UNREACH;
3266 code = ICMP_UNREACH_NEEDFRAG;
3267
3268 if (rt == NULL) {
3269 break;
3270 } else {
3271 RT_LOCK_SPIN(rt);
3272 if (rt->rt_ifp != NULL) {
3273 nextmtu = rt->rt_ifp->if_mtu;
3274 }
3275 RT_UNLOCK(rt);
3276 }
3277 #if IPSEC
3278 if (ipsec_bypass) {
3279 break;
3280 }
3281
3282 /*
3283 * If the packet is routed over IPsec tunnel, tell the
3284 * originator the tunnel MTU.
3285 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
3286 * XXX quickhack!!!
3287 */
3288 sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND,
3289 IP_FORWARDING, &ipsecerror);
3290
3291 if (sp == NULL) {
3292 break;
3293 }
3294
3295 /*
3296 * find the correct route for outer IPv4
3297 * header, compute tunnel MTU.
3298 */
3299 nextmtu = 0;
3300
3301 if (sp->req != NULL &&
3302 sp->req->saidx.mode == IPSEC_MODE_TUNNEL) {
3303 struct secasindex saidx;
3304 struct secasvar *__single sav;
3305 struct route *__single ro;
3306 struct ip *__single ipm;
3307 size_t ipsechdr;
3308
3309 /* count IPsec header size */
3310 ipsechdr = ipsec_hdrsiz(sp);
3311
3312 ipm = mtod(mcopy, struct ip *);
3313 bcopy(&sp->req->saidx, &saidx, sizeof(saidx));
3314 saidx.mode = sp->req->saidx.mode;
3315 saidx.reqid = sp->req->saidx.reqid;
3316 sin = SIN(&saidx.src);
3317 if (sin->sin_len == 0) {
3318 sin->sin_len = sizeof(*sin);
3319 sin->sin_family = AF_INET;
3320 sin->sin_port = IPSEC_PORT_ANY;
3321 bcopy(&ipm->ip_src, &sin->sin_addr,
3322 sizeof(sin->sin_addr));
3323 }
3324 sin = SIN(&saidx.dst);
3325 if (sin->sin_len == 0) {
3326 sin->sin_len = sizeof(*sin);
3327 sin->sin_family = AF_INET;
3328 sin->sin_port = IPSEC_PORT_ANY;
3329 bcopy(&ipm->ip_dst, &sin->sin_addr,
3330 sizeof(sin->sin_addr));
3331 }
3332 sav = key_allocsa_policy(&saidx);
3333 if (sav != NULL) {
3334 lck_mtx_lock(sadb_mutex);
3335 if (sav->sah != NULL) {
3336 ro = (struct route *)&sav->sah->sa_route;
3337 if (ro->ro_rt != NULL) {
3338 RT_LOCK(ro->ro_rt);
3339 if (ro->ro_rt->rt_ifp != NULL) {
3340 nextmtu = ro->ro_rt->
3341 rt_ifp->if_mtu;
3342 nextmtu -= ipsechdr;
3343 }
3344 RT_UNLOCK(ro->ro_rt);
3345 }
3346 }
3347 key_freesav(sav, KEY_SADB_LOCKED);
3348 lck_mtx_unlock(sadb_mutex);
3349 }
3350 }
3351 key_freesp(sp, KEY_SADB_UNLOCKED);
3352 #endif /* IPSEC */
3353 break;
3354
3355 case ENOBUFS:
3356 /*
3357		 * Per RFC 1812 (Requirements for IP Version 4 Routers), a
3358		 * router SHOULD NOT originate ICMP_SOURCEQUENCH messages.
3359 * Source quench could be a big problem under DoS attacks,
3360 * or if the underlying interface is rate-limited.
3361 * Those who need source quench packets may re-enable them
3362 * via the net.inet.ip.sendsourcequench sysctl.
3363 */
3364 if (ip_sendsourcequench == 0) {
3365 m_freem(mcopy);
3366 goto done;
3367 } else {
3368 type = ICMP_SOURCEQUENCH;
3369 code = 0;
3370 }
3371 break;
3372
3373 case EACCES:
3374 m_freem(mcopy);
3375 goto done;
3376 }
3377
3378 if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) {
3379 OSAddAtomic(1, &ipstat.ips_cantfrag);
3380 }
3381
3382 icmp_error(mcopy, type, code, dest, nextmtu);
3383 done:
3384 ip_fwd_route_copyin(rcvifp, &fwd_rt);
3385 }
3386
3387 int
3388 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
3389 struct mbuf *m)
3390 {
3391 *mp = NULL;
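	/*
	 * Pattern used below: each sbcreatecontrol_mbuf() call appends
	 * one cmsg to the control-mbuf chain and returns the tail
	 * pointer for the next append; a NULL *mp after the call
	 * indicates the allocation failed and we bail out to no_mbufs.
	 */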
3392 if (inp->inp_socket->so_options & SO_TIMESTAMP) {
3393 struct timeval tv;
3394
3395 getmicrotime(&tv);
3396 mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv),
3397 SCM_TIMESTAMP, SOL_SOCKET, mp);
3398 if (*mp == NULL) {
3399 goto no_mbufs;
3400 }
3401 }
3402 if (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) {
3403 uint64_t time;
3404
3405 time = mach_absolute_time();
3406 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
3407 SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
3408 if (*mp == NULL) {
3409 goto no_mbufs;
3410 }
3411 }
3412 if (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) {
3413 uint64_t time;
3414
3415 time = mach_continuous_time();
3416 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
3417 SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
3418 if (*mp == NULL) {
3419 goto no_mbufs;
3420 }
3421 }
3422 if (inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) {
3423 int tc = m_get_traffic_class(m);
3424
3425 mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc),
3426 SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
3427 if (*mp == NULL) {
3428 goto no_mbufs;
3429 }
3430 }
3431 if ((inp->inp_socket->so_flags & SOF_RECV_WAKE_PKT) &&
3432 (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
3433 int flag = 1;
3434
3435 mp = sbcreatecontrol_mbuf((caddr_t)&flag, sizeof(flag),
3436 SO_RECV_WAKE_PKT, SOL_SOCKET, mp);
3437 if (*mp == NULL) {
3438 goto no_mbufs;
3439 }
3440 }
3441
3442 if (inp->inp_flags & INP_RECVDSTADDR || SOFLOW_ENABLED(inp->inp_socket)) {
3443 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst,
3444 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp);
3445 if (*mp == NULL) {
3446 goto no_mbufs;
3447 }
3448 }
3449 #ifdef notyet
3450 /*
3451 * XXX
3452 * Moving these out of udp_input() made them even more broken
3453 * than they already were.
3454 */
3455 /* options were tossed already */
3456 if (inp->inp_flags & INP_RECVOPTS) {
3457 mp = sbcreatecontrol_mbuf((caddr_t)opts_deleted_above,
3458 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp);
3459 if (*mp == NULL) {
3460 goto no_mbufs;
3461 }
3462 }
3463 /* ip_srcroute doesn't do what we want here, need to fix */
3464 if (inp->inp_flags & INP_RECVRETOPTS) {
3465 mp = sbcreatecontrol_mbuf((caddr_t)ip_srcroute(),
3466 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp);
3467 if (*mp == NULL) {
3468 goto no_mbufs;
3469 }
3470 }
3471 #endif /* notyet */
3472 if (inp->inp_flags & INP_RECVIF) {
3473 ifnet_ref_t ifp;
3474 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
3475 struct sockaddr_dl *sdl2 = SDL(sdlbuf);
3476
3477 /*
3478		 * Make sure to accommodate the largest possible
3479 * size of SA(if_lladdr)->sa_len.
3480 */
3481 static_assert(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
3482
3483 ifnet_head_lock_shared();
3484 if ((ifp = m->m_pkthdr.rcvif) != NULL &&
3485 ifp->if_index && IF_INDEX_IN_RANGE(ifp->if_index)) {
3486 struct ifaddr *__single ifa = ifnet_addrs[ifp->if_index - 1];
3487 struct sockaddr_dl *sdp;
3488
3489 if (!ifa || !ifa->ifa_addr) {
3490 goto makedummy;
3491 }
3492
3493 IFA_LOCK_SPIN(ifa);
3494 sdp = SDL(ifa->ifa_addr);
3495 /*
3496			 * Change our mind and don't try to copy.
3497 */
3498 if (sdp->sdl_family != AF_LINK) {
3499 IFA_UNLOCK(ifa);
3500 goto makedummy;
3501 }
3502 /* the above static_assert() ensures sdl_len fits in sdlbuf */
3503 SOCKADDR_COPY(sdp, sdl2, sdp->sdl_len);
3504 IFA_UNLOCK(ifa);
3505 } else {
3506 makedummy:
3507 sdl2->sdl_len =
3508 offsetof(struct sockaddr_dl, sdl_data[0]);
3509 sdl2->sdl_family = AF_LINK;
3510 sdl2->sdl_index = 0;
3511 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
3512 }
3513 ifnet_head_done();
3514 mp = sbcreatecontrol_mbuf((caddr_t)SA_BYTES(sdl2), sdl2->sdl_len,
3515 IP_RECVIF, IPPROTO_IP, mp);
3516 if (*mp == NULL) {
3517 goto no_mbufs;
3518 }
3519 }
3520 if (inp->inp_flags & INP_RECVTTL) {
3521 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_ttl,
3522 sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp);
3523 if (*mp == NULL) {
3524 goto no_mbufs;
3525 }
3526 }
3527 if (inp->inp_flags & INP_PKTINFO) {
3528 struct in_pktinfo pi;
3529
3530 bzero(&pi, sizeof(struct in_pktinfo));
3531 bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof(struct in_addr));
3532 pi.ipi_ifindex = (m != NULL && m->m_pkthdr.rcvif != NULL) ?
3533 m->m_pkthdr.rcvif->if_index : 0;
3534
3535 mp = sbcreatecontrol_mbuf((caddr_t)&pi,
3536 sizeof(struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp);
3537 if (*mp == NULL) {
3538 goto no_mbufs;
3539 }
3540 }
3541 if (inp->inp_flags & INP_RECVTOS) {
3542 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_tos,
3543 sizeof(u_char), IP_RECVTOS, IPPROTO_IP, mp);
3544 if (*mp == NULL) {
3545 goto no_mbufs;
3546 }
3547 }
3548 if (inp->inp_flags2 & INP2_RECV_LINK_ADDR_TYPE) {
3549 int mode = IP_RECV_LINK_ADDR_UNICAST;
3550
3551 if (m->m_flags & M_BCAST) {
3552 mode = IP_RECV_LINK_ADDR_BROADCAST;
3553 } else if (m->m_flags & M_MCAST) {
3554 mode = IP_RECV_LINK_ADDR_MULTICAST;
3555 }
3556
3557 mp = sbcreatecontrol_mbuf((caddr_t)&mode,
3558 sizeof(int), IP_RECV_LINK_ADDR_TYPE, IPPROTO_IP, mp);
3559 if (*mp == NULL) {
3560 goto no_mbufs;
3561 }
3562 }
3563 return 0;
3564
3565 no_mbufs:
3566 ipstat.ips_pktdropcntrl++;
3567 return ENOBUFS;
3568 }
3569
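/*
 * Validate the IPv4 header checksum: trust a driver that set
 * CSUM_IP_CHECKED, compute the sum in software otherwise, and treat
 * loopback/PKTF_LOOP packets as implicitly valid.
 * Returns zero when the header checksum is good.
 */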
3570 static inline u_short
3571 ip_cksum(struct mbuf *m, int hlen)
3572 {
3573 u_short sum;
3574
3575 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3576 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3577 } else if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
3578 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
3579 /*
3580 * The packet arrived on an interface which isn't capable
3581 * of performing IP header checksum; compute it now.
3582 */
3583 sum = ip_cksum_hdr_in(m, hlen);
3584 } else {
3585 sum = 0;
3586 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
3587 CSUM_IP_CHECKED | CSUM_IP_VALID);
3588 m->m_pkthdr.csum_data = 0xffff;
3589 }
3590
3591 if (sum != 0) {
3592 OSAddAtomic(1, &ipstat.ips_badsum);
3593 }
3594
3595 return sum;
3596 }
3597
3598 static int
3599 ip_getstat SYSCTL_HANDLER_ARGS
3600 {
3601 #pragma unused(oidp, arg1, arg2)
3602 if (req->oldptr == USER_ADDR_NULL) {
3603 req->oldlen = (size_t)sizeof(struct ipstat);
3604 }
3605
3606 return SYSCTL_OUT(req, &ipstat, MIN(sizeof(ipstat), req->oldlen));
3607 }
3608
3609 void
3610 ip_setsrcifaddr_info(struct mbuf *m, uint16_t src_idx, struct in_ifaddr *ia)
3611 {
3612 VERIFY(m->m_flags & M_PKTHDR);
3613
3614 /*
3615 * If the source ifaddr is specified, pick up the information
3616 * from there; otherwise just grab the passed-in ifindex as the
3617 * caller may not have the ifaddr available.
3618 */
3619 if (ia != NULL) {
3620 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3621 m->m_pkthdr.src_ifindex = ia->ia_ifp->if_index;
3622 } else {
3623 m->m_pkthdr.src_ifindex = src_idx;
3624 if (src_idx != 0) {
3625 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3626 }
3627 }
3628 }
3629
3630 void
3631 ip_setdstifaddr_info(struct mbuf *m, uint16_t dst_idx, struct in_ifaddr *ia)
3632 {
3633 VERIFY(m->m_flags & M_PKTHDR);
3634
3635 /*
3636 * If the destination ifaddr is specified, pick up the information
3637 * from there; otherwise just grab the passed-in ifindex as the
3638 * caller may not have the ifaddr available.
3639 */
3640 if (ia != NULL) {
3641 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3642 m->m_pkthdr.dst_ifindex = ia->ia_ifp->if_index;
3643 } else {
3644 m->m_pkthdr.dst_ifindex = dst_idx;
3645 if (dst_idx != 0) {
3646 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3647 }
3648 }
3649 }
3650
3651 int
3652 ip_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *iaf)
3653 {
3654 VERIFY(m->m_flags & M_PKTHDR);
3655
3656 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
3657 return -1;
3658 }
3659
3660 if (src_idx != NULL) {
3661 *src_idx = m->m_pkthdr.src_ifindex;
3662 }
3663
3664 if (iaf != NULL) {
3665 *iaf = 0;
3666 }
3667
3668 return 0;
3669 }
3670
3671 int
3672 ip_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *iaf)
3673 {
3674 VERIFY(m->m_flags & M_PKTHDR);
3675
3676 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
3677 return -1;
3678 }
3679
3680 if (dst_idx != NULL) {
3681 *dst_idx = m->m_pkthdr.dst_ifindex;
3682 }
3683
3684 if (iaf != NULL) {
3685 *iaf = 0;
3686 }
3687
3688 return 0;
3689 }
3690
3691 /*
3692 * Protocol input handler for IPPROTO_GRE.
3693 */
3694 void
3695 gre_input(struct mbuf *m, int off)
3696 {
3697 gre_input_func_t fn = gre_input_func;
3698
3699 /*
3700 * If there is a registered GRE input handler, pass mbuf to it.
3701 */
3702 if (fn != NULL) {
3703 lck_mtx_unlock(inet_domain_mutex);
3704 m = fn(m, off, (mtod(m, struct ip *))->ip_p);
3705 lck_mtx_lock(inet_domain_mutex);
3706 }
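
	/*
	 * Note: the handler returns NULL when it has consumed the mbuf.
	 * The domain mutex is dropped across the callback, presumably so
	 * the handler may re-enter the stack without holding it (hedged:
	 * the locking rationale is an assumption, not stated here).
	 */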
3707
3708 /*
3709	 * If no matching tunnel that is up is found, inject the mbuf
3710	 * into the raw IP input path to see if anyone picks it up.
3711 */
3712 if (m != NULL) {
3713 rip_input(m, off);
3714 }
3715 }
3716
3717 /*
3718 * Private KPI for PPP/PPTP.
3719 */
3720 int
3721 ip_gre_register_input(gre_input_func_t fn)
3722 {
3723 lck_mtx_lock(inet_domain_mutex);
3724 gre_input_func = fn;
3725 lck_mtx_unlock(inet_domain_mutex);
3726
3727 return 0;
3728 }
3729
3730 #if (DEBUG || DEVELOPMENT)
3731 static int
3732 sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS
3733 {
3734 #pragma unused(arg1, arg2)
3735 int error, i;
3736
3737 i = ip_input_measure;
3738 error = sysctl_handle_int(oidp, &i, 0, req);
3739 if (error || req->newptr == USER_ADDR_NULL) {
3740 goto done;
3741 }
3742 /* impose bounds */
3743 if (i < 0 || i > 1) {
3744 error = EINVAL;
3745 goto done;
3746 }
3747 if (ip_input_measure != i && i == 1) {
3748 net_perf_initialize(&net_perf, ip_input_measure_bins);
3749 }
3750 ip_input_measure = i;
3751 done:
3752 return error;
3753 }
3754
3755 static int
3756 sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS
3757 {
3758 #pragma unused(arg1, arg2)
3759 int error;
3760 uint64_t i;
3761
3762 i = ip_input_measure_bins;
3763 error = sysctl_handle_quad(oidp, &i, 0, req);
3764 if (error || req->newptr == USER_ADDR_NULL) {
3765 goto done;
3766 }
3767 /* validate data */
3768 if (!net_perf_validate_bins(i)) {
3769 error = EINVAL;
3770 goto done;
3771 }
3772 ip_input_measure_bins = i;
3773 done:
3774 return error;
3775 }
3776
3777 static int
3778 sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS
3779 {
3780 #pragma unused(oidp, arg1, arg2)
3781 if (req->oldptr == USER_ADDR_NULL) {
3782 req->oldlen = (size_t)sizeof(struct ipstat);
3783 }
3784
3785 return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
3786 }
3787 #endif /* (DEBUG || DEVELOPMENT) */
3788
3789 static int
3790 sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS
3791 {
3792 #pragma unused(arg1, arg2)
3793 int error, i;
3794
3795 i = ip_checkinterface;
3796 error = sysctl_handle_int(oidp, &i, 0, req);
3797 if (error != 0 || req->newptr == USER_ADDR_NULL) {
3798 return error;
3799 }
3800
3801 switch (i) {
3802 case IP_CHECKINTERFACE_WEAK_ES:
3803 case IP_CHECKINTERFACE_HYBRID_ES:
3804 case IP_CHECKINTERFACE_STRONG_ES:
3805 if (ip_checkinterface != i) {
3806 ip_checkinterface = i;
3807 os_log(OS_LOG_DEFAULT, "%s: ip_checkinterface is now %d\n",
3808 __func__, ip_checkinterface);
3809 }
3810 break;
3811 default:
3812 error = EINVAL;
3813 break;
3814 }
3815 return error;
3816 }
3817