/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2007 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define _IP_VHL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/kdebug.h>
#include <mach/mach_time.h>
#include <mach/sdt.h>

#include <machine/endian.h>
#include <dev/random/randomdev.h>

#include <kern/queue.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#include <net/ntstat.h>
#include <net/dlil.h>
#include <net/classq/classq.h>
#include <net/net_perf.h>
#include <net/init.h>
#if PF
#include <net/pfvar.h>
#endif /* PF */
#include <net/if_ports_used.h>
#include <net/droptap.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_arp.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/kpi_ipfilter_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/bootp.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#if IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif /* IPSEC */

#include <net/sockaddr_utils.h>

#include <os/log.h>

extern struct inpcbinfo ripcbinfo;

#define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIP, 0)
#define DBG_LAYER_END           NETDBG_CODE(DBG_NETIP, 2)
#define DBG_FNC_IP_INPUT        NETDBG_CODE(DBG_NETIP, (2 << 8))

#if IPSEC
extern int ipsec_bypass;
#endif /* IPSEC */

MBUFQ_HEAD(fq_head);

static int frag_timeout_run;    /* frag timer is scheduled to run */
static void frag_timeout(void *);
static void frag_sched_timeout(void);

static struct ipq *ipq_alloc(void);
static void ipq_free(struct ipq *);
static void ipq_updateparams(void);
static void ip_input_second_pass(struct mbuf *, struct ifnet *,
    int, int, struct ip_fw_in_args *);

static LCK_GRP_DECLARE(ipqlock_grp, "ipqlock");
static LCK_MTX_DECLARE(ipqlock, &ipqlock_grp);


/* Packet reassembly stuff */
#define IPREASS_NHASH_LOG2      6
#define IPREASS_NHASH           (1 << IPREASS_NHASH_LOG2)
#define IPREASS_HMASK           (IPREASS_NHASH - 1)
#define IPREASS_HASH(x, y) \
	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
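
/*
 * Worked example (illustrative), assuming x is the source address and y
 * the IP ID as used by the reassembly code: with x = 0x0a000234 and
 * y = 0x0011, (x & 0xF) = 0x4 and (((x >> 8) & 0xF) << 4) = 0x20, so the
 * bucket is ((0x24 ^ 0x11) & 0x3F) = 0x35, i.e. queue 53 of the 64
 * IPREASS_NHASH buckets.
 */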

/* IP fragment reassembly queues (protected by ipqlock) */
static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH]; /* ip reassembly queues */
static int maxnipq;             /* max packets in reass queues */
static u_int32_t maxfragsperpacket; /* max frags/packet in reass queues */
static u_int32_t nipq;          /* # of packets in reass queues */
static u_int32_t ipq_limit;     /* ipq allocation limit */
static u_int32_t ipq_count;     /* current # of allocated ipq's */

static int sysctl_ipforwarding SYSCTL_HANDLER_ARGS;
static int sysctl_maxnipq SYSCTL_HANDLER_ARGS;
static int sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS;

#if (DEBUG || DEVELOPMENT)
static int sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS;
static int sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS;
static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS;
#endif /* (DEBUG || DEVELOPMENT) */

int ipforwarding = 0;
SYSCTL_PROC(_net_inet_ip, IPCTL_FORWARDING, forwarding,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0,
    sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces");

static int ipsendredirects = 1; /* XXX */
SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0,
    "Enable sending IP redirects");

int ip_defttl = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_defttl, 0, "Maximum TTL on IP packets");

static int ip_dosourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0,
    "Enable forwarding source routed IP packets");

static int ip_acceptsourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0,
    "Enable accepting source routed IP packets");

static int ip_sendsourcequench = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0,
    "Enable the transmission of source quench packets");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq,
    "I", "Maximum number of IPv4 fragment reassembly queue entries");

SYSCTL_UINT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nipq, 0, "Current number of IPv4 fragment reassembly queue entries");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragsperpacket,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0,
    sysctl_maxfragsperpacket, "I",
    "Maximum number of IPv4 fragments allowed per packet");

static uint32_t ip_adj_clear_hwcksum = 0;
SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_clear_hwcksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0,
    "Invalidate hwcksum info when adjusting length");

static uint32_t ip_adj_partial_sum = 1;
SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0,
    "Perform partial sum adjustment of trailing bytes at IP layer");

/*
 * ip_checkinterface controls the receive side of the models for multihoming
 * that are discussed in RFC 1122.
 *
 * ip_checkinterface values are:
 *  IP_CHECKINTERFACE_WEAK_ES:
 *	This corresponds to the Weak End-System model where incoming packets
 *	from any interface are accepted, provided the destination address of
 *	the incoming packet is assigned to some interface.
 *
 *  IP_CHECKINTERFACE_HYBRID_ES:
 *	The Hybrid End-System model uses the Strong End-System model for
 *	tunnel interfaces (ipsec and utun) and the Weak End-System model for
 *	other interface families.  This prevents a rogue middle box from
 *	probing for signs of TCP connections that use the tunnel interface.
 *
 *  IP_CHECKINTERFACE_STRONG_ES:
 *	The Strong End-System model requires that the packet arrive on an
 *	interface that is assigned the destination address of the packet.
 *
 * Since the routing table and transmit implementation do not implement the
 * Strong ES model, setting this to a value different from
 * IP_CHECKINTERFACE_WEAK_ES may lead to unexpected results.
 *
 * When forwarding is enabled, the system reverts to the Weak ES model as a
 * router is expected by design to receive packets from several interfaces
 * to the same address.
 *
 * XXX - ip_checkinterface currently must be set to IP_CHECKINTERFACE_WEAK_ES
 * if you use ipnat to translate the destination address to another local
 * interface.
 *
 * XXX - ip_checkinterface must be set to IP_CHECKINTERFACE_WEAK_ES if you
 * add IP aliases to the loopback interface instead of the interface where
 * the packets for those addresses are received.
 */
#define IP_CHECKINTERFACE_WEAK_ES       0
#define IP_CHECKINTERFACE_HYBRID_ES     1
#define IP_CHECKINTERFACE_STRONG_ES     2
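
/*
 * Usage note (illustrative): the model is seeded at boot via the
 * "ip_checkinterface" boot-arg parsed in ip_init() below, and can be
 * changed at runtime through the net.inet.ip.check_interface sysctl
 * registered underneath, e.g. from user space:
 *
 *	sysctl -w net.inet.ip.check_interface=2		# Strong ES model
 *
 * Out-of-range values are ignored by the boot-arg switch in ip_init();
 * the runtime path goes through sysctl_ip_checkinterface(), which
 * presumably performs the equivalent validation.
 */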

static int ip_checkinterface = IP_CHECKINTERFACE_HYBRID_ES;

static int sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, check_interface,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_checkinterface, "I", "Verify packet arrives on correct interface");

#if (DEBUG || DEVELOPMENT)
#define IP_CHECK_IF_DEBUG 1
#else
#define IP_CHECK_IF_DEBUG 0
#endif /* (DEBUG || DEVELOPMENT) */
static int ip_checkinterface_debug = IP_CHECK_IF_DEBUG;
SYSCTL_INT(_net_inet_ip, OID_AUTO, checkinterface_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_checkinterface_debug, IP_CHECK_IF_DEBUG, "");

static int ip_chainsz = 6;
SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chainsz, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_chainsz, 1, "IP receive side max chaining");

#if (DEBUG || DEVELOPMENT)
static int ip_input_measure = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement");

static uint64_t ip_input_measure_bins = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_bins,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0,
    sysctl_ip_input_measure_bins, "I",
    "bins for chaining performance data histogram");

static net_perf_t net_perf;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_data,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_input_getperf, "S,net_perf",
    "IP input performance data (struct net_perf, net/net_perf.h)");
#endif /* (DEBUG || DEVELOPMENT) */

#if DIAGNOSTIC
static int ipprintfs = 0;
#endif

struct protosw *ip_protox[IPPROTO_MAX];

static LCK_GRP_DECLARE(in_ifaddr_rwlock_grp, "in_ifaddr_rwlock");
LCK_RW_DECLARE(in_ifaddr_rwlock, &in_ifaddr_rwlock_grp);

#define INADDR_NHASH    61
static uint32_t inaddr_nhash;   /* hash table size */
static uint32_t inaddr_hashp;   /* next largest prime */

/* Protected by in_ifaddr_rwlock */
struct in_ifaddrhead in_ifaddrhead; /* first inet address */
static struct in_ifaddrhashhead *__counted_by(inaddr_nhash) in_ifaddrhashtbl = NULL; /* inet addr hash table */

static int ip_getstat SYSCTL_HANDLER_ARGS;
struct ipstat ipstat;
SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, ip_getstat, "S,ipstat",
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#if IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_mtu, 0, "Default MTU");
#endif /* IPCTL_DEFMTU */

#if IPSTEALTH
static int ipstealth = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipstealth, 0, "");
#endif /* IPSTEALTH */

#if DUMMYNET
ip_dn_io_t *ip_dn_io_ptr;
#endif /* DUMMYNET */

SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local");

struct ip_linklocal_stat ip_linklocal_stat;
SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat,
    "Number of link local packets with TTL less than 255");

SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input");

int ip_linklocal_in_allowbadttl = 1;
SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0,
    "Allow incoming link local packets with TTL less than 255");


/*
 * We need to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
static int ip_nhops = 0;
static struct ip_srcrt {
	struct in_addr dst;             /* final destination */
	char nop;                       /* one NOP to align */
	char srcopt[IPOPT_OFFSET + 1];  /* OPTVAL, OLEN and OFFSET */
	struct in_addr route[MAX_IPOPTLEN / sizeof(struct in_addr)];
} ip_srcrt;

static void in_ifaddrhashtbl_init(void);
static void save_rte(u_char *__indexable, struct in_addr);
static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *);
static void ip_forward(struct mbuf *, int, struct sockaddr_in *);
static void frag_freef(struct ipqhead *, struct ipq *, drop_reason_t);
static struct mbuf *ip_reass(struct mbuf *);
static void ip_fwd_route_copyout(struct ifnet *, struct route *);
static void ip_fwd_route_copyin(struct ifnet *, struct route *);
static inline u_short ip_cksum(struct mbuf *, int);

/*
 * On platforms which require strict alignment (currently for anything but
 * i386 or x86_64 or arm64), check if the IP header pointer is 32-bit
 * aligned; if not, copy the contents of the mbuf chain into a new chain,
 * and free the original one.  Create some head room in the first mbuf of
 * the new chain, in case it's needed later on.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
#else /* !__i386__ && !__x86_64__ && !__arm64__ */
#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do {                        \
	if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) {                           \
	        struct mbuf *_n;                                              \
	        struct ifnet *__ifp = (_ifp);                                 \
	        os_atomic_inc(&(__ifp)->if_alignerrs, relaxed);               \
	        if (((_m)->m_flags & M_PKTHDR) &&                             \
	            (_m)->m_pkthdr.pkt_hdr != NULL)                           \
	                (_m)->m_pkthdr.pkt_hdr = NULL;                        \
	        _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT);              \
	        if (_n == NULL) {                                             \
	                os_atomic_inc(&ipstat.ips_toosmall, relaxed);         \
	                m_drop(_m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_TOO_SMALL, NULL, 0); \
	                (_m) = NULL;                                          \
	                _action;                                              \
	        } else {                                                      \
	                VERIFY(_n != (_m));                                   \
	                (_m) = _n;                                            \
	        }                                                             \
	}                                                                     \
} while (0)
#endif /* !__i386__ && !__x86_64__ && !__arm64__ */
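
/*
 * Usage sketch (illustrative): the third argument is a statement executed
 * when the defrag fails, at which point the mbuf has already been dropped
 * and (_m) set to NULL, e.g.:
 *
 *	IP_HDR_ALIGNMENT_FIXUP(m, inifp, return );
 *
 * On success (_m) may point at a brand new chain, so callers must re-fetch
 * any cached header pointers with mtod() afterwards.
 */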


typedef enum ip_check_if_result {
	IP_CHECK_IF_NONE = 0,
	IP_CHECK_IF_OURS = 1,
	IP_CHECK_IF_DROP = 2,
	IP_CHECK_IF_FORWARD = 3
} ip_check_if_result_t;
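
/*
 * How ip_input_second_pass() consumes these results: IP_CHECK_IF_OURS
 * leads to local delivery, IP_CHECK_IF_FORWARD hands the packets to
 * ip_forward(), and IP_CHECK_IF_DROP means the chain has already been
 * freed by ip_input_check_interface().  IP_CHECK_IF_NONE is only used
 * internally and is never returned to the caller.
 */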

static ip_check_if_result_t ip_input_check_interface(struct mbuf **, struct ip *, struct ifnet *);

/*
 * GRE input handler function, settable via ip_gre_register_input() for PPTP.
 */
static gre_input_func_t gre_input_func;

static void
ip_init_delayed(void)
{
	struct ifreq ifr;
	int error;
	struct sockaddr_in *__single sin;

	bzero(&ifr, sizeof(ifr));
	strlcpy(ifr.ifr_name, "lo0", sizeof(ifr.ifr_name));
	sin = SIN(&ifr.ifr_addr);
	sin->sin_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	error = in_control(NULL, SIOCSIFADDR, (caddr_t)&ifr, lo_ifp, kernproc);
	if (error) {
		printf("%s: failed to initialise lo0's address, error=%d\n",
		    __func__, error);
	}
}

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(struct protosw *pp, struct domain *dp)
{
	static int ip_initialized = 0;
	struct protosw *__single pr;
	struct timeval tv;
	int i;

	domain_proto_mtx_lock_assert_held();
	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

	/*
	 * Some ioctls (e.g. SIOCAIFADDR) use ifaliasreq struct, which is
	 * interchangeable with in_aliasreq; they must have the same size.
	 */
	static_assert(sizeof(struct ifaliasreq) == sizeof(struct in_aliasreq));

	if (!os_atomic_cmpxchg(&ip_initialized, 0, 1, relaxed)) {
		return;
	}

	TAILQ_INIT(&in_ifaddrhead);
	in_ifaddrhashtbl_init();

	ip_moptions_init();

	pr = pffindproto_locked(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL) {
		panic("%s: Unable to find [PF_INET,IPPROTO_RAW,SOCK_RAW]",
		    __func__);
		/* NOTREACHED */
	}

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++) {
		ip_protox[i] = pr;
	}
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[], skipping protocols IPPROTO_{IP,RAW}.
	 */
	VERIFY(dp == inetdomain && dp->dom_family == PF_INET);
	TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
		VERIFY(pr->pr_domain == dp);
		if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr;
			}
		}
	}

	lck_mtx_lock(&ipqlock);
	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&ipq[i]);
	}

	maxnipq = 8192;
	maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */
	ipq_updateparams();
	lck_mtx_unlock(&ipqlock);

	getmicrotime(&tv);
	ip_id = (u_short)(RandomULong() ^ tv.tv_usec);

	PE_parse_boot_argn("ip_checkinterface", &i, sizeof(i));
	switch (i) {
	case IP_CHECKINTERFACE_WEAK_ES:
	case IP_CHECKINTERFACE_HYBRID_ES:
	case IP_CHECKINTERFACE_STRONG_ES:
		ip_checkinterface = i;
		break;
	default:
		break;
	}

	net_init_add(ip_init_delayed);
}

/*
 * Initialize IPv4 source address hash table.
 */
static void
in_ifaddrhashtbl_init(void)
{
	int i, k, p;
	uint32_t nhash = 0;
	uint32_t hash_size;

	if (in_ifaddrhashtbl != NULL) {
		return;
	}

	PE_parse_boot_argn("inaddr_nhash", &nhash,
	    sizeof(inaddr_nhash));
	if (nhash == 0) {
		nhash = INADDR_NHASH;
	}

	hash_size = nhash * sizeof(*in_ifaddrhashtbl);

	in_ifaddrhashtbl = zalloc_permanent(
		hash_size,
		ZALIGN_PTR);
	inaddr_nhash = nhash;

	/*
	 * Generate the next largest prime greater than inaddr_nhash.
	 */
	k = (inaddr_nhash % 2 == 0) ? inaddr_nhash + 1 : inaddr_nhash + 2;
	for (;;) {
		p = 1;
		for (i = 3; i * i <= k; i += 2) {
			if (k % i == 0) {
				p = 0;
			}
		}
		if (p == 1) {
			break;
		}
		k += 2;
	}
	inaddr_hashp = k;
}

uint32_t
inaddr_hashval(uint32_t key)
{
	/*
	 * The hash index is the computed prime times the key modulo
	 * the hash size, as documented in "Introduction to Algorithms"
	 * (Cormen, Leiserson, Rivest).
	 */
	if (inaddr_nhash > 1) {
		return (key * inaddr_hashp) % inaddr_nhash;
	} else {
		return 0;
	}
}
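
/*
 * Worked example (illustrative): with the default INADDR_NHASH of 61,
 * in_ifaddrhashtbl_init() above picks inaddr_hashp = 67, the next prime
 * greater than 61.  A key of 0x0a000001 (10.0.0.1) then hashes to
 * ((0x0a000001 * 67) mod 2^32) % 61 == 55, the multiplication wrapping
 * at 32 bits because both operands are uint32_t.
 */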

struct in_ifaddrhashhead *
inaddr_hashlookup(uint32_t key)
{
	return &in_ifaddrhashtbl[inaddr_hashval(key)];
}

extern void log_hexdump(os_log_t log_handle, void *__sized_by(len) data, size_t len);

static void
log_wake_ip_mbuf(struct ifnet *ifp, struct mbuf *m)
{
	char buffer[64];
	size_t buflen = MIN(mbuf_pkthdr_len(m), sizeof(buffer));

	os_log(wake_packet_log_handle, "wake IP packet from %s len %d if_is_lpw_enabled: %d",
	    ifp->if_xname, m_pktlen(m), if_is_lpw_enabled(ifp));
	if (mbuf_copydata(m, 0, buflen, buffer) == 0) {
		log_hexdump(wake_packet_log_handle, buffer, buflen);
	}
}

static void
ip_proto_process_wake_packet(struct mbuf *m)
{
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	if (if_is_lpw_enabled(ifp)) {
		if (net_wake_pkt_debug > 0) {
			log_wake_ip_mbuf(ifp, m);
		}
		if_exit_lpw(ifp, "IP packet");
	}
}

__private_extern__ void
ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto,
    ipfilter_t inject_ipfref)
{
	struct ipfilter *__single filter;
	int seen = (inject_ipfref == NULL);
	int changed_header = 0;
	struct ip *ip;
	void (*pr_input)(struct mbuf *, int len);

	if (!TAILQ_EMPTY(&ipv4_filters)) {
		ipf_ref();
		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
			if (seen == 0) {
				if ((struct ipfilter *)inject_ipfref == filter) {
					seen = 1;
				}
			} else if (filter->ipf_filter.ipf_input) {
				errno_t result;

				if (changed_header == 0) {
					/*
					 * Perform IP header alignment fixup,
					 * if needed, before passing packet
					 * into filter(s).
					 */
					IP_HDR_ALIGNMENT_FIXUP(m,
					    m->m_pkthdr.rcvif, ipf_unref());

					/* ipf_unref() already called */
					if (m == NULL) {
						return;
					}

					changed_header = 1;
					ip = mtod(m, struct ip *);
					ip->ip_len = htons(ip->ip_len + (uint16_t)hlen);
					ip->ip_off = htons(ip->ip_off);
					ip->ip_sum = 0;
					ip->ip_sum = ip_cksum_hdr_in(m, hlen);
				}
				result = filter->ipf_filter.ipf_input(
					filter->ipf_filter.cookie, (mbuf_t *)&m,
					hlen, proto);
				if (result == EJUSTRETURN) {
					ipf_unref();
					return;
				}
				if (result != 0) {
					ipstat.ips_input_ipf_drop++;
					ipf_unref();
					m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FILTER_DROP,
					    NULL, 0);
					return;
				}
			}
		}
		ipf_unref();
	}

	/* Perform IP header alignment fixup (post-filters), if needed */
	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );

	ip = mtod(m, struct ip *);

	if (changed_header) {
		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
		ip->ip_off = ntohs(ip->ip_off);
	}

	/*
	 * Check if we need to switch to full wake mode -- TCP knows about
	 * idle connections
	 */
	if (__improbable(ip->ip_p != IPPROTO_TCP && (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) != 0)) {
		ip_proto_process_wake_packet(m);
	}

	/*
	 * If there isn't a specific lock for the protocol
	 * we're about to call, use the generic lock for AF_INET.
	 * Otherwise let the protocol deal with its own locking.
	 */
	if ((pr_input = ip_protox[ip->ip_p]->pr_input) == NULL) {
		ipstat.ips_input_no_proto++;
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_NO_PROTO,
		    NULL, 0);
	} else if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
		lck_mtx_lock(inet_domain_mutex);
		pr_input(m, hlen);
		lck_mtx_unlock(inet_domain_mutex);
	} else {
		pr_input(m, hlen);
	}
}

struct pktchain_elm {
	struct mbuf     *pkte_head;
	struct mbuf     *pkte_tail;
	struct in_addr  pkte_saddr;
	struct in_addr  pkte_daddr;
	uint16_t        pkte_npkts;
	uint16_t        pkte_proto;
	uint32_t        pkte_nbytes;
};

typedef struct pktchain_elm pktchain_elm_t;

/* Store up to PKTTBL_SZ unique flows on the stack */
#define PKTTBL_SZ       7

static struct mbuf *
ip_chain_insert(struct mbuf *packet, pktchain_elm_t *__counted_by(PKTTBL_SZ) tbl)
{
	struct ip *ip;
	int pkttbl_idx = 0;
	struct mbuf *head;

	ip = mtod(packet, struct ip *);

	/* reusing the hash function from inaddr_hashval */
	pkttbl_idx = inaddr_hashval(ntohl(ip->ip_src.s_addr)) % PKTTBL_SZ;
	head = tbl[pkttbl_idx].pkte_head;
	if (head == NULL) {
		tbl[pkttbl_idx].pkte_head = packet;
		tbl[pkttbl_idx].pkte_saddr.s_addr = ip->ip_src.s_addr;
		tbl[pkttbl_idx].pkte_daddr.s_addr = ip->ip_dst.s_addr;
		tbl[pkttbl_idx].pkte_proto = ip->ip_p;
	} else {
		if ((ip->ip_dst.s_addr == tbl[pkttbl_idx].pkte_daddr.s_addr) &&
		    (ip->ip_src.s_addr == tbl[pkttbl_idx].pkte_saddr.s_addr) &&
		    (ip->ip_p == tbl[pkttbl_idx].pkte_proto) &&
		    (packet->m_flags & (M_BCAST | M_MCAST)) == (head->m_flags & (M_BCAST | M_MCAST))) {
			/* same flow as the bucket head: fall through and append */
		} else {
			return packet;
		}
	}
	if (tbl[pkttbl_idx].pkte_tail != NULL) {
		mbuf_setnextpkt(tbl[pkttbl_idx].pkte_tail, packet);
	}

	tbl[pkttbl_idx].pkte_tail = packet;
	tbl[pkttbl_idx].pkte_npkts += 1;
	tbl[pkttbl_idx].pkte_nbytes += packet->m_pkthdr.len;
	return NULL;
}
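
/*
 * Behavior sketch (illustrative): packets sharing {src, dst, proto} and
 * the same M_BCAST/M_MCAST disposition as the bucket head are appended
 * to the bucket's tail and NULL is returned; a packet that hashes into
 * an occupied bucket but belongs to a different flow is handed back to
 * the caller, which counts it in ips_rxc_collisions and processes it
 * unchained (see ip_proto_input() below).
 */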

/* args is a dummy variable here for backward compatibility */
static void
ip_input_second_pass_loop_tbl(pktchain_elm_t *__counted_by(PKTTBL_SZ) tbl, struct ip_fw_in_args *args)
{
	int i = 0;

	for (i = 0; i < PKTTBL_SZ; i++) {
		if (tbl[i].pkte_head != NULL) {
			struct mbuf *m = tbl[i].pkte_head;
			ip_input_second_pass(m, m->m_pkthdr.rcvif,
			    tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args);

			if (tbl[i].pkte_npkts > 2) {
				ipstat.ips_rxc_chainsz_gt2++;
			}
			if (tbl[i].pkte_npkts > 4) {
				ipstat.ips_rxc_chainsz_gt4++;
			}
#if (DEBUG || DEVELOPMENT)
			if (ip_input_measure) {
				net_perf_histogram(&net_perf, tbl[i].pkte_npkts);
			}
#endif /* (DEBUG || DEVELOPMENT) */
			tbl[i].pkte_head = tbl[i].pkte_tail = NULL;
			tbl[i].pkte_npkts = 0;
			tbl[i].pkte_nbytes = 0;
			/* no need to initialize address and protocol in tbl */
		}
	}
}

static void
ip_input_cpout_args(struct ip_fw_in_args *args, struct ip_fw_args *args1,
    boolean_t *done_init)
{
	if (*done_init == FALSE) {
		bzero(args1, sizeof(struct ip_fw_args));
		*done_init = TRUE;
	}
	args1->fwa_pf_rule = args->fwai_pf_rule;
}

static void
ip_input_cpin_args(struct ip_fw_args *args1, struct ip_fw_in_args *args)
{
	args->fwai_pf_rule = args1->fwa_pf_rule;
}

typedef enum {
	IPINPUT_DOCHAIN = 0,
	IPINPUT_DONTCHAIN,
	IPINPUT_FREED,
	IPINPUT_DONE
} ipinput_chain_ret_t;
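
/*
 * How ip_proto_input() reacts to each value: IPINPUT_DOCHAIN packets are
 * inserted into the per-flow table for a batched second pass;
 * IPINPUT_DONTCHAIN packets stop the chaining loop (to preserve ordering)
 * and go through ip_input_second_pass() on their own; IPINPUT_FREED and
 * IPINPUT_DONE both mean the first pass consumed the mbuf (dropped, or
 * already dispatched), so nothing further is done with it.
 */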

static void
ip_input_update_nstat(struct ifnet *ifp, struct in_addr src_ip,
    u_int32_t packets, u_int32_t bytes)
{
	if (nstat_collect) {
		struct rtentry *rt = ifnet_cached_rtlookup_inet(ifp,
		    src_ip);
		if (rt != NULL) {
			nstat_route_rx(rt, packets, bytes, 0);
			rtfree(rt);
		}
	}
}

static void
ip_input_dispatch_chain(struct mbuf *m)
{
	mbuf_ref_t tmp_mbuf = m;
	mbuf_ref_t nxt_mbuf = NULL;
	struct ip *__single ip = NULL;
	unsigned int hlen;

	ip = mtod(tmp_mbuf, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	while (tmp_mbuf != NULL) {
		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
		mbuf_setnextpkt(tmp_mbuf, NULL);
		ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0);
		tmp_mbuf = nxt_mbuf;
		if (tmp_mbuf) {
			ip = mtod(tmp_mbuf, struct ip *);
			/* first mbuf of chain already has adjusted ip_len */
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_len -= hlen;
		}
	}
}

static void
ip_input_setdst_chain(struct mbuf *m, uint16_t ifindex, struct in_ifaddr *ia)
{
	mbuf_ref_t tmp_mbuf = m;

	while (tmp_mbuf != NULL) {
		ip_setdstifaddr_info(tmp_mbuf, ifindex, ia);
		tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
	}
}

static void
ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp)
{
	boolean_t adjust = TRUE;

	ASSERT(m_pktlen(m) > ip->ip_len);

	/*
	 * Invalidate hardware checksum info if ip_adj_clear_hwcksum
	 * is set; useful to handle buggy drivers.  Note that this
	 * should not be enabled by default, as we may get here due
	 * to link-layer padding.
	 */
	if (ip_adj_clear_hwcksum &&
	    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
	    !(inifp->if_flags & IFF_LOOPBACK) &&
	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
		m->m_pkthdr.csum_data = 0;
		ipstat.ips_adj_hwcsum_clr++;
	}

	/*
	 * If partial checksum information is available, subtract
	 * out the partial sum of postpended extraneous bytes, and
	 * update the checksum metadata accordingly.  By doing it
	 * here, the upper layer transport only needs to adjust any
	 * prepended extraneous bytes (else it will do both.)
	 */
	if (ip_adj_partial_sum &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
		    m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
		    (ip->ip_len - m->m_pkthdr.csum_rx_start),
		    m->m_pkthdr.csum_rx_val);
	} else if ((m->m_pkthdr.csum_flags &
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		/*
		 * If packet has partial checksum info and we decided not
		 * to subtract the partial sum of postpended extraneous
		 * bytes here (not the default case), leave that work to
		 * be handled by the other layers.  For now, only TCP, UDP
		 * layers are capable of dealing with this.  For all other
		 * protocols (including fragments), trim and ditch the
		 * partial sum as those layers might not implement partial
		 * checksumming (or adjustment) at all.
		 */
		if ((ip->ip_off & (IP_MF | IP_OFFMASK)) == 0 &&
		    (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_UDP)) {
			adjust = FALSE;
		} else {
			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
			m->m_pkthdr.csum_data = 0;
			ipstat.ips_adj_hwcsum_clr++;
		}
	}

	if (adjust) {
		ipstat.ips_adj++;
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else {
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
		}
	}
}
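
/*
 * Worked example (illustrative): if the link layer pads a 32-byte IP
 * datagram out to the 46-byte Ethernet minimum payload, m_pktlen() will
 * exceed ip_len by 14 bytes when we get here.  With ip_adj_partial_sum
 * left at its default of 1, m_adj_sum16() subtracts the 16-bit partial
 * sum over those 14 trailing bytes from csum_rx_val, and the mbuf is
 * then trimmed back down to ip_len.
 */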

/*
 * The first pass does all essential packet validation and places the packet
 * on a per-flow queue for operations that have the same outcome for all
 * packets of a flow.
 */
static ipinput_chain_ret_t
ip_input_first_pass(struct mbuf *m, struct ip_fw_in_args *args, struct mbuf **modm)
{
	struct ip *__single ip;
	ifnet_ref_t inifp;
	unsigned int hlen;
	int retval = IPINPUT_DOCHAIN;
	int len = 0;
	struct in_addr src_ip;
#if DUMMYNET
	struct m_tag *copy;
	struct m_tag *p;
	boolean_t delete = FALSE;
	struct ip_fw_args args1;
	boolean_t init = FALSE;
#endif /* DUMMYNET */
	ipfilter_t __single inject_filter_ref = NULL;

	/* Check if the mbuf is still valid after interface filter processing */
	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
	inifp = mbuf_pkthdr_rcvif(m);
	VERIFY(inifp != NULL);

	/* Perform IP header alignment fixup, if needed */
	IP_HDR_ALIGNMENT_FIXUP(m, inifp, return );

	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;

#if DUMMYNET
	/*
	 * Don't bother searching for tag(s) if there's none.
	 */
	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
		goto ipfw_tags_done;
	}

	/* Grab info from mtags prepended to the chain */
	p = m_tag_first(m);
	while (p) {
		if (p->m_tag_id == KERNEL_MODULE_TAG_ID) {
			if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) {
				struct dn_pkt_tag *dn_tag;

				dn_tag = (struct dn_pkt_tag *)(p->m_tag_data);
				args->fwai_pf_rule = dn_tag->dn_pf_rule;
				delete = TRUE;
			}

			if (delete) {
				copy = p;
				p = m_tag_next(m, p);
				m_tag_delete(m, copy);
			} else {
				p = m_tag_next(m, p);
			}
		} else {
			p = m_tag_next(m, p);
		}
	}

#if DIAGNOSTIC
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		panic("ip_input no HDR");
	}
#endif

	if (args->fwai_pf_rule) {
		/* dummynet already filtered us */
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		inject_filter_ref = ipf_get_inject_filter(m);
		if (args->fwai_pf_rule) {
			goto check_with_pf;
		}
	}
ipfw_tags_done:
#endif /* DUMMYNET */

	/*
	 * No need to process packet twice if we've already seen it.
	 */
	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
		inject_filter_ref = ipf_get_inject_filter(m);
	}
	if (inject_filter_ref != NULL) {
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;

		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
		    struct ip *, ip, struct ifnet *, inifp,
		    struct ip *, ip, struct ip6_hdr *, NULL);

		ip->ip_len = ntohs(ip->ip_len) - (u_short)hlen;
		ip->ip_off = ntohs(ip->ip_off);
		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
		return IPINPUT_DONE;
	}

	if (__improbable(m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
		if_ports_used_match_mbuf(inifp, PF_INET, m);
	}

	if (m->m_pkthdr.len < sizeof(struct ip)) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_tooshort);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_TOO_SHORT,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_toosmall);
		return IPINPUT_FREED;
	}

	ip = mtod(m, struct ip *);
	*modm = m;

	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
	    ip->ip_p, ip->ip_off, ip->ip_len);

	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badvers);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_VERSION,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	if (hlen < sizeof(struct ip)) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badhlen);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_HDR_LENGTH,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			OSAddAtomic(1, &ipstat.ips_total);
			OSAddAtomic(1, &ipstat.ips_badhlen);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *);
		*modm = m;
	}

	if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1) {
		m->m_pkthdr.pkt_ext_flags |= PKTF_EXT_L4S;
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		/*
		 * Allow for the following exceptions:
		 *
		 * 1. If the packet was sent to loopback (i.e. rcvif
		 *    would have been set earlier at output time.)
		 *
		 * 2. If the packet was sent out on loopback from a local
		 *    source address which belongs to a non-loopback
		 *    interface (i.e. rcvif may not necessarily be a
		 *    loopback interface, hence the test for PKTF_LOOP.)
		 *    Unlike IPv6, there is no interface scope ID, and
		 *    therefore we don't care so much about PKTF_IFINFO.
		 */
		if (!(inifp->if_flags & IFF_LOOPBACK) &&
		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
			OSAddAtomic(1, &ipstat.ips_total);
			OSAddAtomic(1, &ipstat.ips_badaddr);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_INVALID_ADDR,
			    NULL, 0);
			return IPINPUT_FREED;
		}
	}

	/* IPv4 Link-Local Addresses as defined in RFC3927 */
	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
		ip_linklocal_stat.iplls_in_total++;
		if (ip->ip_ttl != MAXTTL) {
			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
			/* Silently drop link local traffic with bad TTL */
			if (!ip_linklocal_in_allowbadttl) {
				OSAddAtomic(1, &ipstat.ips_total);
				KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
				m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_TTL,
				    NULL, 0);
				return IPINPUT_FREED;
			}
		}
	}

	if (ip_cksum(m, hlen)) {
		OSAddAtomic(1, &ipstat.ips_total);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_CHECKSUM,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
	    struct ip *, ip, struct ifnet *, inifp,
	    struct ip *, ip, struct ip6_hdr *, NULL);

	/*
	 * Convert fields to host representation.
	 */
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_len);
#endif

	if (ip->ip_len < hlen) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badlen);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_BAD_LENGTH,
		    NULL, 0);
		return IPINPUT_FREED;
	}

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_off);
#endif

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_tooshort);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_TOO_SHORT,
		    NULL, 0);
		return IPINPUT_FREED;
	}

	if (m->m_pkthdr.len > ip->ip_len) {
		ip_input_adjust(m, ip, inifp);
	}

	/* for netstat route statistics */
	src_ip = ip->ip_src;
	len = m->m_pkthdr.len;

#if DUMMYNET
check_with_pf:
#endif /* DUMMYNET */
#if PF
	/* Invoke inbound packet filter */
	if (PF_IS_ENABLED) {
		int error;
		ip_input_cpout_args(args, &args1, &init);
		ip = mtod(m, struct ip *);
		src_ip = ip->ip_src;

#if DUMMYNET
		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args1);
#else
		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
#endif /* DUMMYNET */
		if (error != 0 || m == NULL) {
			if (m != NULL) {
				panic("%s: unexpected packet %p",
				    __func__, m);
				/* NOTREACHED */
			}
			/* Already freed by callee */
			ip_input_update_nstat(inifp, src_ip, 1, len);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		*modm = m;
		ip_input_cpin_args(&args1, args);
	}
#endif /* PF */

#if IPSEC
	if (ipsec_bypass == 0 && ipsec_get_history_count(m)) {
		retval = IPINPUT_DONTCHAIN; /* XXX scope for chaining here? */
		goto pass;
	}
#endif

#if IPSEC
pass:
#endif
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	ip_nhops = 0;           /* for source routed packets */
	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
		src_ip = ip->ip_src;
		ip_input_update_nstat(inifp, src_ip, 1, len);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		OSAddAtomic(1, &ipstat.ips_total);
		return IPINPUT_FREED;
	}

	/*
	 * Don't chain fragmented packets
	 */
	if (ip->ip_off & ~(IP_DF | IP_RF)) {
		return IPINPUT_DONTCHAIN;
	}

	/* Allow DHCP/BootP responses through */
	if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
	    hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) {
		struct udpiphdr *__single ui;

		if (m->m_len < sizeof(struct udpiphdr) &&
		    (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
			OSAddAtomic(1, &udpstat.udps_hdrops);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		*modm = m;
		ui = mtod(m, struct udpiphdr *);
		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
			ip_setdstifaddr_info(m, inifp->if_index, NULL);
			return IPINPUT_DONTCHAIN;
		}
	}

	/* Avoid chaining raw sockets as ipsec checks occur later for them */
	if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) {
		return IPINPUT_DONTCHAIN;
	}

	return retval;
}

/*
 * Because the call to m_pullup() may free the mbuf, this function frees the
 * mbuf packet chain before it returns IP_CHECK_IF_DROP.
 */
static ip_check_if_result_t
ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp)
{
	mbuf_ref_t m = *mp;
	struct in_ifaddr *__single ia = NULL;
	struct in_ifaddr *__single best_ia = NULL;
	ifnet_ref_t match_ifp = NULL;
	ip_check_if_result_t result = IP_CHECK_IF_NONE;
	enum drop_reason drop_reason = DROP_REASON_IP_RCV_IF_NO_MATCH;

	/*
	 * Host broadcast and all network broadcast addresses are always a match
	 */
	if (ip->ip_dst.s_addr == (u_int32_t)INADDR_BROADCAST ||
	    ip->ip_dst.s_addr == INADDR_ANY) {
		ip_input_setdst_chain(m, inifp->if_index, NULL);
		return IP_CHECK_IF_OURS;
	}

	/*
	 * Check for a match in the hash bucket.
	 */
	lck_rw_lock_shared(&in_ifaddr_rwlock);
	TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) {
			best_ia = ia;
			match_ifp = best_ia->ia_ifp;

			if (ia->ia_ifp == inifp || (inifp->if_flags & IFF_LOOPBACK) ||
			    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
				/*
				 * A locally originated packet or packet from the loopback
				 * interface is always an exact interface address match
				 */
				match_ifp = inifp;
				break;
			}
			/*
			 * Continue the loop in case there's an exact match with
			 * another interface
			 */
		}
	}
	if (best_ia != NULL) {
		if (match_ifp != inifp && ipforwarding == 0 &&
		    ((ip_checkinterface == IP_CHECKINTERFACE_HYBRID_ES &&
		    (match_ifp->if_family == IFNET_FAMILY_IPSEC ||
		    match_ifp->if_family == IFNET_FAMILY_UTUN)) ||
		    ip_checkinterface == IP_CHECKINTERFACE_STRONG_ES)) {
			/*
			 * Drop when interface address check is strict and forwarding
			 * is disabled
			 */
			result = IP_CHECK_IF_DROP;
		} else {
			if ((m->m_flags & (M_BCAST | M_MCAST)) != 0) {
				drop_reason = DROP_REASON_FSW_DEMUX_L2_MULTI_L3_UNI;
				result = IP_CHECK_IF_DROP;
			} else {
				result = IP_CHECK_IF_OURS;
			}
			ip_input_setdst_chain(m, 0, best_ia);
		}
	}
	lck_rw_done(&in_ifaddr_rwlock);

	if (result == IP_CHECK_IF_NONE && (inifp->if_flags & IFF_BROADCAST)) {
		/*
		 * Check for broadcast addresses.
		 *
		 * Only accept broadcast packets that arrive via the matching
		 * interface.  Reception of forwarded directed broadcasts would be
		 * handled via ip_forward() and ether_frameout() with the loopback
		 * into the stack for SIMPLEX interfaces handled by ether_frameout().
		 */
		struct ifaddr *__single ifa;

		ifnet_lock_shared(inifp);
		TAILQ_FOREACH(ifa, &inifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET) {
				continue;
			}
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == ip->ip_dst.s_addr ||
			    ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
				ip_input_setdst_chain(m, 0, ia);
				result = IP_CHECK_IF_OURS;
				match_ifp = inifp;
				break;
			}
		}
		ifnet_lock_done(inifp);
	}

	/* Allow DHCP/BootP responses through */
	if (result == IP_CHECK_IF_NONE && (inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
	    ip->ip_p == IPPROTO_UDP && (IP_VHL_HL(ip->ip_vhl) << 2) == sizeof(struct ip)) {
		struct udpiphdr *__single ui;

		if (m->m_len < sizeof(struct udpiphdr)) {
			if ((m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
				OSAddAtomic(1, &udpstat.udps_hdrops);
				*mp = NULL;
				return IP_CHECK_IF_DROP;
			}
			/*
			 * m_pullup can return a different mbuf
			 */
			*mp = m;
			ip = mtod(m, struct ip *);
		}
		ui = mtod(m, struct udpiphdr *);
		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
			ip_input_setdst_chain(m, inifp->if_index, NULL);
			result = IP_CHECK_IF_OURS;
			match_ifp = inifp;
		}
	}

	if (result == IP_CHECK_IF_NONE) {
		if (ipforwarding == 0) {
			result = IP_CHECK_IF_DROP;
		} else {
			result = IP_CHECK_IF_FORWARD;
			ip_input_setdst_chain(m, inifp->if_index, NULL);
		}
	}

	if (result == IP_CHECK_IF_OURS && match_ifp != inifp) {
		ipstat.ips_rcv_if_weak_match++;

		/* Logging is too noisy when forwarding is enabled */
		if (ip_checkinterface_debug != 0 && ipforwarding == 0) {
			char src_str[MAX_IPv4_STR_LEN];
			char dst_str[MAX_IPv4_STR_LEN];

			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
			os_log_info(OS_LOG_DEFAULT,
			    "%s: weak ES interface match to %s for packet from %s to %s proto %u received via %s",
			    __func__, best_ia->ia_ifp->if_xname, src_str, dst_str, ip->ip_p, inifp->if_xname);
		}
	} else if (result == IP_CHECK_IF_DROP) {
		if (ip_checkinterface_debug > 0) {
			char src_str[MAX_IPv4_STR_LEN];
			char dst_str[MAX_IPv4_STR_LEN];

			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
			if (drop_reason == DROP_REASON_IP_RCV_IF_NO_MATCH) {
				os_log(OS_LOG_DEFAULT,
				    "%s: no interface match for packet from %s to %s proto %u received via %s",
				    __func__, src_str, dst_str, ip->ip_p, inifp->if_xname);
			} else if (drop_reason == DROP_REASON_FSW_DEMUX_L2_MULTI_L3_UNI) {
				os_log(OS_LOG_DEFAULT,
				    "%s: Layer 3 unicast dst sent to layer 2 non unicast dst: from %s to %s proto %u received via %s",
				    __func__, src_str, dst_str, ip->ip_p, inifp->if_xname);
			}
		}
		if (drop_reason == DROP_REASON_IP_RCV_IF_NO_MATCH) {
			mbuf_ref_t tmp_mbuf = m;
			while (tmp_mbuf != NULL) {
				ipstat.ips_rcv_if_no_match++;
				tmp_mbuf = tmp_mbuf->m_nextpkt;
			}
		}
		m_drop_list(m, NULL, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, drop_reason, NULL, 0);
		*mp = NULL;
	}

	return result;
}

static void
ip_input_second_pass(struct mbuf *m, struct ifnet *inifp,
    int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args)
{
	struct mbuf *tmp_mbuf = NULL;
	unsigned int hlen;

#pragma unused (args)

	struct ip *__single ip = mtod(m, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;

	OSAddAtomic(npkts_in_chain, &ipstat.ips_total);

	/*
	 * Naively assume we can attribute inbound data to the route we would
	 * use to send to this destination.  Asymmetric routing breaks this
	 * assumption, but it still allows us to account for traffic from
	 * a remote node in the routing table.
	 * This has a very significant performance impact, so we bypass it
	 * if nstat_collect is disabled.  We may also bypass it if the
	 * protocol is TCP in the future, because TCP will have a route that
	 * we can use to attribute the data to.  That does mean we would not
	 * account for forwarded TCP traffic.
	 */
	ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain,
	    bytes_in_chain);

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	tmp_mbuf = m;
	if (TAILQ_EMPTY(&in_ifaddrhead)) {
		while (tmp_mbuf != NULL) {
			if (!(tmp_mbuf->m_flags & (M_MCAST | M_BCAST))) {
				ip_setdstifaddr_info(tmp_mbuf, inifp->if_index,
				    NULL);
			}
			tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
		}
		goto ours;
	}

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 */
	if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		ip_check_if_result_t ip_check_if_result = IP_CHECK_IF_NONE;

		ip_check_if_result = ip_input_check_interface(&m, ip, inifp);
		ASSERT(ip_check_if_result != IP_CHECK_IF_NONE);
		if (ip_check_if_result == IP_CHECK_IF_OURS) {
			goto ours;
		} else if (ip_check_if_result == IP_CHECK_IF_DROP) {
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
	} else {
		struct in_multi *__single inm;
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		in_multihead_lock_shared();
		IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
		in_multihead_lock_done();
		if (inm == NULL) {
			OSAddAtomic(npkts_in_chain, &ipstat.ips_notmember);
			m_drop_list(m, NULL, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_UNKNOWN_MULTICAST_GROUP,
			    NULL, 0);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
		ip_input_setdst_chain(m, inifp->if_index, NULL);
		INM_REMREF(inm);
		goto ours;
	}

	tmp_mbuf = m;
	struct mbuf *__single nxt_mbuf = NULL;
	while (tmp_mbuf != NULL) {
		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
		/*
		 * Not for us; forward if possible and desirable.
		 */
		mbuf_setnextpkt(tmp_mbuf, NULL);
		if (ipforwarding == 0) {
			OSAddAtomic(1, &ipstat.ips_cantforward);
			m_drop(tmp_mbuf, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_CANNOT_FORWARD,
			    NULL, 0);
		} else {
			ip_forward(tmp_mbuf, 0, NULL);
		}
		tmp_mbuf = nxt_mbuf;
	}
	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
	return;
ours:
	ip = mtod(m, struct ip *); /* in case it changed */
	/*
	 * If offset is set, must reassemble.
	 */
	if (ip->ip_off & ~(IP_DF | IP_RF)) {
		VERIFY(npkts_in_chain == 1);
		m = ip_reass(m);
		if (m == NULL) {
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#if IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
		VERIFY(npkts_in_chain == 1);
		if (ipsec4_in_reject(m, NULL)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
			m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IPSEC_REJECT,
			    NULL, 0);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return;
		}
	}
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered);

	ip_input_dispatch_chain(m);

	KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
	return;
}

void
ip_proto_input(protocol_family_t protocol, mbuf_t packet_list)
{
#pragma unused(protocol)
	pktchain_elm_t pktchain_tbl[PKTTBL_SZ];

	mbuf_ref_t packet = NULL;
	mbuf_ref_t modm = NULL; /* modified mbuf */
	int retval = 0;
#if (DEBUG || DEVELOPMENT)
	struct timeval start_tv;
#endif /* (DEBUG || DEVELOPMENT) */
	int num_pkts = 0;
	int chain = 0;
	struct ip_fw_in_args args;

#if (DEBUG || DEVELOPMENT)
	if (ip_input_measure) {
		net_perf_start_time(&net_perf, &start_tv);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	bzero(&pktchain_tbl, sizeof(pktchain_tbl));
restart_list_process:
	chain = 0;
	for (packet = packet_list; packet; packet = packet_list) {
		m_add_crumb(packet, PKT_CRUMB_IP_INPUT);

		packet_list = mbuf_nextpkt(packet);
		mbuf_setnextpkt(packet, NULL);

		num_pkts++;
		modm = NULL;
		bzero(&args, sizeof(args));

		retval = ip_input_first_pass(packet, &args, &modm);

		if (retval == IPINPUT_DOCHAIN) {
			if (modm) {
				packet = modm;
			}
			packet = ip_chain_insert(packet, &pktchain_tbl[0]);
			if (packet == NULL) {
				ipstat.ips_rxc_chained++;
				chain++;
				if (chain > ip_chainsz) {
					break;
				}
			} else {
				ipstat.ips_rxc_collisions++;
				break;
			}
		} else if (retval == IPINPUT_DONTCHAIN) {
			/* in order to preserve order, exit from chaining */
			if (modm) {
				packet = modm;
			}
			ipstat.ips_rxc_notchain++;
			break;
		} else {
			/* packet was freed or delivered, do nothing. */
		}
	}

	/* do second pass here for pktchain_tbl */
	if (chain) {
		ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args);
	}

1727 if (packet) {
1728 /*
1729 * The equivalent update for the chaining case is performed in
1730 * ip_input_second_pass_loop_tbl().
1731 */
1732 #if (DEBUG || DEVELOPMENT)
1733 if (ip_input_measure) {
1734 net_perf_histogram(&net_perf, 1);
1735 }
1736 #endif /* (DEBUG || DEVELOPMENT) */
1737 ip_input_second_pass(packet, packet->m_pkthdr.rcvif,
1738 1, packet->m_pkthdr.len, &args);
1739 }
1740
1741 if (packet_list) {
1742 goto restart_list_process;
1743 }
1744
1745 #if (DEBUG || DEVELOPMENT)
1746 if (ip_input_measure) {
1747 net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1748 }
1749 #endif /* (DEBUG || DEVELOPMENT) */
1750 }
1751
1752 static void
1753 ipq_updateparams(void)
1754 {
1755 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
1756 /*
1757 * -1 for unlimited allocation.
1758 */
1759 if (maxnipq < 0) {
1760 ipq_limit = 0;
1761 }
1762 /*
1763 * Positive number for specific bound.
1764 */
1765 if (maxnipq > 0) {
1766 ipq_limit = maxnipq;
1767 }
1768 /*
1769 * Zero specifies no further fragment queue allocation -- set the
1770 * bound very low, but rely on implementation elsewhere to actually
1771 * prevent allocation and reclaim current queues.
1772 */
1773 if (maxnipq == 0) {
1774 ipq_limit = 1;
1775 }
1776 /*
1777 * Arm the purge timer if not already and if there's work to do
1778 */
1779 frag_sched_timeout();
1780 }
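/*
 * In summary, the mapping enforced above is:
 *
 *	maxnipq < 0	ipq_limit = 0	(no bound on fragment queues)
 *	maxnipq > 0	ipq_limit = maxnipq
 *	maxnipq == 0	ipq_limit = 1	(allocation effectively disabled)
 *
 * maxnipq itself is set through the sysctl handler below.
 */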
1781
1782 static int
1783 sysctl_maxnipq SYSCTL_HANDLER_ARGS
1784 {
1785 #pragma unused(arg1, arg2)
1786 int error, i;
1787
1788 lck_mtx_lock(&ipqlock);
1789 i = maxnipq;
1790 error = sysctl_handle_int(oidp, &i, 0, req);
1791 if (error || req->newptr == USER_ADDR_NULL) {
1792 goto done;
1793 }
1794 /* impose bounds */
1795 if (i < -1) {
1796 error = EINVAL;
1797 goto done;
1798 }
1799 maxnipq = i;
1800 ipq_updateparams();
1801 done:
1802 lck_mtx_unlock(&ipqlock);
1803 return error;
1804 }
1805
1806 static int
1807 sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS
1808 {
1809 #pragma unused(arg1, arg2)
1810 int error, i;
1811
1812 lck_mtx_lock(&ipqlock);
1813 i = maxfragsperpacket;
1814 error = sysctl_handle_int(oidp, &i, 0, req);
1815 if (error || req->newptr == USER_ADDR_NULL) {
1816 goto done;
1817 }
1818 maxfragsperpacket = i;
1819 ipq_updateparams(); /* see if we need to arm timer */
1820 done:
1821 lck_mtx_unlock(&ipqlock);
1822 return error;
1823 }
1824
1825 /*
1826 * Take incoming datagram fragment and try to reassemble it into
1827 * whole datagram. If a chain for reassembly of this datagram already
1828 * exists, then it is given as fp; otherwise have to make a chain.
1829 *
1830 * The IP header is *NOT* adjusted out of iplen (but in host byte order).
1831 */
1832 static struct mbuf *
1833 ip_reass(struct mbuf *m)
1834 {
1835 struct ip *__single ip;
1836 mbuf_ref_t p, q, nq, t;
1837 struct ipq *__single fp = NULL;
1838 struct ipqhead *__single head;
1839 int i, hlen, next;
1840 u_int8_t ecn, ecn0;
1841 uint32_t csum, csum_flags;
1842 uint16_t hash;
1843 struct fq_head dfq;
1844
1845 MBUFQ_INIT(&dfq); /* for deferred frees */
1846
1847 /* If maxnipq or maxfragsperpacket is 0, never accept fragments. */
1848 if (maxnipq == 0 || maxfragsperpacket == 0) {
1849 ipstat.ips_fragments++;
1850 ipstat.ips_fragdropped++;
1851 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_NOT_ACCEPTED,
1852 NULL, 0);
1853 if (nipq > 0) {
1854 lck_mtx_lock(&ipqlock);
1855 frag_sched_timeout(); /* purge stale fragments */
1856 lck_mtx_unlock(&ipqlock);
1857 }
1858 return NULL;
1859 }
1860
1861 ip = mtod(m, struct ip *);
1862 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1863
1864 lck_mtx_lock(&ipqlock);
1865
1866 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
1867 head = &ipq[hash];
1868
1869 /*
1870 * Look for the reassembly queue of this datagram; fragments are
1871 * keyed on (id, source, destination, protocol), per RFC 791.
1872 */
1873 TAILQ_FOREACH(fp, head, ipq_list) {
1874 if (ip->ip_id == fp->ipq_id &&
1875 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
1876 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
1877 ip->ip_p == fp->ipq_p) {
1878 goto found;
1879 }
1880 }
1881
1882 fp = NULL;
1883
1884 /*
1885 * Attempt to trim the number of allocated fragment queues if it
1886 * exceeds the administrative limit.
1887 */
1888 if ((nipq > (unsigned)maxnipq) && (maxnipq > 0)) {
1889 /*
1890 * drop something from the tail of the current queue
1891 * before proceeding further
1892 */
1893 struct ipq *__single fq = TAILQ_LAST(head, ipqhead);
1894 if (fq == NULL) { /* gak */
1895 for (i = 0; i < IPREASS_NHASH; i++) {
1896 struct ipq *__single r = TAILQ_LAST(&ipq[i], ipqhead);
1897 if (r) {
1898 ipstat.ips_fragdropped += r->ipq_nfrags;
1899 frag_freef(&ipq[i], r, DROP_REASON_IP_FRAG_TOO_MANY);
1900 break;
1901 }
1902 }
1903 } else {
1904 ipstat.ips_fragdropped += fq->ipq_nfrags;
1905 frag_freef(head, fq, DROP_REASON_IP_FRAG_TOO_MANY);
1906 }
1907 }
1908
1909 found:
1910 /*
1911 * Leverage partial checksum offload for IP fragments. Narrow down
1912 * the scope to cover only UDP without IP options, as that is the
1913 * most common case.
1914 *
1915 * Perform 1's complement adjustment of octets that got included/
1916 * excluded in the hardware-calculated checksum value. Ignore cases
1917 * where the value includes the entire IPv4 header span, as the sum
1918 * for those octets would already be 0 by the time we get here; IP
1919 * has already performed its header checksum validation. Also take
1920 * care of any trailing bytes and subtract out their partial sum.
1921 */
1922 if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) &&
1923 (m->m_pkthdr.csum_flags &
1924 (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
1925 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
1926 uint32_t start = m->m_pkthdr.csum_rx_start;
1927 int32_t trailer = (m_pktlen(m) - ip->ip_len);
1928 uint32_t swbytes = (uint32_t)trailer;
1929
1930 csum = m->m_pkthdr.csum_rx_val;
1931
1932 ASSERT(trailer >= 0);
1933 if ((start != 0 && start != hlen) || trailer != 0) {
1934 uint32_t datalen = ip->ip_len - hlen;
1935
1936 #if BYTE_ORDER != BIG_ENDIAN
1937 if (start < hlen) {
1938 HTONS(ip->ip_len);
1939 HTONS(ip->ip_off);
1940 }
1941 #endif /* BYTE_ORDER != BIG_ENDIAN */
1942 /* callee folds in sum */
1943 csum = m_adj_sum16(m, start, hlen, datalen, csum);
1944 if (hlen > start) {
1945 swbytes += (hlen - start);
1946 } else {
1947 swbytes += (start - hlen);
1948 }
1949 #if BYTE_ORDER != BIG_ENDIAN
1950 if (start < hlen) {
1951 NTOHS(ip->ip_off);
1952 NTOHS(ip->ip_len);
1953 }
1954 #endif /* BYTE_ORDER != BIG_ENDIAN */
1955 }
1956 csum_flags = m->m_pkthdr.csum_flags;
1957
1958 if (swbytes != 0) {
1959 udp_in_cksum_stats(swbytes);
1960 }
1961 if (trailer != 0) {
1962 m_adj(m, -trailer);
1963 }
1964 } else {
1965 csum = 0;
1966 csum_flags = 0;
1967 }
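/*
 * At this point csum/csum_flags describe the partial one's-complement
 * sum over this fragment's UDP payload only (or are zero when the
 * offload data was unusable); they are accumulated into the queue's
 * ipq_csum/ipq_csum_flags further below.
 */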
1968
1969 /* Invalidate checksum */
1970 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
1971
1972 ipstat.ips_fragments++;
1973
1974 /*
1975 * Adjust ip_len to not reflect header,
1976 * convert offset of this to bytes.
1977 */
1978 ip->ip_len -= hlen;
1979 if (ip->ip_off & IP_MF) {
1980 /*
1981 * Make sure that fragments have a data length
1982 * that's a non-zero multiple of 8 bytes.
1983 */
1984 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
1985 OSAddAtomic(1, &ipstat.ips_toosmall);
1986 /*
1987 * Reassembly queue may have been found if previous
1988 * fragments were valid; given that this one is bad,
1989 * we need to drop it. Make sure to set fp to NULL
1990 * if not already, since we don't want to decrement
1991 * ipq_nfrags as it doesn't include this packet.
1992 */
1993 fp = NULL;
1994 goto dropfrag;
1995 }
1996 m->m_flags |= M_FRAG;
1997 } else {
1998 /* Clear the flag in case packet comes from loopback */
1999 m->m_flags &= ~M_FRAG;
2000 }
2001 ip->ip_off = (u_short)(ip->ip_off << 3);
2002
2003 m->m_pkthdr.pkt_hdr = ip;
2004
2005 /* Previous ip_reass() started here. */
2006 /*
2007 * Presence of header sizes in mbufs
2008 * would confuse code below.
2009 */
2010 m->m_data += hlen;
2011 m->m_len -= hlen;
2012
2013 /*
2014 * If first fragment to arrive, create a reassembly queue.
2015 */
2016 if (fp == NULL) {
2017 fp = ipq_alloc();
2018 if (fp == NULL) {
2019 goto dropfrag;
2020 }
2021 TAILQ_INSERT_HEAD(head, fp, ipq_list);
2022 nipq++;
2023 fp->ipq_nfrags = 1;
2024 fp->ipq_ttl = IPFRAGTTL;
2025 fp->ipq_p = ip->ip_p;
2026 fp->ipq_id = ip->ip_id;
2027 fp->ipq_src = ip->ip_src;
2028 fp->ipq_dst = ip->ip_dst;
2029 fp->ipq_frags = m;
2030 m->m_nextpkt = NULL;
2031 /*
2032 * If the first fragment has valid checksum offload
2033 * info, the rest of fragments are eligible as well.
2034 */
2035 if (csum_flags != 0) {
2036 fp->ipq_csum = csum;
2037 fp->ipq_csum_flags = csum_flags;
2038 }
2039 m = NULL; /* nothing to return */
2040 goto done;
2041 } else {
2042 fp->ipq_nfrags++;
2043 }
2044
2045 #define GETIP(m) ((struct ip *)((m)->m_pkthdr.pkt_hdr))
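/*
 * GETIP() recovers a fragment's own IP header, cached in
 * m_pkthdr.pkt_hdr above; note that ip_len/ip_off in those headers
 * are in host byte order, with ip_off already converted to a byte
 * offset and the header length subtracted from ip_len.
 */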
2046
2047 /*
2048 * Handle ECN by comparing this segment with the first one;
2049 * if CE is set, do not lose CE.
2050 * drop if CE and not-ECT are mixed for the same packet.
2051 */
2052 ecn = ip->ip_tos & IPTOS_ECN_MASK;
2053 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
2054 if (ecn == IPTOS_ECN_CE) {
2055 if (ecn0 == IPTOS_ECN_NOTECT) {
2056 goto dropfrag;
2057 }
2058 if (ecn0 != IPTOS_ECN_CE) {
2059 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
2060 }
2061 }
2062 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
2063 goto dropfrag;
2064 }
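/*
 * The checks above implement the usual reassembly rule for ECN
 * (cf. RFC 3168): CE from any fragment is propagated to the result,
 * while a mix of not-ECT and ECT/CE fragments within one datagram is
 * treated as invalid and dropped.
 */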
2065
2066 /*
2067 * Find a segment which begins after this one does.
2068 */
2069 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2070 if (GETIP(q)->ip_off > ip->ip_off) {
2071 break;
2072 }
2073 }
2074
2075 /*
2076 * If there is a preceding segment, it may provide some of
2077 * our data already. If so, drop the data from the incoming
2078 * segment. If it provides all of our data, drop us; otherwise,
2079 * stick the new segment in the proper place.
2080 *
2081 * If any data is trimmed because of the overlap, the checksum
2082 * accumulated for the queue is invalidated.
2083 */
2084 if (p) {
2085 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
2086 if (i > 0) {
2087 if (i >= ip->ip_len) {
2088 goto dropfrag;
2089 }
2090 m_adj(m, i);
2091 fp->ipq_csum_flags = 0;
2092 ip->ip_off += i;
2093 ip->ip_len -= i;
2094 }
2095 m->m_nextpkt = p->m_nextpkt;
2096 p->m_nextpkt = m;
2097 } else {
2098 m->m_nextpkt = fp->ipq_frags;
2099 fp->ipq_frags = m;
2100 }
2101
2102 /*
2103 * While we overlap succeeding segments trim them or,
2104 * if they are completely covered, dequeue them.
2105 */
2106 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
2107 q = nq) {
2108 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
2109 if (i < GETIP(q)->ip_len) {
2110 GETIP(q)->ip_len -= i;
2111 GETIP(q)->ip_off += i;
2112 m_adj(q, i);
2113 fp->ipq_csum_flags = 0;
2114 break;
2115 }
2116 nq = q->m_nextpkt;
2117 m->m_nextpkt = nq;
2118 ipstat.ips_fragdropped++;
2119 fp->ipq_nfrags--;
2120 /* defer freeing until after lock is dropped */
2121 MBUFQ_ENQUEUE(&dfq, q);
2122 }
2123
2124 /*
2125 * If this fragment contains similar checksum offload info
2126 * as that of the existing ones, accumulate checksum. Otherwise,
2127 * invalidate checksum offload info for the entire datagram.
2128 */
2129 if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) {
2130 fp->ipq_csum += csum;
2131 } else if (fp->ipq_csum_flags != 0) {
2132 fp->ipq_csum_flags = 0;
2133 }
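/*
 * Summing per-fragment values is valid because the Internet checksum
 * is a one's-complement sum and the fragments, once trimmed of any
 * overlap above, cover disjoint byte ranges; ADDCARRY() later folds
 * the accumulated carries back in (cf. RFC 1071).
 */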
2134
2136 /*
2137 * Check for complete reassembly and perform frag per packet
2138 * limiting.
2139 *
2140 * Frag limiting is performed here so that the nth frag has
2141 * a chance to complete the packet before we drop the packet.
2142 * As a result, n+1 frags are actually allowed per packet, but
2143 * only n will ever be stored. (n = maxfragsperpacket.)
2144 *
2145 */
2146 next = 0;
2147 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2148 if (GETIP(q)->ip_off != next) {
2149 if (fp->ipq_nfrags > maxfragsperpacket) {
2150 ipstat.ips_fragdropped += fp->ipq_nfrags;
2151 frag_freef(head, fp, DROP_REASON_IP_FRAG_TOO_MANY);
2152 }
2153 m = NULL; /* nothing to return */
2154 goto done;
2155 }
2156 next += GETIP(q)->ip_len;
2157 }
2158 /* Make sure the last packet didn't have the IP_MF flag */
2159 if (p->m_flags & M_FRAG) {
2160 if (fp->ipq_nfrags > maxfragsperpacket) {
2161 ipstat.ips_fragdropped += fp->ipq_nfrags;
2162 frag_freef(head, fp, DROP_REASON_IP_FRAG_TOO_MANY);
2163 }
2164 m = NULL; /* nothing to return */
2165 goto done;
2166 }
2167
2168 /*
2169 * Reassembly is complete. Make sure the packet is a sane size.
2170 */
2171 q = fp->ipq_frags;
2172 ip = GETIP(q);
2173 if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
2174 ipstat.ips_toolong++;
2175 ipstat.ips_fragdropped += fp->ipq_nfrags;
2176 frag_freef(head, fp, DROP_REASON_IP_FRAG_TOO_LONG);
2177 m = NULL; /* nothing to return */
2178 goto done;
2179 }
2180
2181 /*
2182 * Concatenate fragments.
2183 */
2184 m = q;
2185 t = m->m_next;
2186 m->m_next = NULL;
2187 m_cat(m, t);
2188 nq = q->m_nextpkt;
2189 q->m_nextpkt = NULL;
2190 for (q = nq; q != NULL; q = nq) {
2191 nq = q->m_nextpkt;
2192 q->m_nextpkt = NULL;
2193 m_cat(m, q);
2194 }
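/*
 * Each fragment's IP header was trimmed on entry (m_data/m_len were
 * advanced past it), so m_cat() splices pure payload; the surviving
 * first header is grown back over the data below.
 */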
2195
2196 /*
2197 * Store partial hardware checksum info from the fragment queue;
2198 * the receive start offset is set to 20 bytes (see code at the
2199 * top of this routine.)
2200 */
2201 if (fp->ipq_csum_flags != 0) {
2202 csum = fp->ipq_csum;
2203
2204 ADDCARRY(csum);
2205
2206 m->m_pkthdr.csum_rx_val = (uint16_t)csum;
2207 m->m_pkthdr.csum_rx_start = sizeof(struct ip);
2208 m->m_pkthdr.csum_flags = fp->ipq_csum_flags;
2209 } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
2210 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
2211 /* loopback checksums are always OK */
2212 m->m_pkthdr.csum_data = 0xffff;
2213 m->m_pkthdr.csum_flags =
2214 CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
2215 CSUM_IP_CHECKED | CSUM_IP_VALID;
2216 }
2217
2218 /*
2219 * Create header for new ip packet by modifying header of first
2220 * packet; dequeue and discard fragment reassembly header.
2221 * Make header visible.
2222 */
2223 ip->ip_len = (u_short)((IP_VHL_HL(ip->ip_vhl) << 2) + next);
2224 ip->ip_src = fp->ipq_src;
2225 ip->ip_dst = fp->ipq_dst;
2226
2227 fp->ipq_frags = NULL; /* return to caller as 'm' */
2228 frag_freef(head, fp, DROP_REASON_UNSPECIFIED);
2229 fp = NULL;
2230
2231 m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
2232 m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
2233 /* some debugging cruft by sklower, below, will go away soon */
2234 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
2235 m_fixhdr(m);
2236 }
2237 ipstat.ips_reassembled++;
2238
2239 /* arm the purge timer if not already and if there's work to do */
2240 frag_sched_timeout();
2241 lck_mtx_unlock(&ipqlock);
2242 /* perform deferred free (if needed) now that lock is dropped */
2243 if (!MBUFQ_EMPTY(&dfq)) {
2244 MBUFQ_DRAIN(&dfq);
2245 }
2246 VERIFY(MBUFQ_EMPTY(&dfq));
2247 return m;
2248
2249 done:
2250 VERIFY(m == NULL);
2251 /* arm the purge timer if not already and if there's work to do */
2252 frag_sched_timeout();
2253 lck_mtx_unlock(&ipqlock);
2254 /* perform deferred free (if needed) */
2255 if (!MBUFQ_EMPTY(&dfq)) {
2256 MBUFQ_DRAIN(&dfq);
2257 }
2258 VERIFY(MBUFQ_EMPTY(&dfq));
2259 return NULL;
2260
2261 dropfrag:
2262 ipstat.ips_fragdropped++;
2263 if (fp != NULL) {
2264 fp->ipq_nfrags--;
2265 }
2266 /* arm the purge timer if not already and if there's work to do */
2267 frag_sched_timeout();
2268 lck_mtx_unlock(&ipqlock);
2269 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_FRAG_DROPPED,
2270 NULL, 0);
2271 /* perform deferred free (if needed) */
2272 if (!MBUFQ_EMPTY(&dfq)) {
2273 MBUFQ_DRAIN(&dfq);
2274 }
2275 VERIFY(MBUFQ_EMPTY(&dfq));
2276 return NULL;
2277 #undef GETIP
2278 }
2279
2280 /*
2281 * Free a fragment reassembly header and all
2282 * associated datagrams.
2283 */
2284 static void
2285 frag_freef(struct ipqhead *fhp, struct ipq *fp, drop_reason_t drop_reason)
2286 {
2287 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2288
2289 fp->ipq_nfrags = 0;
2290 if (fp->ipq_frags != NULL) {
2291 if (drop_reason == DROP_REASON_UNSPECIFIED) {
2292 m_freem_list(fp->ipq_frags);
2293 } else {
2294 m_drop_list(fp->ipq_frags, NULL, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, drop_reason, NULL, 0);
2295 }
2296 fp->ipq_frags = NULL;
2297 }
2298 TAILQ_REMOVE(fhp, fp, ipq_list);
2299 nipq--;
2300 ipq_free(fp);
2301 }
2302
2303 /*
2304 * IP reassembly timer processing
2305 */
2306 static void
2307 frag_timeout(void *arg)
2308 {
2309 #pragma unused(arg)
2310 struct ipq *__single fp;
2311 int i;
2312
2313 /*
2314 * Update coarse-grained networking timestamp (in sec.); the idea
2315 * is to piggy-back on the timeout callout to update the counter
2316 * returnable via net_uptime().
2317 */
2318 net_update_uptime();
2319
2320 lck_mtx_lock(&ipqlock);
2321 for (i = 0; i < IPREASS_NHASH; i++) {
2322 for (fp = TAILQ_FIRST(&ipq[i]); fp;) {
2323 struct ipq *__single fpp;
2324
2325 fpp = fp;
2326 fp = TAILQ_NEXT(fp, ipq_list);
2327 if (--fpp->ipq_ttl == 0) {
2328 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
2329 frag_freef(&ipq[i], fpp, DROP_REASON_IP_FRAG_TIMEOUT);
2330 }
2331 }
2332 }
2333 /*
2334 * If we are over the maximum number of fragments
2335 * (due to the limit being lowered), drain off
2336 * enough to get down to the new limit.
2337 */
2338 if (maxnipq >= 0 && nipq > (unsigned)maxnipq) {
2339 for (i = 0; i < IPREASS_NHASH; i++) {
2340 while (nipq > (unsigned)maxnipq &&
2341 !TAILQ_EMPTY(&ipq[i])) {
2342 ipstat.ips_fragdropped +=
2343 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2344 frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]), DROP_REASON_IP_FRAG_DROPPED);
2345 }
2346 }
2347 }
2348 /* re-arm the purge timer if there's work to do */
2349 frag_timeout_run = 0;
2350 frag_sched_timeout();
2351 lck_mtx_unlock(&ipqlock);
2352 }
2353
2354 static void
2355 frag_sched_timeout(void)
2356 {
2357 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2358
2359 if (!frag_timeout_run && nipq > 0) {
2360 frag_timeout_run = 1;
2361 timeout(frag_timeout, NULL, hz);
2362 }
2363 }
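/*
 * The timer above fires roughly once per second (hz ticks), and each
 * pass of frag_timeout() decrements every queue's ipq_ttl, so a
 * reassembly queue that never completes survives for about IPFRAGTTL
 * seconds before being reclaimed.
 */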
2364
2365 /*
2366 * Drain off all datagram fragments.
2367 */
2368 static void
2369 frag_drain(void)
2370 {
2371 int i;
2372
2373 lck_mtx_lock(&ipqlock);
2374 for (i = 0; i < IPREASS_NHASH; i++) {
2375 while (!TAILQ_EMPTY(&ipq[i])) {
2376 ipstat.ips_fragdropped +=
2377 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
2378 frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]), DROP_REASON_IP_FRAG_DRAINED);
2379 }
2380 }
2381 lck_mtx_unlock(&ipqlock);
2382 }
2383
2384 static struct ipq *
2385 ipq_alloc(void)
2386 {
2387 struct ipq *__single fp;
2388
2389 /*
2390 * See comments in ipq_updateparams(). Keep the count separate
2391 * from nipq since the latter represents the elements already
2392 * in the reassembly queues.
2393 */
2394 if (ipq_limit > 0 && ipq_count > ipq_limit) {
2395 return NULL;
2396 }
2397
2398 fp = kalloc_type(struct ipq, Z_NOWAIT | Z_ZERO);
2399 if (fp != NULL) {
2400 os_atomic_inc(&ipq_count, relaxed);
2401 }
2402 return fp;
2403 }
2404
2405 static void
2406 ipq_free(struct ipq *fp)
2407 {
2408 kfree_type(struct ipq, fp);
2409 os_atomic_dec(&ipq_count, relaxed);
2410 }
2411
2412 /*
2413 * Drain callback
2414 */
2415 void
2416 ip_drain(void)
2417 {
2418 frag_drain(); /* fragments */
2419 in_rtqdrain(); /* protocol cloned routes */
2420 in_arpdrain(NULL); /* cloned routes: ARP */
2421 }
2422
2423 /*
2424 * Do option processing on a datagram,
2425 * possibly discarding it if bad options are encountered,
2426 * or forwarding it if source-routed.
2427 * The pass argument is used when operating in the IPSTEALTH
2428 * mode to tell what options to process:
2429 * [LS]SRR (pass 0) or the others (pass 1).
2430 * The reason for as many as two passes is that when doing IPSTEALTH,
2431 * non-routing options should be processed only if the packet is for us.
2432 * Returns 1 if packet has been forwarded/freed,
2433 * 0 if the packet should be processed further.
2434 */
2435 static int
2436 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
2437 {
2438 #pragma unused(pass)
2439 struct ip *ip = mtod(m, struct ip *);
2440 u_char *cp;
2441 struct ip_timestamp *__single ipt;
2442 struct in_ifaddr *__single ia;
2443 int opt, optlen, cnt, off, type = ICMP_PARAMPROB, forward = 0;
2444 uint8_t code = 0;
2445 struct in_addr *__single sin, dst;
2446 u_int32_t ntime;
2447 struct sockaddr_in ipaddr = {
2448 .sin_len = sizeof(ipaddr),
2449 .sin_family = AF_INET,
2450 .sin_port = 0,
2451 .sin_addr = { .s_addr = 0 },
2452 .sin_zero = { 0, }
2453 };
2454
2455 /* Expect 32-bit aligned data pointer on strict-align platforms */
2456 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2457
2458 dst = ip->ip_dst;
2459 cp = (u_char *)(ip + 1);
2460 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
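/*
 * Options occupy the space between the fixed header and the end of
 * the header proper (IHL * 4 bytes). Except for the single-octet
 * EOL and NOP, each option is TLV-encoded: cp[IPOPT_OPTVAL] holds
 * the type, cp[IPOPT_OLEN] the total length including the type and
 * length octets, followed by the option data (RFC 791).
 */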
2461 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2462 opt = cp[IPOPT_OPTVAL];
2463 if (opt == IPOPT_EOL) {
2464 break;
2465 }
2466 if (opt == IPOPT_NOP) {
2467 optlen = 1;
2468 } else {
2469 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2470 code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2471 goto bad;
2472 }
2473 optlen = cp[IPOPT_OLEN];
2474 if (optlen < IPOPT_OLEN + sizeof(*cp) ||
2475 optlen > cnt) {
2476 code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2477 goto bad;
2478 }
2479 }
2480 switch (opt) {
2481 default:
2482 break;
2483
2484 /*
2485 * Source routing with record.
2486 * Find interface with current destination address.
2487 * If none on this machine then drop if strictly routed,
2488 * or do nothing if loosely routed.
2489 * Record interface address and bring up next address
2490 * component. If strictly routed make sure next
2491 * address is on directly accessible net.
2492 */
2493 case IPOPT_LSRR:
2494 case IPOPT_SSRR:
2495 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2496 code = (uint8_t)(&cp[IPOPT_OLEN] - (u_char *)ip);
2497 goto bad;
2498 }
2499 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2500 code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2501 goto bad;
2502 }
2503 ipaddr.sin_addr = ip->ip_dst;
2504 ia = ifatoia(ifa_ifwithaddr(SA(&ipaddr)));
2505 if (ia == NULL) {
2506 if (opt == IPOPT_SSRR) {
2507 type = ICMP_UNREACH;
2508 code = ICMP_UNREACH_SRCFAIL;
2509 goto bad;
2510 }
2511 if (!ip_dosourceroute) {
2512 goto nosourcerouting;
2513 }
2514 /*
2515 * Loose routing, and not at next destination
2516 * yet; nothing to do except forward.
2517 */
2518 break;
2519 } else {
2520 ifa_remref(&ia->ia_ifa);
2521 ia = NULL;
2522 }
2523 off--; /* 0 origin */
2524 if (off > optlen - (int)sizeof(struct in_addr)) {
2525 /*
2526 * End of source route. Should be for us.
2527 */
2528 if (!ip_acceptsourceroute) {
2529 goto nosourcerouting;
2530 }
2531 save_rte(cp, ip->ip_src);
2532 break;
2533 }
2534
2535 if (!ip_dosourceroute) {
2536 if (ipforwarding) {
2537 char buf[MAX_IPv4_STR_LEN];
2538 char buf2[MAX_IPv4_STR_LEN];
2539 /*
2540 * Acting as a router, so generate ICMP
2541 */
2542 nosourcerouting:
2543 log(LOG_WARNING,
2544 "attempted source route from %s "
2545 "to %s\n",
2546 inet_ntop(AF_INET, &ip->ip_src,
2547 buf, sizeof(buf)),
2548 inet_ntop(AF_INET, &ip->ip_dst,
2549 buf2, sizeof(buf2)));
2550 type = ICMP_UNREACH;
2551 code = ICMP_UNREACH_SRCFAIL;
2552 goto bad;
2553 } else {
2554 /*
2555 * Not acting as a router,
2556 * so silently drop.
2557 */
2558 OSAddAtomic(1, &ipstat.ips_cantforward);
2559 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_CANNOT_FORWARD,
2560 NULL, 0);
2561 return 1;
2562 }
2563 }
2564
2565 /*
2566 * locate outgoing interface
2567 */
2568 (void) memcpy(&ipaddr.sin_addr, cp + off,
2569 sizeof(ipaddr.sin_addr));
2570
2571 if (opt == IPOPT_SSRR) {
2572 #define INA struct in_ifaddr *
2573 if ((ia = (INA)ifa_ifwithdstaddr(
2574 SA(&ipaddr))) == NULL) {
2575 ia = (INA)ifa_ifwithnet(SA(&ipaddr));
2576 }
2577 } else {
2578 ia = ip_rtaddr(ipaddr.sin_addr);
2579 }
2580 if (ia == NULL) {
2581 type = ICMP_UNREACH;
2582 code = ICMP_UNREACH_SRCFAIL;
2583 goto bad;
2584 }
2585 ip->ip_dst = ipaddr.sin_addr;
2586 IFA_LOCK(&ia->ia_ifa);
2587 (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2588 sizeof(struct in_addr));
2589 IFA_UNLOCK(&ia->ia_ifa);
2590 ifa_remref(&ia->ia_ifa);
2591 ia = NULL;
2592 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2593 /*
2594 * Let ip_intr's mcast routing check handle mcast pkts
2595 */
2596 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
2597 break;
2598
2599 case IPOPT_RR:
2600 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
2601 code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2602 goto bad;
2603 }
2604 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
2605 code = (uint8_t)(&cp[IPOPT_OFFSET] - (u_char *)ip);
2606 goto bad;
2607 }
2608 /*
2609 * If no space remains, ignore.
2610 */
2611 off--; /* 0 origin */
2612 if (off > optlen - (int)sizeof(struct in_addr)) {
2613 break;
2614 }
2615 (void) memcpy(&ipaddr.sin_addr, &ip->ip_dst,
2616 sizeof(ipaddr.sin_addr));
2617 /*
2618 * locate outgoing interface; if we're the destination,
2619 * use the incoming interface (should be same).
2620 */
2621 if ((ia = (INA)ifa_ifwithaddr(SA(&ipaddr))) == NULL) {
2622 if ((ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) {
2623 type = ICMP_UNREACH;
2624 code = ICMP_UNREACH_HOST;
2625 goto bad;
2626 }
2627 }
2628 IFA_LOCK(&ia->ia_ifa);
2629 (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
2630 sizeof(struct in_addr));
2631 IFA_UNLOCK(&ia->ia_ifa);
2632 ifa_remref(&ia->ia_ifa);
2633 ia = NULL;
2634 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
2635 break;
2636
2637 case IPOPT_TS:
2638 code = (uint8_t)(cp - (u_char *)ip);
2639 ipt = (struct ip_timestamp *)(void *)cp;
2640 if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
2641 code = (uint8_t)((u_char *)&ipt->ipt_len -
2642 (u_char *)ip);
2643 goto bad;
2644 }
2645 if (ipt->ipt_ptr < 5) {
2646 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2647 (u_char *)ip);
2648 goto bad;
2649 }
2650 if (ipt->ipt_ptr >
2651 ipt->ipt_len - (int)sizeof(int32_t)) {
2652 if (++ipt->ipt_oflw == 0) {
2653 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2654 (u_char *)ip);
2655 goto bad;
2656 }
2657 break;
2658 }
2659 sin = (struct in_addr *)(void *)(cp + ipt->ipt_ptr - 1);
2660 switch (ipt->ipt_flg) {
2661 case IPOPT_TS_TSONLY:
2662 break;
2663
2664 case IPOPT_TS_TSANDADDR:
2665 if (ipt->ipt_ptr - 1 + sizeof(n_time) +
2666 sizeof(struct in_addr) > ipt->ipt_len) {
2667 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2668 (u_char *)ip);
2669 goto bad;
2670 }
2671 ipaddr.sin_addr = dst;
2672 ia = (INA)ifaof_ifpforaddr(SA(&ipaddr),
2673 m->m_pkthdr.rcvif);
2674 if (ia == NULL) {
2675 continue;
2676 }
2677 IFA_LOCK(&ia->ia_ifa);
2678 (void) memcpy(sin, &IA_SIN(ia)->sin_addr,
2679 sizeof(struct in_addr));
2680 IFA_UNLOCK(&ia->ia_ifa);
2681 ipt->ipt_ptr += sizeof(struct in_addr);
2682 ifa_remref(&ia->ia_ifa);
2683 ia = NULL;
2684 break;
2685
2686 case IPOPT_TS_PRESPEC:
2687 if (ipt->ipt_ptr - 1 + sizeof(n_time) +
2688 sizeof(struct in_addr) > ipt->ipt_len) {
2689 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2690 (u_char *)ip);
2691 goto bad;
2692 }
2693 (void) memcpy(&ipaddr.sin_addr, sin,
2694 sizeof(struct in_addr));
2695 if ((ia = ifatoia(ifa_ifwithaddr(
2696 SA(&ipaddr)))) == NULL) {
2697 continue;
2698 }
2699 ifa_remref(&ia->ia_ifa);
2700 ia = NULL;
2701 ipt->ipt_ptr += sizeof(struct in_addr);
2702 break;
2703
2704 default:
2705 /* XXX can't take &ipt->ipt_flg */
2706 code = (uint8_t)((u_char *)&ipt->ipt_ptr -
2707 (u_char *)ip + 1);
2708 goto bad;
2709 }
2710 ntime = iptime();
2711 (void) memcpy(cp + ipt->ipt_ptr - 1, &ntime,
2712 sizeof(n_time));
2713 ipt->ipt_ptr += sizeof(n_time);
2714 }
2715 }
2716 if (forward && ipforwarding) {
2717 ip_forward(m, 1, next_hop);
2718 return 1;
2719 }
2720 return 0;
2721 bad:
2722 icmp_error(m, type, code, 0, 0);
2723 OSAddAtomic(1, &ipstat.ips_badoptions);
2724 return 1;
2725 }
2726
2727 /*
2728 * Check for the presence of the IP Router Alert option [RFC2113]
2729 * in the header of an IPv4 datagram.
2730 *
2731 * This call is not intended for use from the forwarding path; it is here
2732 * so that protocol domains may check for the presence of the option.
2733 * Given how FreeBSD's IPv4 stack is currently structured, the Router Alert
2734 * option does not have much relevance to the implementation, though this
2735 * may change in future.
2736 * Router alert options SHOULD be passed if running in IPSTEALTH mode and
2737 * we are not the endpoint.
2738 * Length checks on individual options should already have been performed
2739 * by ip_dooptions(); therefore they are folded under DIAGNOSTIC here.
2740 *
2741 * Return zero if not present or options are invalid, non-zero if present.
2742 */
2743 int
2744 ip_checkrouteralert(struct mbuf *m)
2745 {
2746 struct ip *ip = mtod(m, struct ip *);
2747 u_char *cp;
2748 int opt, optlen, cnt, found_ra;
2749
2750 found_ra = 0;
2751 cp = (u_char *)(ip + 1);
2752 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2753 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2754 opt = cp[IPOPT_OPTVAL];
2755 if (opt == IPOPT_EOL) {
2756 break;
2757 }
2758 if (opt == IPOPT_NOP) {
2759 optlen = 1;
2760 } else {
2761 #ifdef DIAGNOSTIC
2762 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
2763 break;
2764 }
2765 #endif
2766 optlen = cp[IPOPT_OLEN];
2767 #ifdef DIAGNOSTIC
2768 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
2769 break;
2770 }
2771 #endif
2772 }
2773 switch (opt) {
2774 case IPOPT_RA:
2775 #ifdef DIAGNOSTIC
2776 if (optlen != IPOPT_OFFSET + sizeof(uint16_t) ||
2777 (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) {
2778 break;
2779 } else
2780 #endif
2781 found_ra = 1;
2782 break;
2783 default:
2784 break;
2785 }
2786 }
2787
2788 return found_ra;
2789 }
2790
2791 /*
2792 * Given address of next destination (final or next hop),
2793 * return internet address info of interface to be used to get there.
2794 */
2795 struct in_ifaddr *
2796 ip_rtaddr(struct in_addr dst)
2797 {
2798 struct sockaddr_in *__single sin;
2799 struct ifaddr *__single rt_ifa;
2800 struct route ro;
2801
2802 bzero(&ro, sizeof(ro));
2803 sin = SIN(&ro.ro_dst);
2804 sin->sin_family = AF_INET;
2805 sin->sin_len = sizeof(*sin);
2806 sin->sin_addr = dst;
2807
2808 rtalloc_ign(&ro, RTF_PRCLONING);
2809 if (ro.ro_rt == NULL) {
2810 ROUTE_RELEASE(&ro);
2811 return NULL;
2812 }
2813
2814 RT_LOCK(ro.ro_rt);
2815 if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) {
2816 ifa_addref(rt_ifa);
2817 }
2818 RT_UNLOCK(ro.ro_rt);
2819 ROUTE_RELEASE(&ro);
2820
2821 return ifatoia(rt_ifa);
2822 }
2823
2824 /*
2825 * Save incoming source route for use in replies,
2826 * to be picked up later by ip_srcroute if the receiver is interested.
2827 */
2828 static void
2829 save_rte(u_char *__indexable option, struct in_addr dst)
2830 {
2831 unsigned olen;
2832
2833 olen = option[IPOPT_OLEN];
2834 #if DIAGNOSTIC
2835 if (ipprintfs) {
2836 printf("save_rte: olen %d\n", olen);
2837 }
2838 #endif
2839 if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) {
2840 return;
2841 }
2842 bcopy(option, ip_srcrt.srcopt, olen);
2843 ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
2844 ip_srcrt.dst = dst;
2845 }
2846
2847 /*
2848 * Retrieve incoming source route for use in replies,
2849 * in the same form used by setsockopt.
2850 * The first hop is placed before the options; it will be removed later.
2851 */
2852 struct mbuf *
2853 ip_srcroute(void)
2854 {
2855 struct in_addr *p, *q;
2856 struct mbuf *m;
2857
2858 if (ip_nhops == 0) {
2859 return NULL;
2860 }
2861
2862 m = m_get(M_DONTWAIT, MT_HEADER);
2863 if (m == NULL) {
2864 return NULL;
2865 }
2866
2867 #define OPTSIZ (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt))
2868
2869 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
2870 m->m_len = ip_nhops * sizeof(struct in_addr) +
2871 sizeof(struct in_addr) + OPTSIZ;
2872 #if DIAGNOSTIC
2873 if (ipprintfs) {
2874 printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
2875 }
2876 #endif
2877
2878 /*
2879 * Notes: to the astute reader:
2880 * 1. The code is sequenced in the order
2881 * of writing to the mbuf contents.
2882 * 2. The order of addresses in `ip_srcrt.route`
2883 * is the reverse of the order in the wire format.
2884 */
2885 /*
2886 * First save first hop for return route
2887 */
2888 p = &ip_srcrt.route[ip_nhops - 1];
2889 *(mtod(m, struct in_addr *)) = *p;
2890 #if DIAGNOSTIC
2891 if (ipprintfs) {
2892 printf(" hops %lx",
2893 (u_int32_t)ntohl(mtod(m, struct in_addr *)->s_addr));
2894 }
2895 #endif
2896
2897 /*
2898 * Copy option fields and padding (nop) to mbuf.
2899 */
2900 ip_srcrt.nop = IPOPT_NOP;
2901 ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
2902 (void) __nochk_memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
2903 (caddr_t)&ip_srcrt + sizeof(struct in_addr), OPTSIZ);
2904 q = (struct in_addr *)(void *)(mtod(m, caddr_t) +
2905 sizeof(struct in_addr) + OPTSIZ);
2906 #undef OPTSIZ
2907 /*
2908 * If multiple return addresses were provided,
2909 * record the return path as an IP source route,
2910 * reversing the path.
2911 */
2912 for (int i = 0; i < (ip_nhops - 1); i++) {
2913 q[i] = ip_srcrt.route[ip_nhops - (i + 2)];
2914 #if DIAGNOSTIC
2915 if (ipprintfs) {
2916 printf(" %lx", (u_int32_t)ntohl(q[i].s_addr));
2917 }
2918 #endif
2919 }
2920 /*
2921 * Last hop goes to final destination.
2922 */
2923 q[ip_nhops - 1] = ip_srcrt.dst;
2924 #if DIAGNOSTIC
2925 if (ipprintfs) {
2926 printf(" %lx\n", (u_int32_t)ntohl(q[ip_nhops - 1].s_addr));
2927 }
2928 #endif
2929 return m;
2930 }
2931
2932 /*
2933 * Strip out IP options, at higher level protocol in the kernel.
2934 */
2935 void
2936 ip_stripoptions(struct mbuf *m)
2937 {
2938 int i;
2939 struct ip *ip = mtod(m, struct ip *);
2940 caddr_t opts;
2941 int olen;
2942
2943 /* Expect 32-bit aligned data pointer on strict-align platforms */
2944 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
2945
2946 /* use bcopy() since it supports overlapping range */
2947 olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
2948 opts = (caddr_t)(ip + 1);
2949 i = m->m_len - (sizeof(struct ip) + olen);
2950 bcopy(opts + olen, opts, (unsigned)i);
2951 m->m_len -= olen;
2952 if (m->m_flags & M_PKTHDR) {
2953 m->m_pkthdr.len -= olen;
2954 }
2955 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2956
2957 /*
2958 * We expect ip_{off,len} to be in host order by now, and
2959 * that the original IP header length has been subtracted
2960 * out from ip_len. Temporarily adjust ip_len for checksum
2961 * recalculation, and restore it afterwards.
2962 */
2963 ip->ip_len += sizeof(struct ip);
2964
2965 /* recompute checksum now that IP header is smaller */
2966 #if BYTE_ORDER != BIG_ENDIAN
2967 HTONS(ip->ip_len);
2968 HTONS(ip->ip_off);
2969 #endif /* BYTE_ORDER != BIG_ENDIAN */
2970 ip->ip_sum = in_cksum_hdr(ip);
2971 #if BYTE_ORDER != BIG_ENDIAN
2972 NTOHS(ip->ip_off);
2973 NTOHS(ip->ip_len);
2974 #endif /* BYTE_ORDER != BIG_ENDIAN */
2975
2976 ip->ip_len -= sizeof(struct ip);
2977
2978 /*
2979 * Given that we've just stripped IP options from the header,
2980 * we need to adjust the start offset accordingly if this
2981 * packet had gone thru partial checksum offload.
2982 */
2983 if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
2984 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
2985 if (m->m_pkthdr.csum_rx_start >= (sizeof(struct ip) + olen)) {
2986 /* most common case */
2987 m->m_pkthdr.csum_rx_start -= olen;
2988 } else {
2989 /* compute checksum in software instead */
2990 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
2991 m->m_pkthdr.csum_data = 0;
2992 ipstat.ips_adj_hwcsum_clr++;
2993 }
2994 }
2995 }
2996
2997 u_char inetctlerrmap[PRC_NCMDS] = {
2998 0, 0, 0, 0,
2999 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
3000 ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
3001 EMSGSIZE, EHOSTUNREACH, 0, 0,
3002 0, 0, EHOSTUNREACH, 0,
3003 ENOPROTOOPT, ECONNREFUSED
3004 };
3005
3006 static int
3007 sysctl_ipforwarding SYSCTL_HANDLER_ARGS
3008 {
3009 #pragma unused(arg1, arg2)
3010 int i, was_ipforwarding = ipforwarding;
3011
3012 i = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
3013 if (i != 0 || req->newptr == USER_ADDR_NULL) {
3014 return i;
3015 }
3016
3017 if (was_ipforwarding && !ipforwarding) {
3018 /* clean up IPv4 forwarding cached routes */
3019 ifnet_head_lock_shared();
3020 for (i = 0; i <= if_index; i++) {
3021 ifnet_ref_t ifp = ifindex2ifnet[i];
3022 if (ifp != NULL) {
3023 lck_mtx_lock(&ifp->if_cached_route_lock);
3024 ROUTE_RELEASE(&ifp->if_fwd_route);
3025 bzero(&ifp->if_fwd_route,
3026 sizeof(ifp->if_fwd_route));
3027 lck_mtx_unlock(&ifp->if_cached_route_lock);
3028 }
3029 }
3030 ifnet_head_done();
3031 }
3032
3033 return 0;
3034 }
3035
3036 /*
3037 * Similar to inp_route_{copyout,copyin} routines except that these copy
3038 * out the cached IPv4 forwarding route from struct ifnet instead of the
3039 * inpcb. See comments for those routines for explanations.
3040 */
3041 static void
3042 ip_fwd_route_copyout(struct ifnet *ifp, struct route *dst)
3043 {
3044 struct route *src = &ifp->if_fwd_route;
3045
3046 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3047 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3048
3049 /* Minor sanity check */
3050 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3051 panic("%s: wrong or corrupted route: %p", __func__, src);
3052 }
3053
3054 route_copyout(dst, src, sizeof(*dst));
3055
3056 lck_mtx_unlock(&ifp->if_cached_route_lock);
3057 }
3058
3059 static void
3060 ip_fwd_route_copyin(struct ifnet *ifp, struct route *src)
3061 {
3062 struct route *dst = &ifp->if_fwd_route;
3063
3064 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3065 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3066
3067 /* Minor sanity check */
3068 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3069 panic("%s: wrong or corrupted route: %p", __func__, src);
3070 }
3071
3072 if (ifp->if_fwd_cacheok) {
3073 route_copyin(src, dst, sizeof(*src));
3074 }
3075
3076 lck_mtx_unlock(&ifp->if_cached_route_lock);
3077 }
3078
3079 /*
3080 * Forward a packet. If some error occurs return the sender
3081 * an icmp packet. Note we can't always generate a meaningful
3082 * icmp message because icmp doesn't have a large enough repertoire
3083 * of codes and types.
3084 *
3085 * If not forwarding, just drop the packet. This could be confusing
3086 * if ipforwarding was zero but some routing protocol was advancing
3087 * us as a gateway to somewhere. However, we must let the routing
3088 * protocol deal with that.
3089 *
3090 * The srcrt parameter indicates whether the packet is being forwarded
3091 * via a source route.
3092 */
3093 static void
3094 ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop)
3095 {
3096 #pragma unused(next_hop)
3097 struct ip *__single ip = mtod(m, struct ip *);
3098 struct sockaddr_in *__single sin;
3099 rtentry_ref_t rt;
3100 struct route fwd_rt;
3101 int error, type = 0, code = 0;
3102 mbuf_ref_t mcopy;
3103 n_long dest;
3104 struct in_addr pkt_dst;
3105 u_int32_t nextmtu = 0, len;
3106 struct ip_out_args ipoa;
3107 struct ifnet *__single rcvifp = m->m_pkthdr.rcvif;
3108
3109 bzero(&ipoa, sizeof(ipoa));
3110 ipoa.ipoa_boundif = IFSCOPE_NONE;
3111 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3112 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3113
3114 #if IPSEC
3115 struct secpolicy *sp = NULL;
3116 int ipsecerror;
3117 #endif /* IPSEC */
3118 #if PF
3119 struct pf_mtag *pf_mtag;
3120 #endif /* PF */
3121
3122 dest = 0;
3123 pkt_dst = ip->ip_dst;
3124
3125 #if DIAGNOSTIC
3126 if (ipprintfs) {
3127 printf("forward: src %lx dst %lx ttl %x\n",
3128 (u_int32_t)ip->ip_src.s_addr, (u_int32_t)pkt_dst.s_addr,
3129 ip->ip_ttl);
3130 }
3131 #endif
3132
3133 if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
3134 OSAddAtomic(1, &ipstat.ips_cantforward);
3135 m_drop(m, DROPTAP_FLAG_DIR_IN | DROPTAP_FLAG_L2_MISSING, DROP_REASON_IP_CANNOT_FORWARD,
3136 NULL, 0);
3137 return;
3138 }
3139 #if IPSTEALTH
3140 if (!ipstealth) {
3141 #endif /* IPSTEALTH */
3142 if (ip->ip_ttl <= IPTTLDEC) {
3143 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
3144 dest, 0);
3145 return;
3146 }
3147 #if IPSTEALTH
3148 }
3149 #endif /* IPSTEALTH */
3150
3151 #if PF
3152 pf_mtag = pf_find_mtag(m);
3153 if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) {
3154 ipoa.ipoa_boundif = pf_mtag->pftag_rtableid;
3155 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
3156 }
3157 #endif /* PF */
3158
3159 ip_fwd_route_copyout(rcvifp, &fwd_rt);
3160
3161 sin = SIN(&fwd_rt.ro_dst);
3162 if (ROUTE_UNUSABLE(&fwd_rt) || pkt_dst.s_addr != sin->sin_addr.s_addr) {
3163 ROUTE_RELEASE(&fwd_rt);
3164
3165 sin->sin_family = AF_INET;
3166 sin->sin_len = sizeof(*sin);
3167 sin->sin_addr = pkt_dst;
3168
3169 rtalloc_scoped_ign(&fwd_rt, RTF_PRCLONING, ipoa.ipoa_boundif);
3170 if (fwd_rt.ro_rt == NULL) {
3171 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
3172 goto done;
3173 }
3174 }
3175 rt = fwd_rt.ro_rt;
3176
3177 /*
3178 * Save the IP header and at most 8 bytes of the payload,
3179 * in case we need to generate an ICMP message to the src.
3180 *
3181 * We don't use m_copy() because it might return a reference
3182 * to a shared cluster. Both this function and ip_output()
3183 * assume exclusive access to the IP header in `m', so any
3184 * data in a cluster may change before we reach icmp_error().
3185 */
3186 MGET(mcopy, M_DONTWAIT, m->m_type);
3187 if (mcopy != NULL && m_dup_pkthdr(mcopy, m, M_DONTWAIT) == 0) {
3188 mcopy->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
3189 (int)ip->ip_len);
3190 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
3191 }
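/*
 * An ICMP error must quote the offending datagram's IP header plus
 * at least its first 8 bytes (RFC 792); that is precisely what mcopy
 * preserves for the icmp_error() calls below.
 */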
3192
3193 #if IPSTEALTH
3194 if (!ipstealth) {
3195 #endif /* IPSTEALTH */
3196 ip->ip_ttl -= IPTTLDEC;
3197 #if IPSTEALTH
3198 }
3199 #endif /* IPSTEALTH */
3200
3201 /*
3202 * If forwarding packet using same interface that it came in on,
3203 * perhaps should send a redirect to sender to shortcut a hop.
3204 * Only send redirect if source is sending directly to us,
3205 * and if packet was not source routed (or has any options).
3206 * Also, don't send redirect if forwarding using a default route
3207 * or a route modified by a redirect.
3208 */
3209 RT_LOCK_SPIN(rt);
3210 if (rt->rt_ifp == m->m_pkthdr.rcvif &&
3211 !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
3212 satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
3213 ipsendredirects && !srcrt && rt->rt_ifa != NULL) {
3214 struct in_ifaddr *ia = ifatoia(rt->rt_ifa);
3215 u_int32_t src = ntohl(ip->ip_src.s_addr);
3216
3217 /* Become a regular mutex */
3218 RT_CONVERT_LOCK(rt);
3219 IFA_LOCK_SPIN(&ia->ia_ifa);
3220 if ((src & ia->ia_subnetmask) == ia->ia_subnet) {
3221 if (rt->rt_flags & RTF_GATEWAY) {
3222 dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
3223 } else {
3224 dest = pkt_dst.s_addr;
3225 }
3226 /*
3227 * The Router Requirements RFC (1812) says to
3228 * send only host redirects.
3229 */
3230 type = ICMP_REDIRECT;
3231 code = ICMP_REDIRECT_HOST;
3232 #if DIAGNOSTIC
3233 if (ipprintfs) {
3234 printf("redirect (%d) to %lx\n", code,
3235 (u_int32_t)dest);
3236 }
3237 #endif
3238 }
3239 IFA_UNLOCK(&ia->ia_ifa);
3240 }
3241 RT_UNLOCK(rt);
3242
3243
3244 /* Mark this packet as being forwarded from another interface */
3245 m->m_pkthdr.pkt_flags |= PKTF_FORWARDED;
3246 len = m_pktlen(m);
3247
3248 error = ip_output(m, NULL, &fwd_rt, IP_FORWARDING | IP_OUTARGS,
3249 NULL, &ipoa);
3250
3251 /* Refresh rt since the route could have changed while in IP */
3252 rt = fwd_rt.ro_rt;
3253
3254 if (error != 0) {
3255 OSAddAtomic(1, &ipstat.ips_cantforward);
3256 } else {
3257 /*
3258 * Increment stats on the source interface; those for
3259 * the destination interface have been taken care of
3260 * during output above by virtue of PKTF_FORWARDED.
3261 */
3262 rcvifp->if_fpackets++;
3263 rcvifp->if_fbytes += len;
3264
3265 OSAddAtomic(1, &ipstat.ips_forward);
3266 if (type != 0) {
3267 OSAddAtomic(1, &ipstat.ips_redirectsent);
3268 } else {
3269 if (mcopy != NULL) {
3270 /*
3271 * If we didn't have to go thru ipflow and
3272 * the packet was successfully consumed by
3273 * ip_output, the mcopy is rather a waste;
3274 * this could be further optimized.
3275 */
3276 m_freem(mcopy);
3277 }
3278 goto done;
3279 }
3280 }
3281 if (mcopy == NULL) {
3282 goto done;
3283 }
3284
3285 switch (error) {
3286 case 0: /* forwarded, but need redirect */
3287 /* type, code set above */
3288 break;
3289
3290 case ENETUNREACH: /* shouldn't happen, checked above */
3291 case EHOSTUNREACH:
3292 case ENETDOWN:
3293 case EHOSTDOWN:
3294 default:
3295 type = ICMP_UNREACH;
3296 code = ICMP_UNREACH_HOST;
3297 break;
3298
3299 case EMSGSIZE:
3300 type = ICMP_UNREACH;
3301 code = ICMP_UNREACH_NEEDFRAG;
3302
3303 if (rt == NULL) {
3304 break;
3305 } else {
3306 RT_LOCK_SPIN(rt);
3307 if (rt->rt_ifp != NULL) {
3308 nextmtu = rt->rt_ifp->if_mtu;
3309 }
3310 RT_UNLOCK(rt);
3311 }
3312 #ifdef IPSEC
3313 if (ipsec_bypass) {
3314 break;
3315 }
3316
3317 /*
3318 * If the packet is routed over IPsec tunnel, tell the
3319 * originator the tunnel MTU.
3320 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
3321 * XXX quickhack!!!
3322 */
3323 sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND,
3324 IP_FORWARDING, &ipsecerror);
3325
3326 if (sp == NULL) {
3327 break;
3328 }
3329
3330 /*
3331 * find the correct route for outer IPv4
3332 * header, compute tunnel MTU.
3333 */
3334 nextmtu = 0;
3335
3336 if (sp->req != NULL &&
3337 sp->req->saidx.mode == IPSEC_MODE_TUNNEL) {
3338 struct secasindex saidx;
3339 struct secasvar *__single sav;
3340 struct route *__single ro;
3341 struct ip *__single ipm;
3342 size_t ipsechdr;
3343
3344 /* count IPsec header size */
3345 ipsechdr = ipsec_hdrsiz(sp);
3346
3347 ipm = mtod(mcopy, struct ip *);
3348 bcopy(&sp->req->saidx, &saidx, sizeof(saidx));
3349 saidx.mode = sp->req->saidx.mode;
3350 saidx.reqid = sp->req->saidx.reqid;
3351 sin = SIN(&saidx.src);
3352 if (sin->sin_len == 0) {
3353 sin->sin_len = sizeof(*sin);
3354 sin->sin_family = AF_INET;
3355 sin->sin_port = IPSEC_PORT_ANY;
3356 bcopy(&ipm->ip_src, &sin->sin_addr,
3357 sizeof(sin->sin_addr));
3358 }
3359 sin = SIN(&saidx.dst);
3360 if (sin->sin_len == 0) {
3361 sin->sin_len = sizeof(*sin);
3362 sin->sin_family = AF_INET;
3363 sin->sin_port = IPSEC_PORT_ANY;
3364 bcopy(&ipm->ip_dst, &sin->sin_addr,
3365 sizeof(sin->sin_addr));
3366 }
3367 sav = key_allocsa_policy(&saidx);
3368 if (sav != NULL) {
3369 lck_mtx_lock(sadb_mutex);
3370 if (sav->sah != NULL) {
3371 ro = (struct route *)&sav->sah->sa_route;
3372 if (ro->ro_rt != NULL) {
3373 RT_LOCK(ro->ro_rt);
3374 if (ro->ro_rt->rt_ifp != NULL) {
3375 nextmtu = ro->ro_rt->
3376 rt_ifp->if_mtu;
3377 nextmtu -= ipsechdr;
3378 }
3379 RT_UNLOCK(ro->ro_rt);
3380 }
3381 }
3382 key_freesav(sav, KEY_SADB_LOCKED);
3383 lck_mtx_unlock(sadb_mutex);
3384 }
3385 }
3386 key_freesp(sp, KEY_SADB_UNLOCKED);
3387 #endif /* IPSEC */
3388 break;
3389
3390 case ENOBUFS:
3391 /*
3392 * A router should not generate ICMP_SOURCEQUENCH as
3393 * required in RFC1812 Requirements for IP Version 4 Routers.
3394 * Source quench could be a big problem under DoS attacks,
3395 * or if the underlying interface is rate-limited.
3396 * Those who need source quench packets may re-enable them
3397 * via the net.inet.ip.sendsourcequench sysctl.
3398 */
3399 if (ip_sendsourcequench == 0) {
3400 m_freem(mcopy);
3401 goto done;
3402 } else {
3403 type = ICMP_SOURCEQUENCH;
3404 code = 0;
3405 }
3406 break;
3407
3408 case EACCES:
3409 m_freem(mcopy);
3410 goto done;
3411 }
3412
3413 if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) {
3414 OSAddAtomic(1, &ipstat.ips_cantfrag);
3415 }
3416
3417 icmp_error(mcopy, type, code, dest, nextmtu);
3418 done:
3419 ip_fwd_route_copyin(rcvifp, &fwd_rt);
3420 }
3421
3422 int
3423 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
3424 struct mbuf *m)
3425 {
3426 *mp = NULL;
3427 if (inp->inp_socket->so_options & SO_TIMESTAMP) {
3428 struct timeval tv;
3429
3430 getmicrotime(&tv);
3431 mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv),
3432 SCM_TIMESTAMP, SOL_SOCKET, mp);
3433 if (*mp == NULL) {
3434 goto no_mbufs;
3435 }
3436 }
3437 if (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) {
3438 uint64_t time;
3439
3440 time = mach_absolute_time();
3441 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
3442 SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
3443 if (*mp == NULL) {
3444 goto no_mbufs;
3445 }
3446 }
3447 if (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) {
3448 uint64_t time;
3449
3450 time = mach_continuous_time();
3451 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
3452 SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
3453 if (*mp == NULL) {
3454 goto no_mbufs;
3455 }
3456 }
3457 if (inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) {
3458 int tc = m_get_traffic_class(m);
3459
3460 mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc),
3461 SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
3462 if (*mp == NULL) {
3463 goto no_mbufs;
3464 }
3465 }
3466 if ((inp->inp_socket->so_flags & SOF_RECV_WAKE_PKT) &&
3467 (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
3468 int flag = 1;
3469
3470 mp = sbcreatecontrol_mbuf((caddr_t)&flag, sizeof(flag),
3471 SO_RECV_WAKE_PKT, SOL_SOCKET, mp);
3472 if (*mp == NULL) {
3473 goto no_mbufs;
3474 }
3475 }
3476
3477 if (inp->inp_flags & INP_RECVDSTADDR || SOFLOW_ENABLED(inp->inp_socket)) {
3478 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst,
3479 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp);
3480 if (*mp == NULL) {
3481 goto no_mbufs;
3482 }
3483 }
3484 #ifdef notyet
3485 /*
3486 * XXX
3487 * Moving these out of udp_input() made them even more broken
3488 * than they already were.
3489 */
3490 /* options were tossed already */
3491 if (inp->inp_flags & INP_RECVOPTS) {
3492 mp = sbcreatecontrol_mbuf((caddr_t)opts_deleted_above,
3493 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp);
3494 if (*mp == NULL) {
3495 goto no_mbufs;
3496 }
3497 }
3498 /* ip_srcroute doesn't do what we want here, need to fix */
3499 if (inp->inp_flags & INP_RECVRETOPTS) {
3500 mp = sbcreatecontrol_mbuf((caddr_t)ip_srcroute(),
3501 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp);
3502 if (*mp == NULL) {
3503 goto no_mbufs;
3504 }
3505 }
3506 #endif /* notyet */
3507 if (inp->inp_flags & INP_RECVIF) {
3508 ifnet_ref_t ifp;
3509 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
3510 struct sockaddr_dl *sdl2 = SDL(sdlbuf);
3511
3512 /*
3513 * Make sure to accommodate the largest possible
3514 * size of SA(if_lladdr)->sa_len.
3515 */
3516 static_assert(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
3517
3518 ifnet_head_lock_shared();
3519 if ((ifp = m->m_pkthdr.rcvif) != NULL &&
3520 ifp->if_index && IF_INDEX_IN_RANGE(ifp->if_index)) {
3521 struct ifaddr *__single ifa = ifnet_addrs[ifp->if_index - 1];
3522 struct sockaddr_dl *sdp;
3523
3524 if (!ifa || !ifa->ifa_addr) {
3525 goto makedummy;
3526 }
3527
3528 IFA_LOCK_SPIN(ifa);
3529 sdp = SDL(ifa->ifa_addr);
3530 /*
3531 * Change our mind and don't try to copy.
3532 */
3533 if (sdp->sdl_family != AF_LINK) {
3534 IFA_UNLOCK(ifa);
3535 goto makedummy;
3536 }
3537 /* the above static_assert() ensures sdl_len fits in sdlbuf */
3538 SOCKADDR_COPY(sdp, sdl2, sdp->sdl_len);
3539 IFA_UNLOCK(ifa);
3540 } else {
3541 makedummy:
3542 sdl2->sdl_len =
3543 offsetof(struct sockaddr_dl, sdl_data[0]);
3544 sdl2->sdl_family = AF_LINK;
3545 sdl2->sdl_index = 0;
3546 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
3547 }
3548 ifnet_head_done();
3549 mp = sbcreatecontrol_mbuf((caddr_t)SA_BYTES(sdl2), sdl2->sdl_len,
3550 IP_RECVIF, IPPROTO_IP, mp);
3551 if (*mp == NULL) {
3552 goto no_mbufs;
3553 }
3554 }
3555 if (inp->inp_flags & INP_RECVTTL) {
3556 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_ttl,
3557 sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp);
3558 if (*mp == NULL) {
3559 goto no_mbufs;
3560 }
3561 }
3562 if (inp->inp_flags & INP_PKTINFO) {
3563 struct in_pktinfo pi;
3564
3565 bzero(&pi, sizeof(struct in_pktinfo));
3566 bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof(struct in_addr));
3567 pi.ipi_ifindex = (m != NULL && m->m_pkthdr.rcvif != NULL) ?
3568 m->m_pkthdr.rcvif->if_index : 0;
3569
3570 mp = sbcreatecontrol_mbuf((caddr_t)&pi,
3571 sizeof(struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp);
3572 if (*mp == NULL) {
3573 goto no_mbufs;
3574 }
3575 }
3576 if (inp->inp_flags & INP_RECVTOS) {
3577 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_tos,
3578 sizeof(u_char), IP_RECVTOS, IPPROTO_IP, mp);
3579 if (*mp == NULL) {
3580 goto no_mbufs;
3581 }
3582 }
3583 if (inp->inp_flags2 & INP2_RECV_LINK_ADDR_TYPE) {
3584 int mode = IP_RECV_LINK_ADDR_UNICAST;
3585
3586 if (m->m_flags & M_BCAST) {
3587 mode = IP_RECV_LINK_ADDR_BROADCAST;
3588 } else if (m->m_flags & M_MCAST) {
3589 mode = IP_RECV_LINK_ADDR_MULTICAST;
3590 }
3591
3592 mp = sbcreatecontrol_mbuf((caddr_t)&mode,
3593 sizeof(int), IP_RECV_LINK_ADDR_TYPE, IPPROTO_IP, mp);
3594 if (*mp == NULL) {
3595 goto no_mbufs;
3596 }
3597 }
3598 return 0;
3599
3600 no_mbufs:
3601 ipstat.ips_pktdropcntrl++;
3602 return ENOBUFS;
3603 }
3604
3605 static inline u_short
3606 ip_cksum(struct mbuf *m, int hlen)
3607 {
3608 u_short sum;
3609
3610 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3611 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3612 } else if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
3613 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
3614 /*
3615 * The packet arrived on an interface which isn't capable
3616 * of performing IP header checksum; compute it now.
3617 */
3618 sum = ip_cksum_hdr_in(m, hlen);
3619 } else {
3620 sum = 0;
3621 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
3622 CSUM_IP_CHECKED | CSUM_IP_VALID);
3623 m->m_pkthdr.csum_data = 0xffff;
3624 }
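/*
 * Three cases, in order: the driver already verified the header
 * checksum (CSUM_IP_CHECKED, with CSUM_IP_VALID carrying the
 * verdict), the interface offers no assist so the sum is computed
 * in software, or the packet never left the machine (loopback) and
 * is marked valid without computation.
 */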
3625
3626 if (sum != 0) {
3627 OSAddAtomic(1, &ipstat.ips_badsum);
3628 }
3629
3630 return sum;
3631 }
3632
3633 static int
3634 ip_getstat SYSCTL_HANDLER_ARGS
3635 {
3636 #pragma unused(oidp, arg1, arg2)
3637 if (req->oldptr == USER_ADDR_NULL) {
3638 req->oldlen = (size_t)sizeof(struct ipstat);
3639 }
3640
3641 return SYSCTL_OUT(req, &ipstat, MIN(sizeof(ipstat), req->oldlen));
3642 }
3643
3644 void
3645 ip_setsrcifaddr_info(struct mbuf *m, uint16_t src_idx, struct in_ifaddr *ia)
3646 {
3647 VERIFY(m->m_flags & M_PKTHDR);
3648
3649 /*
3650 * If the source ifaddr is specified, pick up the information
3651 * from there; otherwise just grab the passed-in ifindex as the
3652 * caller may not have the ifaddr available.
3653 */
3654 if (ia != NULL) {
3655 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3656 m->m_pkthdr.src_ifindex = ia->ia_ifp->if_index;
3657 } else {
3658 m->m_pkthdr.src_ifindex = src_idx;
3659 if (src_idx != 0) {
3660 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3661 }
3662 }
3663 }
3664
3665 void
3666 ip_setdstifaddr_info(struct mbuf *m, uint16_t dst_idx, struct in_ifaddr *ia)
3667 {
3668 VERIFY(m->m_flags & M_PKTHDR);
3669
3670 /*
3671 * If the destination ifaddr is specified, pick up the information
3672 * from there; otherwise just grab the passed-in ifindex as the
3673 * caller may not have the ifaddr available.
3674 */
3675 if (ia != NULL) {
3676 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3677 m->m_pkthdr.dst_ifindex = ia->ia_ifp->if_index;
3678 } else {
3679 m->m_pkthdr.dst_ifindex = dst_idx;
3680 if (dst_idx != 0) {
3681 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
3682 }
3683 }
3684 }

int
ip_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *iaf)
{
	VERIFY(m->m_flags & M_PKTHDR);

	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
		return -1;
	}

	if (src_idx != NULL) {
		*src_idx = m->m_pkthdr.src_ifindex;
	}

	if (iaf != NULL) {
		/* no ifaddr flags are recorded with the packet */
		*iaf = 0;
	}

	return 0;
}

int
ip_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *iaf)
{
	VERIFY(m->m_flags & M_PKTHDR);

	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
		return -1;
	}

	if (dst_idx != NULL) {
		*dst_idx = m->m_pkthdr.dst_ifindex;
	}

	if (iaf != NULL) {
		/* no ifaddr flags are recorded with the packet */
		*iaf = 0;
	}

	return 0;
}
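
/*
 * The setters and getters above pair up; e.g. a caller holding a
 * packet previously tagged with ip_setdstifaddr_info() can recover
 * the interface index like this (sketch; "m" is such a tagged mbuf):
 *
 *	uint32_t dst_ifindex;
 *	if (ip_getdstifaddr_info(m, &dst_ifindex, NULL) == 0) {
 *		...use dst_ifindex...
 *	}
 */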

/*
 * Protocol input handler for IPPROTO_GRE.
 */
void
gre_input(struct mbuf *m, int off)
{
	gre_input_func_t fn = gre_input_func;

	/*
	 * If there is a registered GRE input handler, pass the mbuf to it.
	 */
	if (fn != NULL) {
		lck_mtx_unlock(inet_domain_mutex);
		m = fn(m, off, (mtod(m, struct ip *))->ip_p);
		lck_mtx_lock(inet_domain_mutex);
	}

	/*
	 * If the handler did not consume the mbuf (e.g. no matching
	 * tunnel that is up was found), inject it into the raw IP
	 * socket layer to see if anyone picks it up.
	 */
	if (m != NULL) {
		rip_input(m, off);
	}
}

/*
 * Private KPI for PPP/PPTP.
 */
int
ip_gre_register_input(gre_input_func_t fn)
{
	lck_mtx_lock(inet_domain_mutex);
	gre_input_func = fn;
	lck_mtx_unlock(inet_domain_mutex);

	return 0;
}
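
/*
 * A GRE implementation would hook in roughly as follows (sketch only;
 * "my_gre_input" is a hypothetical name, and its signature is inferred
 * from the call site in gre_input() above -- the handler returns the
 * mbuf if it did not consume it, or NULL if it did):
 *
 *	static struct mbuf *
 *	my_gre_input(struct mbuf *m, int off, int proto)
 *	{
 *		...try to demultiplex to a tunnel interface...
 *		return m;	...not ours; gre_input() falls back
 *				to rip_input()...
 *	}
 *
 *	ip_gre_register_input(my_gre_input);
 */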

#if (DEBUG || DEVELOPMENT)
static int
sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, i;

	i = ip_input_measure;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || req->newptr == USER_ADDR_NULL) {
		goto done;
	}
	/* impose bounds */
	if (i < 0 || i > 1) {
		error = EINVAL;
		goto done;
	}
	if (ip_input_measure != i && i == 1) {
		net_perf_initialize(&net_perf, ip_input_measure_bins);
	}
	ip_input_measure = i;
done:
	return error;
}

static int
sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	uint64_t i;

	i = ip_input_measure_bins;
	error = sysctl_handle_quad(oidp, &i, 0, req);
	if (error || req->newptr == USER_ADDR_NULL) {
		goto done;
	}
	/* validate data */
	if (!net_perf_validate_bins(i)) {
		error = EINVAL;
		goto done;
	}
	ip_input_measure_bins = i;
done:
	return error;
}

static int
sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->oldptr == USER_ADDR_NULL) {
		/* the size hint must describe the object exported below */
		req->oldlen = (size_t)sizeof(net_perf);
	}

	return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
}
#endif /* (DEBUG || DEVELOPMENT) */
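
/*
 * On DEBUG/DEVELOPMENT kernels the three handlers above drive the
 * input-path performance measurement. A sketch of the userland side,
 * assuming the corresponding SYSCTL_PROC entries are registered under
 * net.inet.ip (the exact OID names live with those declarations):
 *
 *	int on = 1;
 *	struct net_perf perf;
 *	size_t len = sizeof(perf);
 *
 *	sysctlbyname("net.inet.ip.input_perf", NULL, NULL, &on, sizeof(on));
 *	...generate inbound traffic...
 *	sysctlbyname("net.inet.ip.input_perf_data", &perf, &len, NULL, 0);
 */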

/*
 * Control how strictly an inbound packet's destination must match the
 * receiving interface (cf. the weak vs. strong end-system models of
 * RFC 1122); only the three known policies are accepted.
 */
static int
sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, i;

	i = ip_checkinterface;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error != 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}

	switch (i) {
	case IP_CHECKINTERFACE_WEAK_ES:
	case IP_CHECKINTERFACE_HYBRID_ES:
	case IP_CHECKINTERFACE_STRONG_ES:
		if (ip_checkinterface != i) {
			ip_checkinterface = i;
			os_log(OS_LOG_DEFAULT, "%s: ip_checkinterface is now %d\n",
			    __func__, ip_checkinterface);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
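
/*
 * A sketch of selecting a policy from userland, assuming this handler
 * is registered as "net.inet.ip.check_interface" (the OID name is an
 * assumption; see the SYSCTL_PROC declaration for the actual one):
 *
 *	int mode = IP_CHECKINTERFACE_STRONG_ES;
 *	sysctlbyname("net.inet.ip.check_interface",
 *	    NULL, NULL, &mode, sizeof(mode));
 */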