/*
 * Copyright (c) 2004-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <kern/debug.h>
#include <netinet/in_arp.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel_types.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/protosw.h>
#include <string.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/dlil.h>
#include <net/if_types.h>
#include <net/if_llreach.h>
#include <net/route.h>
#include <net/nwk_wq.h>

#include <netinet/if_ether.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/uipc_domain.h>
#include <kern/zalloc.h>

#include <net/sockaddr_utils.h>

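/*
 * Longest hardware address we hex-format or copy into collision
 * events; see sdl_addr_to_hex() and the KEV_INET_ARPCOLLISION
 * handling below.
 */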
static const size_t MAX_HW_LEN = 10;

/*
 * Synchronization notes:
 *
 * The global list of ARP entries is stored in llinfo_arp; an entry
 * gets inserted into the list when the route is created and gets
 * removed from the list when it is deleted; this is done as part
 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
 *
 * Because rnh_lock and rt_lock for the entry are held during those
 * operations, the same locks (and thus lock ordering) must be used
 * elsewhere to access the relevant data structure fields:
 *
 * la_le.{le_next,le_prev}, la_rt
 *
 *	- Routing lock (rnh_lock)
 *
 * la_holdq, la_asked, la_llreach, la_lastused, la_flags
 *
 *	- Routing entry lock (rt_lock)
 *
 * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
 * as the route entry itself.  When a route is deleted (RTM_DELETE),
 * it is simply removed from the global list but the memory is not
 * freed until the route itself is freed.
 */
struct llinfo_arp {
    /*
     * The following are protected by rnh_lock
     */
    LIST_ENTRY(llinfo_arp) la_le;
    struct rtentry *la_rt;
    /*
     * The following are protected by rt_lock
     */
    class_queue_t la_holdq;         /* packets awaiting resolution */
    struct if_llreach *la_llreach;  /* link-layer reachability record */
    u_int64_t la_lastused;          /* last used timestamp */
    u_int32_t la_asked;             /* # of requests sent */
    u_int32_t la_maxtries;          /* retry limit */
    u_int64_t la_probeexp;          /* probe deadline timestamp */
    u_int32_t la_prbreq_cnt;        /* probe request count */
    u_int32_t la_flags;
#define LLINFO_RTRFAIL_EVTSENT  0x1 /* sent an ARP event */
#define LLINFO_PROBING          0x2 /* waiting for an ARP reply */
};

static LIST_HEAD(, llinfo_arp) llinfo_arp = LIST_HEAD_INITIALIZER(llinfo_arp);

static thread_call_t arp_timeout_tcall;
static int arp_timeout_run;     /* arp_timeout is scheduled to run */
static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1);
static void arp_sched_timeout(struct timeval *);

static thread_call_t arp_probe_tcall;
static int arp_probe_run;       /* arp_probe is scheduled to run */
static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1);
static void arp_sched_probe(struct timeval *);

static void arptfree(struct llinfo_arp *, void *);
static errno_t arp_lookup_route(const struct in_addr *, int,
    int, route_t *, unsigned int);
static int arp_getstat SYSCTL_HANDLER_ARGS;

static struct llinfo_arp *arp_llinfo_alloc(zalloc_flags_t);
static void arp_llinfo_free(void *);
static uint32_t arp_llinfo_flushq(struct llinfo_arp *);
static void arp_llinfo_purge(struct rtentry *);
static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
static void arp_llinfo_refresh(struct rtentry *);

static __inline void arp_llreach_use(struct llinfo_arp *);
static __inline int arp_llreach_reachable(struct llinfo_arp *);
static void arp_llreach_alloc(struct rtentry *, struct ifnet *,
    void *__sized_by(alen)addr,
    unsigned int alen, boolean_t, uint32_t *);

extern int tvtohz(struct timeval *);

SYSCTL_DECL(_net_link_ether);
SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

static int arpt_prune = (5 * 60 * 1);   /* walk list every 5 minutes */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");

#define ARP_PROBE_TIME 7        /* seconds */
static u_int32_t arpt_probe = ARP_PROBE_TIME;
SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, "");

static int arpt_keep = (20 * 60); /* once resolved, good for 20 more minutes */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");

static int arpt_down = 20;      /* once declared down, don't send for 20 sec */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");

static int arp_llreach_base = 120;      /* seconds */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0,
    "default ARP link-layer reachability max lifetime (in seconds)");

#define ARP_UNICAST_LIMIT 3     /* # of probes until ARP refresh broadcast */
static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
    "number of unicast ARP refresh probes before using broadcast");

static u_int32_t arp_maxtries = 5;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");

static u_int32_t arp_maxhold = 16;
SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, "");

static int useloopback = 1;     /* use loopback interface for local traffic */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
    CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");

static int arp_proxyall = 0;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");

static int arp_sendllconflict = 0;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");

static int log_arp_warnings = 0;        /* Thread safe: no accumulated state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &log_arp_warnings, 0,
    "log arp warning messages");

static int keep_announcements = 1;      /* Thread safe: no aging of state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &keep_announcements, 0,
    "keep arp announcements");

static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &send_conflicting_probes, 0,
    "send conflicting link-local arp probes");

static int arp_verbose;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");

static uint32_t arp_maxhold_total = 1024; /* max total packets in the holdq */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold_total,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold_total, 0, "");

/*
 * Generally protected by rnh_lock; use atomic operations on fields
 * that are also modified outside of that lock (if needed).
 */
struct arpstat arpstat __attribute__((aligned(sizeof(uint64_t))));
SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, arp_getstat, "S,arpstat",
    "ARP statistics (struct arpstat, net/if_arp.h)");

static struct llinfo_arp *
arp_llinfo_alloc(zalloc_flags_t how)
{
    struct llinfo_arp *la = kalloc_type(struct llinfo_arp, how | Z_ZERO);

    if (la != NULL) {
        /*
         * The type of queue (Q_DROPHEAD) here is just a hint;
         * the actual logic that works on this queue performs
         * a head drop, details in arp_llinfo_addq().
         */
        _qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ?
            (uint32_t)-1 : arp_maxhold, QP_MBUF);
    }
    return la;
}

static void
arp_llinfo_free(void *arg)
{
    struct llinfo_arp *__single la = arg;

    if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
        panic("%s: trying to free %p when it is in use", __func__, la);
        /* NOTREACHED */
    }

    /* Free any held packets */
    (void) arp_llinfo_flushq(la);

    /* Purge any link-layer info caching */
    VERIFY(la->la_rt->rt_llinfo == la);
    if (la->la_rt->rt_llinfo_purge != NULL) {
        la->la_rt->rt_llinfo_purge(la->la_rt);
    }

    kfree_type(struct llinfo_arp, la);
}

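/*
 * Enqueue a packet awaiting resolution, enforcing both the global
 * cap (arp_maxhold_total) and the per-entry cap (arp_maxhold, where
 * 0 means unlimited); when the per-entry queue is full, a packet is
 * dropped from the head, preferring one of service class below CTL.
 */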
static bool
arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m)
{
    classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);

    if (arpstat.held >= arp_maxhold_total) {
        if (arp_verbose) {
            log(LOG_DEBUG,
                "%s: dropping packet due to maxhold_total\n",
                __func__);
        }
        os_atomic_inc(&arpstat.dropped, relaxed);
        return false;
    }

    if (qlen(&la->la_holdq) >= qlimit(&la->la_holdq)) {
        struct mbuf *_m;
        /* prune less than CTL, else take what's at the head */
        _getq_scidx_lt(&la->la_holdq, &pkt, SCIDX_CTL);
        _m = pkt.cp_mbuf;
        if (_m == NULL) {
            _getq(&la->la_holdq, &pkt);
            _m = pkt.cp_mbuf;
        }
        VERIFY(_m != NULL);
        if (arp_verbose) {
            log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n",
                __func__, MBUF_SCIDX(mbuf_get_service_class(_m)));
        }
        m_freem(_m);
        os_atomic_inc(&arpstat.dropped, relaxed);
        os_atomic_dec(&arpstat.held, relaxed);
    }
    CLASSQ_PKT_INIT_MBUF(&pkt, m);
    _addq(&la->la_holdq, &pkt);
    os_atomic_inc(&arpstat.held, relaxed);
    if (arp_verbose) {
        log(LOG_DEBUG, "%s: enqueued packet (scidx %u), qlen now %u\n",
            __func__, MBUF_SCIDX(mbuf_get_service_class(m)),
            qlen(&la->la_holdq));
    }

    return true;
}

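/*
 * Drop everything held for this entry and reset its pending probe
 * request count; returns the number of packets that were purged.
 */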
static uint32_t
arp_llinfo_flushq(struct llinfo_arp *la)
{
    uint32_t held = qlen(&la->la_holdq);

    if (held != 0) {
        os_atomic_add(&arpstat.purged, held, relaxed);
        os_atomic_add(&arpstat.held, -held, relaxed);
        _flushq(&la->la_holdq);
    }
    la->la_prbreq_cnt = 0;
    VERIFY(qempty(&la->la_holdq));
    return held;
}

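/*
 * rt_llinfo_purge callback: release the entry's llreach record (if
 * any) so that a fresh one can be allocated on the next update.
 */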
static void
arp_llinfo_purge(struct rtentry *rt)
{
    struct llinfo_arp *__single la = rt->rt_llinfo;

    RT_LOCK_ASSERT_HELD(rt);
    VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);

    if (la->la_llreach != NULL) {
        RT_CONVERT_LOCK(rt);
        ifnet_llreach_free(la->la_llreach);
        la->la_llreach = NULL;
    }
    la->la_lastused = 0;
}

static void
arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
{
    struct llinfo_arp *__single la = rt->rt_llinfo;
    struct if_llreach *lr = la->la_llreach;

    if (lr == NULL) {
        bzero(ri, sizeof(*ri));
        ri->ri_rssi = IFNET_RSSI_UNKNOWN;
        ri->ri_lqm = IFNET_LQM_THRESH_OFF;
        ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
    } else {
        IFLR_LOCK(lr);
        /* Export to rt_reach_info structure */
        ifnet_lr2ri(lr, ri);
        /* Export ARP send expiration (calendar) time */
        ri->ri_snd_expire =
            ifnet_llreach_up2calexp(lr, la->la_lastused);
        IFLR_UNLOCK(lr);
    }
}

static void
arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
{
    struct llinfo_arp *__single la = rt->rt_llinfo;
    struct if_llreach *lr = la->la_llreach;

    if (lr == NULL) {
        bzero(iflri, sizeof(*iflri));
        iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
        iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
        iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
    } else {
        IFLR_LOCK(lr);
        /* Export to ifnet_llreach_info structure */
        ifnet_lr2iflri(lr, iflri);
        /* Export ARP send expiration (uptime) time */
        iflri->iflri_snd_expire =
            ifnet_llreach_up2upexp(lr, la->la_lastused);
        IFLR_UNLOCK(lr);
    }
}

static void
arp_llinfo_refresh(struct rtentry *rt)
{
    uint64_t timenow = net_uptime();
    /*
     * If the route entry is permanent or static, or carries no
     * link-layer info, we can't expedite the refresh.
     */
    if ((rt->rt_expire == 0) ||
        (rt->rt_flags & RTF_STATIC) ||
        !(rt->rt_flags & RTF_LLINFO)) {
        return;
    }

    if (rt->rt_expire > timenow) {
        rt->rt_expire = timenow;
    }
}

void
arp_llreach_set_reachable(struct ifnet *ifp, void *__sized_by(alen)addr,
    unsigned int alen)
{
    /* Nothing more to do if it's disabled */
    if (arp_llreach_base == 0) {
        return;
    }

    ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
}

static __inline void
arp_llreach_use(struct llinfo_arp *la)
{
    if (la->la_llreach != NULL) {
        la->la_lastused = net_uptime();
    }
}

static __inline int
arp_llreach_reachable(struct llinfo_arp *la)
{
    struct if_llreach *lr;
    const char *why = NULL;

    /* Nothing more to do if it's disabled; pretend it's reachable */
    if (arp_llreach_base == 0) {
        return 1;
    }

    if ((lr = la->la_llreach) == NULL) {
        /*
         * Link-layer reachability record isn't present for this
         * ARP entry; pretend it's reachable and use it as is.
         */
        return 1;
    } else if (ifnet_llreach_reachable(lr)) {
        /*
         * Record is present, it's not shared with other ARP
         * entries and a packet has recently been received
         * from the remote host; consider it reachable.
         */
        if (lr->lr_reqcnt == 1) {
            return 1;
        }

        /* Prime it up, if this is the first time */
        if (la->la_lastused == 0) {
            VERIFY(la->la_llreach != NULL);
            arp_llreach_use(la);
        }

        /*
         * Record is present and shared with one or more ARP
         * entries, and a packet has recently been received
         * from the remote host.  Since it's shared by more
         * than one IP address, we can't rely on the link-
         * layer reachability alone; consider it reachable if
         * this ARP entry has been used "recently."
         */
        if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) {
            return 1;
        }

        why = "has alias(es) and hasn't been used in a while";
    } else {
        why = "haven't heard from it in a while";
    }

    if (arp_verbose > 1) {
        char tmp[MAX_IPv4_STR_LEN];
        u_int64_t now = net_uptime();

        log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
            "%s [lastused %lld, lastrcvd %lld] secs ago\n",
            if_name(lr->lr_ifp), inet_ntop(AF_INET,
            &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof(tmp)), why,
            (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
            (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
    }
    return 0;
}

/*
 * Obtain a link-layer source cache entry for the sender.
 *
 * NOTE: This is currently only for ARP/Ethernet.
 */
static void
arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp,
    void *__sized_by(alen)addr,
    unsigned int alen, boolean_t solicited, uint32_t *p_rt_event_code)
{
    VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
    VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);

    if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
        !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
        ifp->if_addrlen == IF_LLREACH_MAXLEN &&         /* Ethernet */
        alen == ifp->if_addrlen) {
        struct llinfo_arp *__single la = rt->rt_llinfo;
        struct if_llreach *lr;
        const char *why = NULL, *type = "";

        /* Become a regular mutex, just in case */
        RT_CONVERT_LOCK(rt);

        if ((lr = la->la_llreach) != NULL) {
            type = (solicited ? "ARP reply" : "ARP announcement");
            /*
             * If target has changed, create a new record;
             * otherwise keep existing record.
             */
            IFLR_LOCK(lr);
            if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
                IFLR_UNLOCK(lr);
                /* Purge any link-layer info caching */
                VERIFY(rt->rt_llinfo_purge != NULL);
                rt->rt_llinfo_purge(rt);
                lr = NULL;
                why = " for different target HW address; "
                    "using new llreach record";
                *p_rt_event_code = ROUTE_LLENTRY_CHANGED;
            } else {
                /*
                 * If we were doing unicast probing, we need to
                 * deliver an event for neighbor cache resolution
                 */
                if (lr->lr_probes != 0) {
                    *p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
                }

                lr->lr_probes = 0;      /* reset probe count */
                IFLR_UNLOCK(lr);
                if (solicited) {
                    why = " for same target HW address; "
                        "keeping existing llreach record";
                }
            }
        }

        if (lr == NULL) {
            lr = la->la_llreach = ifnet_llreach_alloc(ifp,
                ETHERTYPE_IP, addr, alen, arp_llreach_base);
            if (lr != NULL) {
                lr->lr_probes = 0;      /* reset probe count */
                if (why == NULL) {
                    why = "creating new llreach record";
                }
            }
            *p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
        }

        if (arp_verbose > 1 && lr != NULL && why != NULL) {
            char tmp[MAX_IPv4_STR_LEN];

            log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
                type, why, inet_ntop(AF_INET,
                &SIN(rt_key(rt))->sin_addr, tmp, sizeof(tmp)));
        }
    }
}

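/*
 * Walker context shared by in_arpdrain(), arp_timeout() and
 * arp_probe(); arptfree() updates these counters as it visits
 * each entry on the llinfo_arp list.
 */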
struct arptf_arg {
    boolean_t draining;
    boolean_t probing;
    uint32_t killed;
    uint32_t aging;
    uint32_t sticky;
    uint32_t found;
    uint32_t qlen;
    uint32_t qsize;
};

/*
 * Free an arp entry.
 */
static void
arptfree(struct llinfo_arp *la, void *arg)
{
    struct arptf_arg *__single ap = arg;
    struct rtentry *rt = la->la_rt;
    uint64_t timenow;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /* rnh_lock acquired by caller protects rt from going away */
    RT_LOCK(rt);

    VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
    VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);

    ap->found++;
    timenow = net_uptime();

    /* If we're probing, flush out held packets upon probe expiration */
    if (ap->probing && (la->la_flags & LLINFO_PROBING) &&
        la->la_probeexp <= timenow) {
        struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
        if (sdl != NULL) {
            sdl->sdl_alen = 0;
        }
        (void) arp_llinfo_flushq(la);
        /*
         * Enqueue work item to invoke callback for this route entry
         */
        route_event_enqueue_nwk_wq_entry(rt, NULL,
            ROUTE_LLENTRY_UNREACH, NULL, TRUE);
    }

    /*
     * The following counters are used mostly for logging and for
     * arming the timer again; qlen is what drives the re-arm
     * decision.  Pure probe requests are therefore counted as
     * zero-length packets: they contribute to the queue length
     * but not to its size.
     */
    ap->qlen += qlen(&la->la_holdq);
    ap->qlen += la->la_prbreq_cnt;
    ap->qsize += qsize(&la->la_holdq);

    if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
        ap->sticky++;
        /* ARP entry is permanent? */
        if (rt->rt_expire == 0) {
            RT_UNLOCK(rt);
            return;
        }
    }

    /* ARP entry hasn't expired and we're not draining? */
    if (!ap->draining && rt->rt_expire > timenow) {
        RT_UNLOCK(rt);
        ap->aging++;
        return;
    }

    if (rt->rt_refcnt > 0) {
        /*
         * ARP entry has expired, with outstanding refcnt.
         * If we're not draining, force ARP query to be
         * generated next time this entry is used.
         */
        if (!ap->draining && !ap->probing) {
            struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
            if (sdl != NULL) {
                sdl->sdl_alen = 0;
            }
            la->la_asked = 0;
            rt->rt_flags &= ~RTF_REJECT;
        }
        RT_UNLOCK(rt);
    } else if (!(rt->rt_flags & RTF_STATIC) && !ap->probing) {
        /*
         * ARP entry has no outstanding refcnt, and we're either
         * draining or it has expired; delete it from the routing
         * table.  Safe to drop rt_lock and use rt_key, since holding
         * rnh_lock here prevents another thread from calling
         * rt_setgate() on this route.
         */
        RT_UNLOCK(rt);
        rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
            rt_mask(rt), 0, NULL);
        arpstat.timeouts++;
        ap->killed++;
    } else {
        /* ARP entry is static; let it linger */
        RT_UNLOCK(rt);
    }
}

void
in_arpdrain(void *arg)
{
#pragma unused(arg)
    struct llinfo_arp *la, *ola;
    struct arptf_arg farg;

    if (arp_verbose) {
        log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);
    }

    lck_mtx_lock(rnh_lock);
    la = llinfo_arp.lh_first;
    bzero(&farg, sizeof(farg));
    farg.draining = TRUE;
    while ((ola = la) != NULL) {
        la = la->la_le.le_next;
        arptfree(ola, &farg);
    }
    if (arp_verbose) {
        log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
            "%u pkts held (%u bytes)\n", __func__, farg.found,
            farg.aging, farg.sticky, farg.killed, farg.qlen,
            farg.qsize);
    }
    lck_mtx_unlock(rnh_lock);
}

/*
 * Timeout routine.  Age arp_tab entries periodically.
 */
static void
arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1)
{
#pragma unused(arg0, arg1)
    struct llinfo_arp *la, *ola;
    struct timeval atv;
    struct arptf_arg farg;

    lck_mtx_lock(rnh_lock);
    la = llinfo_arp.lh_first;
    bzero(&farg, sizeof(farg));
    while ((ola = la) != NULL) {
        la = la->la_le.le_next;
        arptfree(ola, &farg);
    }
    if (arp_verbose) {
        log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
            "%u pkts held (%u bytes)\n", __func__, farg.found,
            farg.aging, farg.sticky, farg.killed, farg.qlen,
            farg.qsize);
    }
    atv.tv_usec = 0;
    atv.tv_sec = MAX(arpt_prune, 5);
    /* re-arm the timer if there's work to do */
    arp_timeout_run = 0;
    if (farg.aging > 0) {
        arp_sched_timeout(&atv);
    } else if (arp_verbose) {
        log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
    }
    lck_mtx_unlock(rnh_lock);
}

static void
arp_sched_timeout(struct timeval *atv)
{
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    if (!arp_timeout_run) {
        struct timeval tv;
        uint64_t deadline = 0;

        if (arp_timeout_tcall == NULL) {
            arp_timeout_tcall =
                thread_call_allocate(arp_timeout, NULL);
            VERIFY(arp_timeout_tcall != NULL);
        }

        if (atv == NULL) {
            tv.tv_usec = 0;
            tv.tv_sec = MAX(arpt_prune / 5, 1);
            atv = &tv;
        }
        if (arp_verbose) {
            log(LOG_DEBUG, "%s: timer scheduled in "
                "T+%llus.%lluu\n", __func__,
                (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
        }
        arp_timeout_run = 1;

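        /* Only whole seconds are honored; tv_usec is not part of the deadline */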
        clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
            mach_absolute_time(), &deadline);
        (void) thread_call_enter_delayed(arp_timeout_tcall, deadline);
    }
}

/*
 * Probe routine.
 */
static void
arp_probe(thread_call_param_t arg0, thread_call_param_t arg1)
{
#pragma unused(arg0, arg1)
    struct llinfo_arp *la, *ola;
    struct timeval atv;
    struct arptf_arg farg;

    lck_mtx_lock(rnh_lock);
    la = llinfo_arp.lh_first;
    bzero(&farg, sizeof(farg));
    farg.probing = TRUE;
    while ((ola = la) != NULL) {
        la = la->la_le.le_next;
        arptfree(ola, &farg);
    }
    if (arp_verbose) {
        log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
            "%u pkts held (%u bytes)\n", __func__, farg.found,
            farg.aging, farg.sticky, farg.killed, farg.qlen,
            farg.qsize);
    }
    atv.tv_usec = 0;
    atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
    /* re-arm the probe if there's work to do */
    arp_probe_run = 0;
    if (farg.qlen > 0) {
        arp_sched_probe(&atv);
    } else if (arp_verbose) {
        log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__);
    }
    lck_mtx_unlock(rnh_lock);
}

static void
arp_sched_probe(struct timeval *atv)
{
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    if (!arp_probe_run) {
        struct timeval tv;
        uint64_t deadline = 0;

        if (arp_probe_tcall == NULL) {
            arp_probe_tcall =
                thread_call_allocate(arp_probe, NULL);
            VERIFY(arp_probe_tcall != NULL);
        }

        if (atv == NULL) {
            tv.tv_usec = 0;
            tv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
            atv = &tv;
        }
        if (arp_verbose) {
            log(LOG_DEBUG, "%s: probe scheduled in "
                "T+%llus.%lluu\n", __func__,
                (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
        }
        arp_probe_run = 1;

        clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
            mach_absolute_time(), &deadline);
        (void) thread_call_enter_delayed(arp_probe_tcall, deadline);
    }
}

/*
 * ifa_rtrequest() callback
 */
static void
arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
{
#pragma unused(sa)
    struct sockaddr *gate = rt->rt_gateway;
    struct llinfo_arp *__single la = rt->rt_llinfo;
    static struct sockaddr_dl null_sdl =
        { .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK };
    uint64_t timenow;
    char buf[MAX_IPv4_STR_LEN];

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    RT_LOCK_ASSERT_HELD(rt);

    if (rt->rt_flags & RTF_GATEWAY) {
        return;
    }

    timenow = net_uptime();
    switch (req) {
    case RTM_ADD:
        /*
         * XXX: If this is a manually added route to an interface,
         * such as an older version of routed or gated might
         * provide, restore the cloning bit.
         */
        if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
            SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) {
            rt->rt_flags |= RTF_CLONING;
        }

        if (rt->rt_flags & RTF_CLONING) {
            /*
             * Case 1: This route should come from a route to iface.
             */
            if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
                gate = rt->rt_gateway;
                SDL(gate)->sdl_type = rt->rt_ifp->if_type;
                SDL(gate)->sdl_index = rt->rt_ifp->if_index;
                /*
                 * In case we're called before 1.0 sec.
                 * has elapsed.
                 */
                rt_setexpire(rt, MAX(timenow, 1));
            }
            break;
        }
        /* Announce a new entry if requested. */
        if (rt->rt_flags & RTF_ANNOUNCE) {
            if (la != NULL) {
                arp_llreach_use(la);    /* Mark use timestamp */
            }
            if ((rt->rt_ifp->if_flags & IFF_NOARP) == 0) {
                RT_UNLOCK(rt);
                dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
                    SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
                RT_LOCK(rt);
                arpstat.txannounces++;
            }
        }
        OS_FALLTHROUGH;
    case RTM_RESOLVE:
        if (gate->sa_family != AF_LINK ||
            gate->sa_len < sizeof(null_sdl)) {
            arpstat.invalidreqs++;
            log(LOG_ERR, "%s: route to %s has bad gateway address "
                "(sa_family %u sa_len %u) on %s\n",
                __func__, inet_ntop(AF_INET,
                &SIN(rt_key(rt))->sin_addr.s_addr, buf,
                sizeof(buf)), gate->sa_family, gate->sa_len,
                if_name(rt->rt_ifp));
            break;
        }
        SDL(gate)->sdl_type = rt->rt_ifp->if_type;
        SDL(gate)->sdl_index = rt->rt_ifp->if_index;

        if (la != NULL) {
            break; /* This happens on a route change */
        }
        /*
         * Case 2: This route may come from cloning, or a manual route
         * add with a LL address.
         */
        rt->rt_llinfo = la = arp_llinfo_alloc(Z_WAITOK);

        rt->rt_llinfo_get_ri = arp_llinfo_get_ri;
        rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
        rt->rt_llinfo_purge = arp_llinfo_purge;
        rt->rt_llinfo_free = arp_llinfo_free;
        rt->rt_llinfo_refresh = arp_llinfo_refresh;
        rt->rt_flags |= RTF_LLINFO;
        la->la_rt = rt;
        LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
        arpstat.inuse++;

        /* We have at least one entry; arm the timer if not already */
        arp_sched_timeout(NULL);

        /*
         * This keeps multicast addresses from showing up in
         * `arp -a' listings as unresolved; such an entry is not
         * actually functional.  Broadcast is handled the same way.
         * For an IPv4 link-local address, keep the entry around
         * even after it has expired.
         */
        if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
            RT_UNLOCK(rt);
            dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
                sizeof(struct sockaddr_dl));
            RT_LOCK(rt);
            rt_setexpire(rt, 0);
        } else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
            rt->rt_ifp)) {
            struct sockaddr_dl *gate_ll = SDL(gate);
            size_t broadcast_len;
            int ret = ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
                LLADDR(gate_ll), sizeof(gate_ll->sdl_data),
                &broadcast_len);
            if (ret == 0 && broadcast_len <= UINT8_MAX) {
                gate_ll->sdl_alen = (u_char)broadcast_len;
                gate_ll->sdl_family = AF_LINK;
                gate_ll->sdl_len = sizeof(struct sockaddr_dl);
            }
            /* In case we're called before 1.0 sec. has elapsed */
            rt_setexpire(rt, MAX(timenow, 1));
        } else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
            sin_addr.s_addr))) {
            rt->rt_flags |= RTF_STATIC;
        }

        /* Set default maximum number of retries */
        la->la_maxtries = arp_maxtries;

        /* Become a regular mutex, just in case */
        RT_CONVERT_LOCK(rt);
        IFA_LOCK_SPIN(rt->rt_ifa);
        if (SIN(rt_key(rt))->sin_addr.s_addr ==
            (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
            IFA_UNLOCK(rt->rt_ifa);
            /*
             * This test used to be
             *	if (loif.if_flags & IFF_UP)
             * It allowed local traffic to be forced through the
             * hardware by configuring the loopback down.  However,
             * it causes problems during network configuration
             * for boards that can't receive packets they send.
             * It is now necessary to clear "useloopback" and
             * remove the route to force traffic out to the
             * hardware.
             */
            rt_setexpire(rt, 0);
            struct sockaddr_dl *gate_ll = SDL(gate);
            ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(gate_ll),
                SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
            if (useloopback) {
                if (rt->rt_ifp != lo_ifp) {
                    /*
                     * Purge any link-layer info caching.
                     */
                    if (rt->rt_llinfo_purge != NULL) {
                        rt->rt_llinfo_purge(rt);
                    }

                    /*
                     * Adjust route ref count for the
                     * interfaces.
                     */
                    if (rt->rt_if_ref_fn != NULL) {
                        rt->rt_if_ref_fn(lo_ifp, 1);
                        rt->rt_if_ref_fn(rt->rt_ifp, -1);
                    }
                }
                rt->rt_ifp = lo_ifp;
                /*
                 * If rmx_mtu is not locked, update it
                 * to the MTU used by the new interface.
                 */
                if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
                    rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
                }
            }
        } else {
            IFA_UNLOCK(rt->rt_ifa);
        }
        break;

    case RTM_DELETE:
        if (la == NULL) {
            break;
        }
        /*
         * Unchain it but defer the actual freeing until the route
         * itself is to be freed.  rt->rt_llinfo still points to
         * llinfo_arp, and likewise, la->la_rt still points to this
         * route entry, except that RTF_LLINFO is now cleared.
         */
        LIST_REMOVE(la, la_le);
        la->la_le.le_next = NULL;
        la->la_le.le_prev = NULL;
        arpstat.inuse--;

        /*
         * Purge any link-layer info caching.
         */
        if (rt->rt_llinfo_purge != NULL) {
            rt->rt_llinfo_purge(rt);
        }

        rt->rt_flags &= ~RTF_LLINFO;
        (void) arp_llinfo_flushq(la);
    }
}

/*
 * Convert a hardware address to a hex string for logging errors.
 */
static const char *__bidi_indexable
sdl_addr_to_hex(const struct sockaddr_dl *sdl_orig,
    char *__sized_by(buflen)orig_buf, int buflen)
{
    char *buf = orig_buf;
    int i;
    const struct sockaddr_dl *sdl = SDL(sdl_orig);
    const uint8_t *lladdr = CONST_LLADDR(sdl);
    int maxbytes = buflen / 3;
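    /* Each byte expands to 3 output chars: 2 hex digits plus ':' or NUL */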

    if (maxbytes > sdl->sdl_alen) {
        maxbytes = sdl->sdl_alen;
    }
    *buf = '\0';
    for (i = 0; i < maxbytes; i++) {
        snprintf(buf, 3, "%02x", lladdr[i]);
        buf += 2;
        *buf = (i == maxbytes - 1) ? '\0' : ':';
        buf++;
    }
    return orig_buf;
}

/*
 * arp_lookup_route will lookup the route for a given address.
 *
 * The address must be for a host on a local network on this interface.
 * If the returned route is non-NULL, the route is locked and the caller
 * is responsible for unlocking it and releasing its reference.
 */
static errno_t
arp_lookup_route(const struct in_addr *addr, int create, int proxy,
    route_t *route, unsigned int ifscope)
{
    struct sockaddr_inarp sin =
        { sizeof(sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
    const char *why = NULL;
    errno_t error = 0;
    route_t rt;

    *route = NULL;

    sin.sin_addr.s_addr = addr->s_addr;
    sin.sin_other = proxy ? SIN_PROXY : 0;

    /*
     * If the destination is a link-local address, don't
     * constrain the lookup (don't scope it).
     */
    if (IN_LINKLOCAL(ntohl(addr->s_addr))) {
        ifscope = IFSCOPE_NONE;
    }

    rt = rtalloc1_scoped(SA(&sin), create, 0, ifscope);
    if (rt == NULL) {
        return ENETUNREACH;
    }

    RT_LOCK(rt);

    if (rt->rt_flags & RTF_GATEWAY) {
        why = "host is not on local network";
        error = ENETUNREACH;
    } else if (!(rt->rt_flags & RTF_LLINFO)) {
        why = "could not allocate llinfo";
        error = ENOMEM;
    } else if (rt->rt_gateway->sa_family != AF_LINK) {
        why = "gateway route is not ours";
        error = EPROTONOSUPPORT;
    }

    if (error != 0) {
        if (create && (arp_verbose || log_arp_warnings)) {
            char tmp[MAX_IPv4_STR_LEN];
            log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
                __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
                sizeof(tmp)), why);
        }

        /*
         * If we hold the only reference to this route, and it is
         * a cloned route, and not static, and ARP had created
         * the route, then purge it from the routing table as
         * it is probably bogus.
         */
        if (rt->rt_refcnt == 1 &&
            (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
            RTF_WASCLONED) {
            /*
             * Prevent another thread from modifying rt_key,
             * rt_gateway via rt_setgate() after rt_lock is
             * dropped by marking the route as defunct.
             */
            rt->rt_flags |= RTF_CONDEMNED;
            RT_UNLOCK(rt);
            rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                rt_mask(rt), rt->rt_flags, NULL);
            rtfree(rt);
        } else {
            RT_REMREF_LOCKED(rt);
            RT_UNLOCK(rt);
        }
        return error;
    }

    /*
     * Caller releases reference and does RT_UNLOCK(rt).
     */
    *route = rt;
    return 0;
}

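/*
 * Report whether unicast ARP probing is in flight for this route,
 * i.e. whether its llreach record has outstanding probes.
 */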
boolean_t
arp_is_entry_probing(route_t p_route)
{
    struct llinfo_arp *__single llinfo = p_route->rt_llinfo;

    if (llinfo != NULL &&
        llinfo->la_llreach != NULL &&
        llinfo->la_llreach->lr_probes != 0) {
        return TRUE;
    }

    return FALSE;
}

__attribute__((noinline))
static void
post_kev_in_arpfailure(struct ifnet *ifp)
{
    struct kev_msg ev_msg = {};
    struct kev_in_arpfailure in_arpfailure = {};

    in_arpfailure.link_data.if_family = ifp->if_family;
    in_arpfailure.link_data.if_unit = ifp->if_unit;
    strlcpy(in_arpfailure.link_data.if_name, ifp->if_name, IFNAMSIZ);
    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_INET_SUBCLASS;
    ev_msg.event_code = KEV_INET_ARPRTRFAILURE;
    ev_msg.dv[0].data_ptr = &in_arpfailure;
    ev_msg.dv[0].data_length = sizeof(struct kev_in_arpfailure);
    dlil_post_complete_msg(NULL, &ev_msg);
}

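/*
 * Deliver ROUTE_LLENTRY_PROBED for this entry; if the entry is a
 * router, also walk the IPv4 routing tree so that dependent route
 * entries receive the same event.
 */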
__attribute__((noinline))
static void
arp_send_probe_notification(route_t route)
{
    route_event_enqueue_nwk_wq_entry(route, NULL,
        ROUTE_LLENTRY_PROBED, NULL, TRUE);

    if (route->rt_flags & RTF_ROUTER) {
        struct radix_node_head *rnh = NULL;
        struct route_event rt_ev;
        route_event_init(&rt_ev, route, NULL, ROUTE_LLENTRY_PROBED);
        /*
         * We already have a reference on rt.  The function
         * frees it before returning.
         */
        RT_UNLOCK(route);
        lck_mtx_lock(rnh_lock);
        rnh = rt_tables[AF_INET];

        if (rnh != NULL) {
            (void) rnh->rnh_walktree(rnh,
                route_event_walktree, (void *)&rt_ev);
        }
        lck_mtx_unlock(rnh_lock);
        RT_LOCK(route);
    }
}

/*
 * This is the ARP pre-output routine; care must be taken to ensure that
 * the "hint" route never gets freed via rtfree(), since the caller may
 * have stored it inside a struct route with a reference held for that
 * placeholder.
 */
errno_t
arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
    struct sockaddr_dl *__sized_by(ll_dest_len)ll_dest,
    size_t ll_dest_len, route_t hint,
    mbuf_t packet)
{
    route_t route __single = NULL;  /* output route */
    errno_t result = 0;
    struct sockaddr_dl *gateway;
    struct llinfo_arp *__single llinfo = NULL;
    boolean_t usable, probing = FALSE;
    uint64_t timenow;
    struct if_llreach *lr;
    struct ifaddr *rt_ifa;
    struct sockaddr *sa;
    uint32_t rtflags;
    struct sockaddr_dl sdl = {};
    boolean_t send_probe_notif = FALSE;
    boolean_t enqueued = FALSE;

    if (ifp == NULL || net_dest == NULL) {
        return EINVAL;
    }

    if (net_dest->sin_family != AF_INET) {
        return EAFNOSUPPORT;
    }

    if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) {
        return ENETDOWN;
    }

    /*
     * If we were given a route, verify the route and grab the gateway
     */
    if (hint != NULL) {
        /*
         * Callee holds a reference on the route and returns
         * with the route entry locked, upon success.
         */
        result = route_to_gwroute(SA(net_dest), hint, &route);
        if (result != 0) {
            return result;
        }
        if (route != NULL) {
            RT_LOCK_ASSERT_HELD(route);
        }
    }

    if ((packet != NULL && (packet->m_flags & M_BCAST)) ||
        in_broadcast(net_dest->sin_addr, ifp)) {
        size_t broadcast_len;
        SOCKADDR_ZERO(ll_dest, ll_dest_len);
        result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
            ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
            &broadcast_len);
        if (result == 0 && broadcast_len <= UINT8_MAX) {
            ll_dest->sdl_alen = (u_char)broadcast_len;
            ll_dest->sdl_family = AF_LINK;
            ll_dest->sdl_len = sizeof(struct sockaddr_dl);
        }
        goto release;
    }
    if ((packet != NULL && (packet->m_flags & M_MCAST)) ||
        ((ifp->if_flags & IFF_MULTICAST) &&
        IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) {
        if (route != NULL) {
            RT_UNLOCK(route);
        }
        result = dlil_resolve_multi(ifp,
            SA(net_dest),
            SA(ll_dest), ll_dest_len);
        if (route != NULL) {
            RT_LOCK(route);
        }
        goto release;
    }

    /*
     * If we didn't find a route, or the route doesn't have
     * link layer information, trigger the creation of the
     * route and link layer information.
     */
    if (route == NULL || route->rt_llinfo == NULL) {
        /* Clean up now while we can */
        if (route != NULL) {
            if (route == hint) {
                RT_REMREF_LOCKED(route);
                RT_UNLOCK(route);
            } else {
                RT_UNLOCK(route);
                rtfree(route);
            }
        }
        /*
         * Callee holds a reference on the route and returns
         * with the route entry locked, upon success.
         */
        result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
            ifp->if_index);
        if (result == 0) {
            RT_LOCK_ASSERT_HELD(route);
        }
    }

    if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
        /* In case result is 0 but no route, return an error */
        if (result == 0) {
            result = EHOSTUNREACH;
        }

        if (route != NULL && route->rt_llinfo == NULL) {
            char tmp[MAX_IPv4_STR_LEN];
            log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
                __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
                tmp, sizeof(tmp)));
        }
        goto release;
    }

    if ((ifp->if_flags & IFF_NOARP) != 0) {
        result = ENOTSUP;
        goto release;
    }

    /*
     * Now that we have the right route, is it filled in?
     */
    gateway = SDL(route->rt_gateway);
    timenow = net_uptime();
    VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
    VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);

    usable = ((route->rt_expire == 0 || route->rt_expire > timenow) &&
        gateway != NULL && gateway->sdl_family == AF_LINK &&
        gateway->sdl_alen != 0);

    if (usable) {
        boolean_t unreachable = !arp_llreach_reachable(llinfo);

        /* Entry is usable, so fill in info for caller */
        SOCKADDR_COPY(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
        result = 0;
        arp_llreach_use(llinfo);        /* Mark use timestamp */

        lr = llinfo->la_llreach;
        if (lr == NULL) {
            goto release;
        }
        rt_ifa = route->rt_ifa;

        if (unreachable || (llinfo->la_flags & LLINFO_PROBING)) {
            /* Become a regular mutex, just in case */
            RT_CONVERT_LOCK(route);
            IFLR_LOCK_SPIN(lr);

            if (lr->lr_probes < arp_unicast_lim) {
                /*
                 * Mark the entry with a la_probeexp deadline
                 * so the probe timer gets scheduled (if it
                 * isn't already).  This gets cleared the
                 * moment we get an ARP reply.
                 */
                probing = TRUE;
                if (lr->lr_probes == 0) {
                    llinfo->la_probeexp = (timenow + arpt_probe);
                    llinfo->la_flags |= LLINFO_PROBING;
                    /*
                     * Provide notification that ARP unicast
                     * probing has started.
                     * We only do it for the first unicast probe
                     * attempt.
                     */
                    send_probe_notif = TRUE;
                }

                /*
                 * Start the unicast probe and anticipate a reply;
                 * afterwards, return existing entry to caller and
                 * let it be used anyway.  If peer is non-existent
                 * we'll broadcast ARP next time around.
                 */
                lr->lr_probes++;
                SOCKADDR_ZERO(&sdl, sizeof(sdl));
                sdl.sdl_alen = ifp->if_addrlen;
                bcopy(&lr->lr_key.addr, LLADDR(&sdl),
                    ifp->if_addrlen);
                IFLR_UNLOCK(lr);
                IFA_LOCK_SPIN(rt_ifa);
                ifa_addref(rt_ifa);
                sa = rt_ifa->ifa_addr;
                IFA_UNLOCK(rt_ifa);
                rtflags = route->rt_flags;
                RT_UNLOCK(route);
                dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
                    SDL(&sdl),
                    SA(net_dest), rtflags);
                ifa_remref(rt_ifa);
                RT_LOCK(route);
                goto release;
            }

            IFLR_UNLOCK(lr);
        }
        if (!unreachable &&
            !(llinfo->la_flags & LLINFO_PROBING)) {
            /*
             * Normal case: the peer is still reachable and
             * we're not probing, so return the filled-in
             * entry to the caller as is.
             */
            goto release;
        }
    }

    /*
     * Route wasn't complete/valid; we need to send out ARP request.
     * If we've exceeded the limit of la_holdq, drop from the head
     * of queue and add this packet to the tail.  If we end up with
     * RTF_REJECT below, we'll dequeue this from tail and have the
     * caller free the packet instead.  It's safe to do that since
     * we still hold the route's rt_lock.
     */
    if (packet != NULL) {
        enqueued = arp_llinfo_addq(llinfo, packet);
    } else {
        llinfo->la_prbreq_cnt++;
    }
    /*
     * Regardless of permanent vs. expirable entry, we need to
     * avoid having packets sit in la_holdq forever; thus mark the
     * entry with la_probeexp deadline to trigger the probe timer
     * to be scheduled (if not already).  This gets cleared the
     * moment we get an ARP reply.
     */
    probing = TRUE;
    if ((qlen(&llinfo->la_holdq) + llinfo->la_prbreq_cnt) == 1) {
        llinfo->la_probeexp = (timenow + arpt_probe);
        llinfo->la_flags |= LLINFO_PROBING;
    }

    if (route->rt_expire) {
        route->rt_flags &= ~RTF_REJECT;
        if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
            rt_setexpire(route, timenow);
            if (llinfo->la_asked++ < llinfo->la_maxtries) {
                boolean_t sendkev = FALSE;

                rt_ifa = route->rt_ifa;
                lr = llinfo->la_llreach;
                /* Become a regular mutex, just in case */
                RT_CONVERT_LOCK(route);
                /* Update probe count, if applicable */
                if (lr != NULL) {
                    IFLR_LOCK_SPIN(lr);
                    lr->lr_probes++;
                    IFLR_UNLOCK(lr);
                }
                if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
                    route->rt_flags & RTF_ROUTER &&
                    llinfo->la_asked > 1) {
                    sendkev = TRUE;
                    llinfo->la_flags |= LLINFO_RTRFAIL_EVTSENT;
                }
                IFA_LOCK_SPIN(rt_ifa);
                ifa_addref(rt_ifa);
                sa = rt_ifa->ifa_addr;
                IFA_UNLOCK(rt_ifa);
                arp_llreach_use(llinfo); /* Mark use tstamp */
                rtflags = route->rt_flags;
                RT_UNLOCK(route);
                dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
                    NULL, SA(net_dest),
                    rtflags);
                ifa_remref(rt_ifa);
                if (sendkev) {
                    post_kev_in_arpfailure(ifp);
                }
                RT_LOCK(route);
                goto release_just_return;
            } else {
                route->rt_flags |= RTF_REJECT;
                rt_setexpire(route,
                    route->rt_expire + arpt_down);
                llinfo->la_asked = 0;
                /*
                 * Remove the packet that was just added above;
                 * don't free it since we're not returning
                 * EJUSTRETURN.  The caller will handle the
                 * freeing.  Since we haven't dropped rt_lock
                 * from the time of _addq() above, this packet
                 * must be at the tail.
                 */
                if (packet != NULL && enqueued) {
                    classq_pkt_t pkt =
                        CLASSQ_PKT_INITIALIZER(pkt);

                    _getq_tail(&llinfo->la_holdq, &pkt);
                    os_atomic_dec(&arpstat.held, relaxed);
                    VERIFY(pkt.cp_mbuf == packet);
                }
                result = EHOSTUNREACH;
                /*
                 * Enqueue work item to invoke callback for
                 * this route entry
                 */
                route_event_enqueue_nwk_wq_entry(route, NULL,
                    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
                goto release;
            }
        }
    }

release_just_return:
    /* The packet is now held inside la_holdq or dropped */
    result = EJUSTRETURN;
    if (packet != NULL && !enqueued) {
        m_freem(packet);
        packet = NULL;
    }

release:
    if (result == EHOSTUNREACH) {
        os_atomic_inc(&arpstat.dropped, relaxed);
    }

    if (route != NULL) {
        /*
         * Set the qset id only if there are traffic rules;
         * otherwise, for bridge use cases, the flag would be set
         * even though traffic rules won't be run on the
         * downstream interface.
         */
        if (result == 0 && ifp->if_eth_traffic_rule_count) {
            uint64_t qset_id = rt_lookup_qset_id(route, true);
            if (packet != NULL) {
                packet->m_pkthdr.pkt_ext_flags |= PKTF_EXT_QSET_ID_VALID;
                packet->m_pkthdr.pkt_mpriv_qsetid = qset_id;
            }
        }

        if (send_probe_notif) {
            arp_send_probe_notification(route);
        }

        if (route == hint) {
            RT_REMREF_LOCKED(route);
            RT_UNLOCK(route);
        } else {
            RT_UNLOCK(route);
            rtfree(route);
        }
    }
    if (probing) {
        /* Do this after we drop rt_lock to preserve ordering */
        lck_mtx_lock(rnh_lock);
        arp_sched_probe(NULL);
        lck_mtx_unlock(rnh_lock);
    }
    return result;
}

errno_t
arp_ip_handle_input(ifnet_t ifp, u_short arpop,
    const struct sockaddr_dl *sender_hw_orig,
    const struct sockaddr_in *sender_ip,
    const struct sockaddr_in *target_ip)
{
    char ipv4str[MAX_IPv4_STR_LEN];
    struct sockaddr_dl proxied = {};
    struct sockaddr_dl *gateway, *target_hw = NULL;
    struct ifaddr *ifa;
    struct in_ifaddr *ia;
    struct in_ifaddr *best_ia = NULL;
    struct sockaddr_in best_ia_sin;
    route_t __single route = NULL;
    char buf[3 * MAX_HW_LEN];       /* enough for MAX_HW_LEN byte hw address */
    struct llinfo_arp *__single llinfo;
    errno_t error;
    int created_announcement = 0;
    int bridged = 0, is_bridge = 0;
    uint32_t rt_evcode = 0;

    /*
     * Forge the sender_hw sockaddr to extract the
     * complete hardware address.
     */
    const struct sockaddr_dl *sender_hw = SDL(sender_hw_orig);
    /*
     * Here and other places within this routine where we don't hold
     * rnh_lock, trade accuracy for speed for the common scenarios
     * and avoid the use of atomic updates.
     */
    arpstat.received++;

    /* Do not respond to requests for 0.0.0.0 */
    if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) {
        goto done;
    }

    if (ifp->if_bridge) {
        bridged = 1;
    }
    if (ifp->if_type == IFT_BRIDGE) {
        is_bridge = 1;
    }

    if (arpop == ARPOP_REPLY) {
        arpstat.rxreplies++;
    }

    /*
     * Determine if this ARP is for us
     */
    lck_rw_lock_shared(&in_ifaddr_rwlock);
    TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
        IFA_LOCK_SPIN(&ia->ia_ifa);
        if (ia->ia_ifp == ifp &&
            ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
            best_ia = ia;
            best_ia_sin = best_ia->ia_addr;
            ifa_addref(&ia->ia_ifa);
            IFA_UNLOCK(&ia->ia_ifa);
            lck_rw_done(&in_ifaddr_rwlock);
            goto match;
        }
        IFA_UNLOCK(&ia->ia_ifa);
    }

    TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
        IFA_LOCK_SPIN(&ia->ia_ifa);
        if (ia->ia_ifp == ifp &&
            ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
            best_ia = ia;
            best_ia_sin = best_ia->ia_addr;
            ifa_addref(&ia->ia_ifa);
            IFA_UNLOCK(&ia->ia_ifa);
            lck_rw_done(&in_ifaddr_rwlock);
            goto match;
        }
        IFA_UNLOCK(&ia->ia_ifa);
    }

#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia)                            \
    (ia->ia_ifp->if_bridge == ifp->if_softc &&                           \
    bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
    addr == ia->ia_addr.sin_addr.s_addr)
    /*
     * Check the case when bridge shares its MAC address with
     * some of its children, so packets are claimed by bridge
     * itself (bridge_input() does it first), but they are really
     * meant to be destined to the bridge member.
     */
    if (is_bridge) {
        TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
            ia_hash) {
            IFA_LOCK_SPIN(&ia->ia_ifa);
            if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
                ifp, ia)) {
                ifp = ia->ia_ifp;
                best_ia = ia;
                best_ia_sin = best_ia->ia_addr;
                ifa_addref(&ia->ia_ifa);
                IFA_UNLOCK(&ia->ia_ifa);
                lck_rw_done(&in_ifaddr_rwlock);
                goto match;
            }
            IFA_UNLOCK(&ia->ia_ifa);
        }
    }
#undef BDG_MEMBER_MATCHES_ARP
    lck_rw_done(&in_ifaddr_rwlock);
1750
1751 /*
1752 * No match, use the first inet address on the receive interface
1753 * as a dummy address for the rest of the function; we may be
1754 * proxying for another address.
1755 */
1756 ifnet_lock_shared(ifp);
1757 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1758 IFA_LOCK_SPIN(ifa);
1759 if (ifa->ifa_addr->sa_family != AF_INET) {
1760 IFA_UNLOCK(ifa);
1761 continue;
1762 }
1763 best_ia = (struct in_ifaddr *__single)ifa;
1764 best_ia_sin = best_ia->ia_addr;
1765 ifa_addref(ifa);
1766 IFA_UNLOCK(ifa);
1767 ifnet_lock_done(ifp);
1768 goto match;
1769 }
1770 ifnet_lock_done(ifp);
1771
1772 /*
1773 * If we're not a bridge member, or if we are but there's no
1774 * IPv4 address to use for the interface, drop the packet.
1775 */
1776 if (!bridged || best_ia == NULL) {
1777 goto done;
1778 }
1779
1780 match:
1781 /* If the packet is from this interface, ignore the packet */
1782 if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
1783 sender_hw->sdl_alen) == 0) {
1784 goto done;
1785 }

	/* Check for a conflict */
	if (!bridged &&
	    sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
		struct kev_msg ev_msg;
		struct kev_in_collision *in_collision;
		u_char storage[sizeof(struct kev_in_collision) + MAX_HW_LEN];

		bzero(&ev_msg, sizeof(struct kev_msg));
		bzero(storage, (sizeof(struct kev_in_collision) + MAX_HW_LEN));
		in_collision = (struct kev_in_collision *)(void *)storage;
		log(LOG_ERR, "%s duplicate IP address %s sent from "
		    "address %s\n", if_name(ifp),
		    inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
		    sizeof(ipv4str)), sdl_addr_to_hex(sender_hw, buf,
		    (int)sizeof(buf)));

		/* Send a kernel event so anyone can learn of the conflict */
		in_collision->link_data.if_family = ifp->if_family;
		in_collision->link_data.if_unit = ifp->if_unit;
		strlcpy(&in_collision->link_data.if_name[0],
		    ifp->if_name, IFNAMSIZ);
		in_collision->ia_ipaddr = sender_ip->sin_addr;
		in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
		    sender_hw->sdl_alen : MAX_HW_LEN;
		bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
		    in_collision->hw_len);
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_INET_SUBCLASS;
		ev_msg.event_code = KEV_INET_ARPCOLLISION;
		ev_msg.dv[0].data_ptr = in_collision;
		ev_msg.dv[0].data_length =
		    sizeof(struct kev_in_collision) + in_collision->hw_len;
		ev_msg.dv[1].data_length = 0;
		dlil_post_complete_msg(NULL, &ev_msg);
		os_atomic_inc(&arpstat.dupips, relaxed);
		goto respond;
	}
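
	/*
	 * How a userland observer might consume the collision event above --
	 * a sketch only (the kernel event socket API is from
	 * <sys/kern_event.h>; verify the constants against the SDK):
	 *
	 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
	 *	struct kev_request req = {
	 *		.vendor_code  = KEV_VENDOR_APPLE,
	 *		.kev_class    = KEV_NETWORK_CLASS,
	 *		.kev_subclass = KEV_INET_SUBCLASS,
	 *	};
	 *	ioctl(fd, SIOCSKEVFILT, &req);	// only deliver inet events
	 *	char buf[1024];
	 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
	 *	struct kern_event_msg *ev = (struct kern_event_msg *)buf;
	 *	if (n > 0 && ev->event_code == KEV_INET_ARPCOLLISION) {
	 *		// payload (struct kev_in_collision) starts at
	 *		// ev->event_data
	 *	}
	 */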

	/*
	 * Look up the routing entry. If it doesn't exist and we are the
	 * target, and the sender isn't 0.0.0.0, go ahead and create one.
	 * Callee holds a reference on the route and returns with the route
	 * entry locked, upon success.
	 */
	error = arp_lookup_route(&sender_ip->sin_addr,
	    (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
	    sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);

	if (error == 0) {
		RT_LOCK_ASSERT_HELD(route);
	}

	if (error || route == NULL || route->rt_gateway == NULL) {
		if (arpop != ARPOP_REQUEST) {
			goto respond;
		}

		if (arp_sendllconflict && send_conflicting_probes != 0 &&
		    (ifp->if_eflags & IFEF_ARPLL) &&
		    IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
		    sender_ip->sin_addr.s_addr == INADDR_ANY) {
			/*
			 * Verify this ARP probe doesn't conflict with
			 * an IPv4LL we know of on another interface.
			 */
			if (route != NULL) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				route = NULL;
			}
			/*
			 * Callee holds a reference on the route and returns
			 * with the route entry locked, upon success.
			 */
			error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
			    &route, ifp->if_index);

			if (error != 0 || route == NULL ||
			    route->rt_gateway == NULL) {
				goto respond;
			}

			RT_LOCK_ASSERT_HELD(route);

			gateway = SDL(route->rt_gateway);
			if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
			    (gateway->sdl_alen != sender_hw->sdl_alen ||
			    bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
			    gateway->sdl_alen) != 0)) {
				/*
				 * A node is probing for an IPv4LL we know
				 * exists on a different interface. We respond
				 * with a conflicting probe to force the new
				 * device to pick a different IPv4LL address.
				 */
				if (arp_verbose || log_arp_warnings) {
					log(LOG_INFO, "arp: %s on %s sent "
					    "probe for %s, already on %s\n",
					    sdl_addr_to_hex(sender_hw, buf,
					    (int)sizeof(buf)), if_name(ifp),
					    inet_ntop(AF_INET,
					    &target_ip->sin_addr, ipv4str,
					    sizeof(ipv4str)),
					    if_name(route->rt_ifp));
					log(LOG_INFO, "arp: sending "
					    "conflicting probe to %s on %s\n",
					    sdl_addr_to_hex(sender_hw, buf,
					    (int)sizeof(buf)), if_name(ifp));
				}
				/* Mark use timestamp */
				if (route->rt_llinfo != NULL) {
					arp_llreach_use(route->rt_llinfo);
				}
				/* We're done with the route */
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				route = NULL;
				/*
				 * Send a conservative unicast "ARP probe"
				 * to prompt the other device to pick a
				 * different number. It won't do so if it has
				 * already committed to that address, and the
				 * probe does not imply that we own the
				 * address. The link address is always
				 * present; it's never freed.
				 */
				ifnet_lock_shared(ifp);
				ifa = ifp->if_lladdr;
				ifa_addref(ifa);
				ifnet_lock_done(ifp);
				dlil_send_arp_internal(ifp, ARPOP_REQUEST,
				    SDL(ifa->ifa_addr),
				    SA(sender_ip),
				    sender_hw,
				    SA(target_ip));
				ifa_remref(ifa);
				ifa = NULL;
				os_atomic_inc(&arpstat.txconflicts, relaxed);
			}
			goto respond;
		} else if (keep_announcements != 0 &&
		    target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
			/*
			 * Don't create an entry if the address is link-local
			 * and link-local ARP (IFEF_ARPLL) is disabled.
			 */
			if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
			    (ifp->if_eflags & IFEF_ARPLL)) {
				if (route != NULL) {
					RT_REMREF_LOCKED(route);
					RT_UNLOCK(route);
					route = NULL;
				}
				/*
				 * Callee holds a reference on the route and
				 * returns with the route entry locked, upon
				 * success.
				 */
				error = arp_lookup_route(&sender_ip->sin_addr,
				    1, 0, &route, ifp->if_index);

				if (error == 0) {
					RT_LOCK_ASSERT_HELD(route);
				}

				if (error == 0 && route != NULL &&
				    route->rt_gateway != NULL) {
					created_announcement = 1;
				}
			}
			if (created_announcement == 0) {
				goto respond;
			}
		} else {
			goto respond;
		}
	}

	RT_LOCK_ASSERT_HELD(route);
	VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
	VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);

	gateway = SDL(route->rt_gateway);
	if (!bridged && route->rt_ifp != ifp) {
		if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
		    !(ifp->if_eflags & IFEF_ARPLL)) {
			if (arp_verbose || log_arp_warnings) {
				log(LOG_ERR, "arp: %s is on %s but got "
				    "reply from %s on %s\n",
				    inet_ntop(AF_INET, &sender_ip->sin_addr,
				    ipv4str, sizeof(ipv4str)),
				    if_name(route->rt_ifp),
				    sdl_addr_to_hex(sender_hw, buf,
				    (int)sizeof(buf)), if_name(ifp));
			}
			goto respond;
		} else {
			/* Don't change a permanent address */
			if (route->rt_expire == 0) {
				goto respond;
			}

			/*
			 * We're about to check and/or change the route's ifp
			 * and ifa, so do the lock dance: drop rt_lock, hold
			 * rnh_lock and re-hold rt_lock to avoid violating the
			 * lock ordering. We have an extra reference on the
			 * route, so it won't go away while we do this.
			 */
			RT_UNLOCK(route);
			lck_mtx_lock(rnh_lock);
			RT_LOCK(route);
			/*
			 * Don't change the cloned route away from the
			 * parent's interface if the address did resolve
			 * or if the route is defunct. rt_ifp on both
			 * the parent and the clone can be freely accessed
			 * now that we have acquired rnh_lock.
			 */
			gateway = SDL(route->rt_gateway);
			if ((gateway->sdl_alen != 0 &&
			    route->rt_parent != NULL &&
			    route->rt_parent->rt_ifp == route->rt_ifp) ||
			    (route->rt_flags & RTF_CONDEMNED)) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				route = NULL;
				lck_mtx_unlock(rnh_lock);
				goto respond;
			}
			if (route->rt_ifp != ifp) {
				/*
				 * Purge any link-layer info caching.
				 */
				if (route->rt_llinfo_purge != NULL) {
					route->rt_llinfo_purge(route);
				}

				/* Adjust route ref count for the interfaces */
				if (route->rt_if_ref_fn != NULL) {
					route->rt_if_ref_fn(ifp, 1);
					route->rt_if_ref_fn(route->rt_ifp, -1);
				}
			}
			/* Change the interface the existing route is on */
			route->rt_ifp = ifp;
			/*
			 * If rmx_mtu is not locked, update it
			 * to the MTU used by the new interface.
			 */
			if (!(route->rt_rmx.rmx_locks & RTV_MTU)) {
				route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
				if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
					route->rt_rmx.rmx_mtu = IN6_LINKMTU(route->rt_ifp);
					/* Further adjust the size for CLAT46 expansion */
					route->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
				}
			}
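			/*
			 * The CLAT46 adjustment above leaves the IPv6 link
			 * MTU minus the v4-to-v6 header growth (assumption:
			 * CLAT46_HDR_EXPANSION_OVERHD is that 20-byte delta,
			 * a 40-byte IPv6 header replacing a 20-byte IPv4
			 * header), so translated packets still fit.
			 */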

			rtsetifa(route, &best_ia->ia_ifa);
			gateway->sdl_index = ifp->if_index;
			RT_UNLOCK(route);
			lck_mtx_unlock(rnh_lock);
			RT_LOCK(route);
			/* Don't bother if the route is down */
			if (!(route->rt_flags & RTF_UP)) {
				goto respond;
			}
			/* Refresh gateway pointer */
			gateway = SDL(route->rt_gateway);
		}
		RT_LOCK_ASSERT_HELD(route);
	}

	if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
	    CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
		if (route->rt_expire != 0 &&
		    (arp_verbose || log_arp_warnings)) {
			char buf2[3 * MAX_HW_LEN];
			log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
			    inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
			    sizeof(ipv4str)),
			    sdl_addr_to_hex(gateway, buf, (int)sizeof(buf)),
			    sdl_addr_to_hex(sender_hw, buf2, (int)sizeof(buf2)),
			    if_name(ifp));
		} else if (route->rt_expire == 0) {
			if (arp_verbose || log_arp_warnings) {
				log(LOG_ERR, "arp: %s attempts to modify "
				    "permanent entry for %s on %s\n",
				    sdl_addr_to_hex(sender_hw, buf,
				    (int)sizeof(buf)),
				    inet_ntop(AF_INET, &sender_ip->sin_addr,
				    ipv4str, sizeof(ipv4str)),
				    if_name(ifp));
			}
			goto respond;
		}
	}

	/* Copy the sender hardware address into the route's gateway address */
	gateway->sdl_alen = sender_hw->sdl_alen;
	bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);

	/* Update the expire time for the route and clear the reject flag */
	if (route->rt_expire != 0) {
		rt_setexpire(route, net_uptime() + arpt_keep);
	}
	route->rt_flags &= ~RTF_REJECT;

	/* cache the gateway (sender HW) address */
	arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
	    (arpop == ARPOP_REPLY), &rt_evcode);

	llinfo = route->rt_llinfo;
	/* send a notification that the route is back up */
	if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
	    route->rt_flags & RTF_ROUTER &&
	    llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
		struct kev_msg ev_msg;
		struct kev_in_arpalive in_arpalive;

		llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
		RT_UNLOCK(route);
		bzero(&ev_msg, sizeof(ev_msg));
		bzero(&in_arpalive, sizeof(in_arpalive));
		in_arpalive.link_data.if_family = ifp->if_family;
		in_arpalive.link_data.if_unit = ifp->if_unit;
		strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_INET_SUBCLASS;
		ev_msg.event_code = KEV_INET_ARPRTRALIVE;
		ev_msg.dv[0].data_ptr = &in_arpalive;
		ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
		dlil_post_complete_msg(NULL, &ev_msg);
		RT_LOCK(route);
	}
	/* Update the llinfo; any queued packets are sent out below */
	llinfo->la_asked = 0;
	llinfo->la_flags &= ~LLINFO_PROBING;
	llinfo->la_prbreq_cnt = 0;

	if (rt_evcode) {
		rt_lookup_qset_id(route, false);
		/*
		 * Enqueue work item to invoke callback for this route entry
		 */
		route_event_enqueue_nwk_wq_entry(route, NULL, rt_evcode, NULL, TRUE);

		if (route->rt_flags & RTF_ROUTER) {
			struct radix_node_head *rnh = NULL;
			struct route_event rt_ev;
			route_event_init(&rt_ev, route, NULL, rt_evcode);
			/*
			 * We already have a reference on rt. The function
			 * frees it before returning.
			 */
			RT_UNLOCK(route);
			lck_mtx_lock(rnh_lock);
			rnh = rt_tables[AF_INET];

			if (rnh != NULL) {
				(void) rnh->rnh_walktree(rnh, route_event_walktree,
				    (void *)&rt_ev);
			}
			lck_mtx_unlock(rnh_lock);
			RT_LOCK(route);
		}
	}

	if (!qempty(&llinfo->la_holdq)) {
		uint32_t held;
		struct mbuf *m0;
		classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);

		_getq_all(&llinfo->la_holdq, &pkt, NULL, &held, NULL);
		m0 = pkt.cp_mbuf;
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: sending %u held packets\n",
			    __func__, held);
		}
		os_atomic_add(&arpstat.held, -held, relaxed);
		VERIFY(qempty(&llinfo->la_holdq));
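		/*
		 * Drop rt_lock before handing the chain to the driver; the
		 * reference we hold keeps the route alive across the call,
		 * and dlil_output() consumes the mbuf chain either way.
		 */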
		RT_UNLOCK(route);
		dlil_output(ifp, PF_INET, m0, (caddr_t)route,
		    rt_key(route), 0, NULL);
		RT_REMREF(route);
		route = NULL;
	}

respond:
	if (route != NULL) {
		/* Mark use timestamp if we're going to send a reply */
		if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) {
			arp_llreach_use(route->rt_llinfo);
		}
		RT_REMREF_LOCKED(route);
		RT_UNLOCK(route);
		route = NULL;
	}

	if (arpop != ARPOP_REQUEST) {
		goto done;
	}

	/* See comments at the beginning of this routine */
	arpstat.rxrequests++;

	/* If we are not the target, check if we should proxy */
	if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
		/*
		 * Find a proxy route; callee holds a reference on the
		 * route and returns with the route entry locked, upon
		 * success.
		 */
		error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
		    &route, ifp->if_index);

		if (error == 0) {
			RT_LOCK_ASSERT_HELD(route);
			/*
			 * Return proxied ARP replies only on the interface
			 * or bridge cluster where this network resides.
			 * Otherwise we may conflict with the host we are
			 * proxying for.
			 */
			if (route->rt_ifp != ifp &&
			    (route->rt_ifp->if_bridge != ifp->if_bridge ||
			    ifp->if_bridge == NULL)) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				goto done;
			}
			proxied = *SDL(route->rt_gateway);
			target_hw = &proxied;
		} else {
			/*
			 * We don't have a route entry indicating we should
			 * act as a proxy. If we aren't configured to proxy
			 * for everything (arp_proxyall), we are done.
			 */
			if (!arp_proxyall) {
				goto done;
			}

			/*
			 * See if we have a route to the target ip before
			 * we proxy it.
			 */
			route = rtalloc1_scoped(__DECONST_SA(target_ip), 0, 0, ifp->if_index);
			if (!route) {
				goto done;
			}

			/*
			 * Don't proxy for hosts already on the same interface.
			 */
			RT_LOCK(route);
			if (route->rt_ifp == ifp) {
				RT_UNLOCK(route);
				rtfree(route);
				goto done;
			}
		}
		/* Mark use timestamp */
		if (route->rt_llinfo != NULL) {
			arp_llreach_use(route->rt_llinfo);
		}
		RT_REMREF_LOCKED(route);
		RT_UNLOCK(route);
	}

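	/*
	 * Send the reply. Note the role reversal relative to the request:
	 * the requested (or proxied) address and hardware pair go out as
	 * the sender, and the original sender becomes the target.
	 */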
	dlil_send_arp(ifp, ARPOP_REPLY,
	    target_hw, SA(target_ip),
	    sender_hw, SA(sender_ip), 0);

done:
	if (best_ia != NULL) {
		ifa_remref(&best_ia->ia_ifa);
	}
	return 0;
}

void
arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
{
	struct sockaddr *sa;

	IFA_LOCK(ifa);
	ifa->ifa_rtrequest = arp_rtrequest;
	ifa->ifa_flags |= RTF_CLONING;
	sa = ifa->ifa_addr;
	IFA_UNLOCK(ifa);
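	/*
	 * Announce the new address: passing sa as both the sender and the
	 * target protocol address (with no target hardware address) makes
	 * this a gratuitous ARP, letting neighbors refresh their caches
	 * and surfacing any address conflict.
	 */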
	if ((ifp->if_flags & IFF_NOARP) == 0) {
		dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
	}
}

static int
arp_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldlen = (size_t)sizeof(struct arpstat);
	}

	return SYSCTL_OUT(req, &arpstat, MIN(sizeof(arpstat), req->oldlen));
}

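/*
 * Userland reads these counters through the sysctl backed by this handler.
 * A minimal sketch (assumption: the OID is registered elsewhere in this
 * file as "net.link.ether.inet.stats"; verify the name and the arpstat
 * layout against the headers before relying on it):
 *
 *	struct arpstat st;
 *	size_t len = sizeof(st);
 *	if (sysctlbyname("net.link.ether.inet.stats", &st, &len, NULL, 0) == 0) {
 *		printf("rxrequests=%u rxreplies=%u dupips=%u\n",
 *		    st.rxrequests, st.rxreplies, st.dupips);
 *	}
 */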