xref: /xnu-8020.121.3/bsd/netinet/in_arp.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2004-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1989, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  */
61 
62 #include <kern/debug.h>
63 #include <netinet/in_arp.h>
64 #include <sys/types.h>
65 #include <sys/param.h>
66 #include <sys/kernel_types.h>
67 #include <sys/syslog.h>
68 #include <sys/systm.h>
69 #include <sys/time.h>
70 #include <sys/kernel.h>
71 #include <sys/mbuf.h>
72 #include <sys/sysctl.h>
73 #include <sys/mcache.h>
74 #include <sys/protosw.h>
75 #include <string.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/dlil.h>
79 #include <net/if_types.h>
80 #include <net/if_llreach.h>
81 #include <net/route.h>
82 #include <net/nwk_wq.h>
83 
84 #include <netinet/if_ether.h>
85 #include <netinet/in_var.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip6.h>
88 #include <kern/zalloc.h>
89 
90 #include <kern/thread.h>
91 #include <kern/sched_prim.h>
92 
93 #define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen))
94 
95 static const size_t MAX_HW_LEN = 10;
96 
97 /*
98  * Synchronization notes:
99  *
100  * The global list of ARP entries is stored in llinfo_arp; an entry
101  * gets inserted into the list when the route is created and gets
102  * removed from the list when it is deleted; this is done as part
103  * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
104  *
105  * Because rnh_lock and rt_lock for the entry are held during those
106  * operations, the same locks (and thus lock ordering) must be used
107  * elsewhere to access the relevant data structure fields:
108  *
109  * la_le.{le_next,le_prev}, la_rt
110  *
111  *	- Routing lock (rnh_lock)
112  *
113  * la_holdq, la_asked, la_llreach, la_lastused, la_flags
114  *
115  *	- Routing entry lock (rt_lock)
116  *
117  * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
118  * as the route entry itself.  When a route is deleted (RTM_DELETE),
119  * it is simply removed from the global list but the memory is not
120  * freed until the route itself is freed.
121  */
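/*
 * A minimal sketch (not part of the original file) of the lock
 * ordering described above, modeled on in_arpdrain() below: take
 * rnh_lock before walking the list, then each entry's rt_lock.
 *
 *	struct llinfo_arp *la;
 *
 *	lck_mtx_lock(rnh_lock);
 *	LIST_FOREACH(la, &llinfo_arp, la_le) {
 *		RT_LOCK(la->la_rt);
 *		... inspect la_holdq, la_asked, la_flags ...
 *		RT_UNLOCK(la->la_rt);
 *	}
 *	lck_mtx_unlock(rnh_lock);
 */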
122 struct llinfo_arp {
123 	/*
124 	 * The following are protected by rnh_lock
125 	 */
126 	LIST_ENTRY(llinfo_arp) la_le;
127 	struct  rtentry *la_rt;
128 	/*
129 	 * The following are protected by rt_lock
130 	 */
131 	class_queue_t la_holdq;         /* packets awaiting resolution */
132 	struct  if_llreach *la_llreach; /* link-layer reachability record */
133 	u_int64_t la_lastused;          /* last used timestamp */
134 	u_int32_t la_asked;             /* # of requests sent */
135 	u_int32_t la_maxtries;          /* retry limit */
136 	u_int64_t la_probeexp;          /* probe deadline timestamp */
137 	u_int32_t la_prbreq_cnt;        /* probe request count */
138 	u_int32_t la_flags;
139 #define LLINFO_RTRFAIL_EVTSENT         0x1 /* sent an ARP event */
140 #define LLINFO_PROBING                 0x2 /* waiting for an ARP reply */
141 };
142 
143 static LIST_HEAD(, llinfo_arp) llinfo_arp;
144 
145 static thread_call_t arp_timeout_tcall;
146 static int arp_timeout_run;             /* arp_timeout is scheduled to run */
147 static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1);
148 static void arp_sched_timeout(struct timeval *);
149 
150 static thread_call_t arp_probe_tcall;
151 static int arp_probe_run;               /* arp_probe is scheduled to run */
152 static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1);
153 static void arp_sched_probe(struct timeval *);
154 
155 static void arptfree(struct llinfo_arp *, void *);
156 static errno_t arp_lookup_route(const struct in_addr *, int,
157     int, route_t *, unsigned int);
158 static int arp_getstat SYSCTL_HANDLER_ARGS;
159 
160 static struct llinfo_arp *arp_llinfo_alloc(zalloc_flags_t);
161 static void arp_llinfo_free(void *);
162 static uint32_t arp_llinfo_flushq(struct llinfo_arp *);
163 static void arp_llinfo_purge(struct rtentry *);
164 static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
165 static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
166 static void arp_llinfo_refresh(struct rtentry *);
167 
168 static __inline void arp_llreach_use(struct llinfo_arp *);
169 static __inline int arp_llreach_reachable(struct llinfo_arp *);
170 static void arp_llreach_alloc(struct rtentry *, struct ifnet *, void *,
171     unsigned int, boolean_t, uint32_t *);
172 
173 extern int tvtohz(struct timeval *);
174 
175 static int arpinit_done;
176 
177 SYSCTL_DECL(_net_link_ether);
178 SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
179 
180 static int arpt_prune = (5 * 60 * 1); /* walk list every 5 minutes */
181 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
182     CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");
183 
184 #define ARP_PROBE_TIME         7 /* seconds */
185 static u_int32_t arpt_probe = ARP_PROBE_TIME;
186 SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl,
187     CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, "");
188 
189 static int arpt_keep = (20 * 60); /* once resolved, good for 20 more minutes */
190 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
191     CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");
192 
193 static int arpt_down = 20;      /* once declared down, don't send for 20 sec */
194 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
195     CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");
196 
197 static int arp_llreach_base = 120;      /* seconds */
198 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
199     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0,
200     "default ARP link-layer reachability max lifetime (in seconds)");
201 
202 #define ARP_UNICAST_LIMIT 3     /* # of probes until ARP refresh broadcast */
203 static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
204 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
205     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
206     "number of unicast ARP refresh probes before using broadcast");
207 
208 static u_int32_t arp_maxtries = 5;
209 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
210     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");
211 
212 static u_int32_t arp_maxhold = 16;
213 SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold,
214     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, "");
215 
216 static int useloopback = 1;     /* use loopback interface for local traffic */
217 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
218     CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");
219 
220 static int arp_proxyall = 0;
221 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
222     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");
223 
224 static int arp_sendllconflict = 0;
225 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
226     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");
227 
228 static int log_arp_warnings = 0;        /* Thread safe: no accumulated state */
229 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
230     CTLFLAG_RW | CTLFLAG_LOCKED,
231     &log_arp_warnings, 0,
232     "log arp warning messages");
233 
234 static int keep_announcements = 1;      /* Thread safe: no aging of state */
235 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
236     CTLFLAG_RW | CTLFLAG_LOCKED,
237     &keep_announcements, 0,
238     "keep arp announcements");
239 
240 static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */
241 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
242     CTLFLAG_RW | CTLFLAG_LOCKED,
243     &send_conflicting_probes, 0,
244     "send conflicting link-local arp probes");
245 
246 static int arp_verbose;
247 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
248     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");
249 
250 static uint32_t arp_maxhold_total = 1024; /* max total packets in the holdq */
251 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold_total,
252     CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold_total, 0, "");
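/*
 * The knobs above are runtime-tunable; assuming the standard sysctl
 * naming derived from the declarations (hypothetical invocations,
 * not from this file):
 *
 *	sysctl net.link.ether.inet.maxhold=32
 *	sysctl net.link.ether.inet.maxhold_total=2048
 */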
253 
254 
255 /*
256  * Generally protected by rnh_lock; use atomic operations on fields
257  * that are also modified outside of that lock (if needed).
258  */
259 struct arpstat arpstat __attribute__((aligned(sizeof(uint64_t))));
260 SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
261     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
262     0, 0, arp_getstat, "S,arpstat",
263     "ARP statistics (struct arpstat, net/if_arp.h)");
264 
265 static ZONE_DEFINE(llinfo_arp_zone, "llinfo_arp",
266     sizeof(struct llinfo_arp), ZC_ZFREE_CLEARMEM);
267 
268 void
269 arp_init(void)
270 {
271 	VERIFY(!arpinit_done);
272 
273 	LIST_INIT(&llinfo_arp);
274 
275 	arpinit_done = 1;
276 }
277 
278 static struct llinfo_arp *
279 arp_llinfo_alloc(zalloc_flags_t how)
280 {
281 	struct llinfo_arp *la = zalloc_flags(llinfo_arp_zone, how | Z_ZERO);
282 
283 	if (la) {
284 		/*
285 		 * The type of queue (Q_DROPHEAD) here is just a hint;
286 		 * the actual logic that works on this queue performs
287 		 * a head drop; see arp_llinfo_addq() for details.
288 		 */
289 		_qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ?
290 		    (uint32_t)-1 : arp_maxhold, QP_MBUF);
291 	}
292 	return la;
293 }
294 
295 static void
296 arp_llinfo_free(void *arg)
297 {
298 	struct llinfo_arp *la = arg;
299 
300 	if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
301 		panic("%s: trying to free %p when it is in use", __func__, la);
302 		/* NOTREACHED */
303 	}
304 
305 	/* Free any held packets */
306 	(void) arp_llinfo_flushq(la);
307 
308 	/* Purge any link-layer info caching */
309 	VERIFY(la->la_rt->rt_llinfo == la);
310 	if (la->la_rt->rt_llinfo_purge != NULL) {
311 		la->la_rt->rt_llinfo_purge(la->la_rt);
312 	}
313 
314 	zfree(llinfo_arp_zone, la);
315 }
316 
317 static bool
318 arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m)
319 {
320 	classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
321 
322 	if (arpstat.held >= arp_maxhold_total) {
323 		if (arp_verbose) {
324 			log(LOG_DEBUG,
325 			    "%s: dropping packet due to maxhold_total\n",
326 			    __func__);
327 		}
328 		atomic_add_32(&arpstat.dropped, 1);
329 		return false;
330 	}
331 
332 	if (qlen(&la->la_holdq) >= qlimit(&la->la_holdq)) {
333 		struct mbuf *_m;
334 		/* prune less than CTL, else take what's at the head */
335 		_getq_scidx_lt(&la->la_holdq, &pkt, SCIDX_CTL);
336 		_m = pkt.cp_mbuf;
337 		if (_m == NULL) {
338 			_getq(&la->la_holdq, &pkt);
339 			_m = pkt.cp_mbuf;
340 		}
341 		VERIFY(_m != NULL);
342 		if (arp_verbose) {
343 			log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n",
344 			    __func__, MBUF_SCIDX(mbuf_get_service_class(_m)));
345 		}
346 		m_freem(_m);
347 		atomic_add_32(&arpstat.dropped, 1);
348 		atomic_add_32(&arpstat.held, -1);
349 	}
350 	CLASSQ_PKT_INIT_MBUF(&pkt, m);
351 	_addq(&la->la_holdq, &pkt);
352 	atomic_add_32(&arpstat.held, 1);
353 	if (arp_verbose) {
354 		log(LOG_DEBUG, "%s: enqueued packet (scidx %u), qlen now %u\n",
355 		    __func__, MBUF_SCIDX(mbuf_get_service_class(m)),
356 		    qlen(&la->la_holdq));
357 	}
358 
359 	return true;
360 }
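/*
 * Design note on the drop policy above: when an individual holdq is
 * full, _getq_scidx_lt() first tries to evict a packet whose service
 * class is below CTL and only then falls back to a plain head drop,
 * so control-class packets awaiting resolution are the last to go.
 */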
361 
362 static uint32_t
363 arp_llinfo_flushq(struct llinfo_arp *la)
364 {
365 	uint32_t held = qlen(&la->la_holdq);
366 
367 	if (held != 0) {
368 		atomic_add_32(&arpstat.purged, held);
369 		atomic_add_32(&arpstat.held, -held);
370 		_flushq(&la->la_holdq);
371 	}
372 	la->la_prbreq_cnt = 0;
373 	VERIFY(qempty(&la->la_holdq));
374 	return held;
375 }
376 
377 static void
378 arp_llinfo_purge(struct rtentry *rt)
379 {
380 	struct llinfo_arp *la = rt->rt_llinfo;
381 
382 	RT_LOCK_ASSERT_HELD(rt);
383 	VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);
384 
385 	if (la->la_llreach != NULL) {
386 		RT_CONVERT_LOCK(rt);
387 		ifnet_llreach_free(la->la_llreach);
388 		la->la_llreach = NULL;
389 	}
390 	la->la_lastused = 0;
391 }
392 
393 static void
394 arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
395 {
396 	struct llinfo_arp *la = rt->rt_llinfo;
397 	struct if_llreach *lr = la->la_llreach;
398 
399 	if (lr == NULL) {
400 		bzero(ri, sizeof(*ri));
401 		ri->ri_rssi = IFNET_RSSI_UNKNOWN;
402 		ri->ri_lqm = IFNET_LQM_THRESH_OFF;
403 		ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
404 	} else {
405 		IFLR_LOCK(lr);
406 		/* Export to rt_reach_info structure */
407 		ifnet_lr2ri(lr, ri);
408 		/* Export ARP send expiration (calendar) time */
409 		ri->ri_snd_expire =
410 		    ifnet_llreach_up2calexp(lr, la->la_lastused);
411 		IFLR_UNLOCK(lr);
412 	}
413 }
414 
415 static void
416 arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
417 {
418 	struct llinfo_arp *la = rt->rt_llinfo;
419 	struct if_llreach *lr = la->la_llreach;
420 
421 	if (lr == NULL) {
422 		bzero(iflri, sizeof(*iflri));
423 		iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
424 		iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
425 		iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
426 	} else {
427 		IFLR_LOCK(lr);
428 		/* Export to ifnet_llreach_info structure */
429 		ifnet_lr2iflri(lr, iflri);
430 		/* Export ARP send expiration (uptime) time */
431 		iflri->iflri_snd_expire =
432 		    ifnet_llreach_up2upexp(lr, la->la_lastused);
433 		IFLR_UNLOCK(lr);
434 	}
435 }
436 
437 static void
438 arp_llinfo_refresh(struct rtentry *rt)
439 {
440 	uint64_t timenow = net_uptime();
441 	/*
442 	 * If the route entry is permanent (no expiry), is static,
443 	 * or lacks link-layer info, we cannot expedite
444 	 * the refresh.
445 	 */
446 	if ((rt->rt_expire == 0) ||
447 	    (rt->rt_flags & RTF_STATIC) ||
448 	    !(rt->rt_flags & RTF_LLINFO)) {
449 		return;
450 	}
451 
452 	if (rt->rt_expire > timenow) {
453 		rt->rt_expire = timenow;
454 	}
455 	return;
456 }
457 
458 void
459 arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen)
460 {
461 	/* Nothing more to do if it's disabled */
462 	if (arp_llreach_base == 0) {
463 		return;
464 	}
465 
466 	ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
467 }
468 
469 static __inline void
470 arp_llreach_use(struct llinfo_arp *la)
471 {
472 	if (la->la_llreach != NULL) {
473 		la->la_lastused = net_uptime();
474 	}
475 }
476 
477 static __inline int
478 arp_llreach_reachable(struct llinfo_arp *la)
479 {
480 	struct if_llreach *lr;
481 	const char *why = NULL;
482 
483 	/* Nothing more to do if it's disabled; pretend it's reachable */
484 	if (arp_llreach_base == 0) {
485 		return 1;
486 	}
487 
488 	if ((lr = la->la_llreach) == NULL) {
489 		/*
490 		 * Link-layer reachability record isn't present for this
491 		 * ARP entry; pretend it's reachable and use it as is.
492 		 */
493 		return 1;
494 	} else if (ifnet_llreach_reachable(lr)) {
495 		/*
496 		 * Record is present, it's not shared with other ARP
497 		 * entries and a packet has recently been received
498 		 * from the remote host; consider it reachable.
499 		 */
500 		if (lr->lr_reqcnt == 1) {
501 			return 1;
502 		}
503 
504 		/* Prime it up, if this is the first time */
505 		if (la->la_lastused == 0) {
506 			VERIFY(la->la_llreach != NULL);
507 			arp_llreach_use(la);
508 		}
509 
510 		/*
511 		 * Record is present and shared with one or more ARP
512 		 * entries, and a packet has recently been received
513 		 * from the remote host.  Since it's shared by more
514 		 * than one IP address, we can't rely on the link-
515 		 * layer reachability alone; consider it reachable if
516 		 * this ARP entry has been used "recently."
517 		 */
518 		if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) {
519 			return 1;
520 		}
521 
522 		why = "has alias(es) and hasn't been used in a while";
523 	} else {
524 		why = "haven't heard from it in a while";
525 	}
526 
527 	if (arp_verbose > 1) {
528 		char tmp[MAX_IPv4_STR_LEN];
529 		u_int64_t now = net_uptime();
530 
531 		log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
532 		    "%s [lastused %lld, lastrcvd %lld] secs ago\n",
533 		    if_name(lr->lr_ifp), inet_ntop(AF_INET,
534 		    &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof(tmp)), why,
535 		    (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
536 		    (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
537 	}
538 	return 0;
539 }
540 
541 /*
542  * Obtain a link-layer source cache entry for the sender.
543  *
544  * NOTE: This is currently only for ARP/Ethernet.
545  */
546 static void
547 arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr,
548     unsigned int alen, boolean_t solicited, uint32_t *p_rt_event_code)
549 {
550 	VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
551 	VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
552 
553 	if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
554 	    !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
555 	    ifp->if_addrlen == IF_LLREACH_MAXLEN &&     /* Ethernet */
556 	    alen == ifp->if_addrlen) {
557 		struct llinfo_arp *la = rt->rt_llinfo;
558 		struct if_llreach *lr;
559 		const char *why = NULL, *type = "";
560 
561 		/* Become a regular mutex, just in case */
562 		RT_CONVERT_LOCK(rt);
563 
564 		if ((lr = la->la_llreach) != NULL) {
565 			type = (solicited ? "ARP reply" : "ARP announcement");
566 			/*
567 			 * If target has changed, create a new record;
568 			 * otherwise keep existing record.
569 			 */
570 			IFLR_LOCK(lr);
571 			if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
572 				IFLR_UNLOCK(lr);
573 				/* Purge any link-layer info caching */
574 				VERIFY(rt->rt_llinfo_purge != NULL);
575 				rt->rt_llinfo_purge(rt);
576 				lr = NULL;
577 				why = " for different target HW address; "
578 				    "using new llreach record";
579 				*p_rt_event_code = ROUTE_LLENTRY_CHANGED;
580 			} else {
581 				/*
582 				 * If we were doing unicast probing, we need to
583 				 * deliver an event for neighbor cache resolution
584 				 */
585 				if (lr->lr_probes != 0) {
586 					*p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
587 				}
588 
589 				lr->lr_probes = 0;      /* reset probe count */
590 				IFLR_UNLOCK(lr);
591 				if (solicited) {
592 					why = " for same target HW address; "
593 					    "keeping existing llreach record";
594 				}
595 			}
596 		}
597 
598 		if (lr == NULL) {
599 			lr = la->la_llreach = ifnet_llreach_alloc(ifp,
600 			    ETHERTYPE_IP, addr, alen, arp_llreach_base);
601 			if (lr != NULL) {
602 				lr->lr_probes = 0;      /* reset probe count */
603 				if (why == NULL) {
604 					why = "creating new llreach record";
605 				}
606 			}
607 			*p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
608 		}
609 
610 		if (arp_verbose > 1 && lr != NULL && why != NULL) {
611 			char tmp[MAX_IPv4_STR_LEN];
612 
613 			log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
614 			    type, why, inet_ntop(AF_INET,
615 			    &SIN(rt_key(rt))->sin_addr, tmp, sizeof(tmp)));
616 		}
617 	}
618 }
619 
620 struct arptf_arg {
621 	boolean_t draining;
622 	boolean_t probing;
623 	uint32_t killed;
624 	uint32_t aging;
625 	uint32_t sticky;
626 	uint32_t found;
627 	uint32_t qlen;
628 	uint32_t qsize;
629 };
630 
631 /*
632  * Free an arp entry.
633  */
634 static void
635 arptfree(struct llinfo_arp *la, void *arg)
636 {
637 	struct arptf_arg *ap = arg;
638 	struct rtentry *rt = la->la_rt;
639 	uint64_t timenow;
640 
641 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
642 
643 	/* rnh_lock acquired by caller protects rt from going away */
644 	RT_LOCK(rt);
645 
646 	VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
647 	VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
648 
649 	ap->found++;
650 	timenow = net_uptime();
651 
652 	/* If we're probing, flush out held packets upon probe expiration */
653 	if (ap->probing && (la->la_flags & LLINFO_PROBING) &&
654 	    la->la_probeexp <= timenow) {
655 		struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
656 		if (sdl != NULL) {
657 			sdl->sdl_alen = 0;
658 		}
659 		(void) arp_llinfo_flushq(la);
660 		/*
661 		 * Enqueue work item to invoke callback for this route entry
662 		 */
663 		route_event_enqueue_nwk_wq_entry(rt, NULL,
664 		    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
665 	}
666 
667 	/*
668 	 * The following is used mostly to re-arm the timer
669 	 * and for logging.
670 	 * qlen is what re-arms the timer, so pure probe
671 	 * requests are counted as zero-length packets that
672 	 * contribute to the queue length but not to the size.
673 	 */
674 	ap->qlen += qlen(&la->la_holdq);
675 	ap->qlen += la->la_prbreq_cnt;
676 	ap->qsize += qsize(&la->la_holdq);
677 
678 	if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
679 		ap->sticky++;
680 		/* ARP entry is permanent? */
681 		if (rt->rt_expire == 0) {
682 			RT_UNLOCK(rt);
683 			return;
684 		}
685 	}
686 
687 	/* ARP entry hasn't expired and we're not draining? */
688 	if (!ap->draining && rt->rt_expire > timenow) {
689 		RT_UNLOCK(rt);
690 		ap->aging++;
691 		return;
692 	}
693 
694 	if (rt->rt_refcnt > 0) {
695 		/*
696 		 * ARP entry has expired, with outstanding refcnt.
697 		 * If we're not draining, force ARP query to be
698 		 * generated next time this entry is used.
699 		 */
700 		if (!ap->draining && !ap->probing) {
701 			struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
702 			if (sdl != NULL) {
703 				sdl->sdl_alen = 0;
704 			}
705 			la->la_asked = 0;
706 			rt->rt_flags &= ~RTF_REJECT;
707 		}
708 		RT_UNLOCK(rt);
709 	} else if (!(rt->rt_flags & RTF_STATIC) && !ap->probing) {
710 		/*
711 		 * ARP entry has no outstanding refcnt, and we're either
712 		 * draining or it has expired; delete it from the routing
713 		 * table.  Safe to drop rt_lock and use rt_key, since holding
714 		 * rnh_lock here prevents another thread from calling
715 		 * rt_setgate() on this route.
716 		 */
717 		RT_UNLOCK(rt);
718 		rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
719 		    rt_mask(rt), 0, NULL);
720 		arpstat.timeouts++;
721 		ap->killed++;
722 	} else {
723 		/* ARP entry is static; let it linger */
724 		RT_UNLOCK(rt);
725 	}
726 }
727 
728 void
729 in_arpdrain(void *arg)
730 {
731 #pragma unused(arg)
732 	struct llinfo_arp *la, *ola;
733 	struct arptf_arg farg;
734 
735 	if (arp_verbose) {
736 		log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);
737 	}
738 
739 	lck_mtx_lock(rnh_lock);
740 	la = llinfo_arp.lh_first;
741 	bzero(&farg, sizeof(farg));
742 	farg.draining = TRUE;
743 	while ((ola = la) != NULL) {
744 		la = la->la_le.le_next;
745 		arptfree(ola, &farg);
746 	}
747 	if (arp_verbose) {
748 		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
749 		    "%u pkts held (%u bytes)\n", __func__, farg.found,
750 		    farg.aging, farg.sticky, farg.killed, farg.qlen,
751 		    farg.qsize);
752 	}
753 	lck_mtx_unlock(rnh_lock);
754 }
755 
756 /*
757  * Timeout routine.  Age arp_tab entries periodically.
758  */
759 static void
760 arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1)
761 {
762 #pragma unused(arg0, arg1)
763 	struct llinfo_arp *la, *ola;
764 	struct timeval atv;
765 	struct arptf_arg farg;
766 
767 	lck_mtx_lock(rnh_lock);
768 	la = llinfo_arp.lh_first;
769 	bzero(&farg, sizeof(farg));
770 	while ((ola = la) != NULL) {
771 		la = la->la_le.le_next;
772 		arptfree(ola, &farg);
773 	}
774 	if (arp_verbose) {
775 		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
776 		    "%u pkts held (%u bytes)\n", __func__, farg.found,
777 		    farg.aging, farg.sticky, farg.killed, farg.qlen,
778 		    farg.qsize);
779 	}
780 	atv.tv_usec = 0;
781 	atv.tv_sec = MAX(arpt_prune, 5);
782 	/* re-arm the timer if there's work to do */
783 	arp_timeout_run = 0;
784 	if (farg.aging > 0) {
785 		arp_sched_timeout(&atv);
786 	} else if (arp_verbose) {
787 		log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
788 	}
789 	lck_mtx_unlock(rnh_lock);
790 }
791 
792 static void
793 arp_sched_timeout(struct timeval *atv)
794 {
795 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
796 
797 	if (!arp_timeout_run) {
798 		struct timeval tv;
799 		uint64_t deadline = 0;
800 
801 		if (arp_timeout_tcall == NULL) {
802 			arp_timeout_tcall =
803 			    thread_call_allocate(arp_timeout, NULL);
804 			VERIFY(arp_timeout_tcall != NULL);
805 		}
806 
807 		if (atv == NULL) {
808 			tv.tv_usec = 0;
809 			tv.tv_sec = MAX(arpt_prune / 5, 1);
810 			atv = &tv;
811 		}
812 		if (arp_verbose) {
813 			log(LOG_DEBUG, "%s: timer scheduled in "
814 			    "T+%llus.%lluu\n", __func__,
815 			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
816 		}
817 		arp_timeout_run = 1;
818 
819 		clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
820 		    mach_absolute_time(), &deadline);
821 		(void) thread_call_enter_delayed(arp_timeout_tcall, deadline);
822 	}
823 }
824 
825 /*
826  * Probe routine.
827  */
828 static void
829 arp_probe(thread_call_param_t arg0, thread_call_param_t arg1)
830 {
831 #pragma unused(arg0, arg1)
832 	struct llinfo_arp *la, *ola;
833 	struct timeval atv;
834 	struct arptf_arg farg;
835 
836 	lck_mtx_lock(rnh_lock);
837 	la = llinfo_arp.lh_first;
838 	bzero(&farg, sizeof(farg));
839 	farg.probing = TRUE;
840 	while ((ola = la) != NULL) {
841 		la = la->la_le.le_next;
842 		arptfree(ola, &farg);
843 	}
844 	if (arp_verbose) {
845 		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
846 		    "%u pkts held (%u bytes)\n", __func__, farg.found,
847 		    farg.aging, farg.sticky, farg.killed, farg.qlen,
848 		    farg.qsize);
849 	}
850 	atv.tv_usec = 0;
851 	atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
852 	/* re-arm the probe if there's work to do */
853 	arp_probe_run = 0;
854 	if (farg.qlen > 0) {
855 		arp_sched_probe(&atv);
856 	} else if (arp_verbose) {
857 		log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__);
858 	}
859 	lck_mtx_unlock(rnh_lock);
860 }
861 
862 static void
863 arp_sched_probe(struct timeval *atv)
864 {
865 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
866 
867 	if (!arp_probe_run) {
868 		struct timeval tv;
869 		uint64_t deadline = 0;
870 
871 		if (arp_probe_tcall == NULL) {
872 			arp_probe_tcall =
873 			    thread_call_allocate(arp_probe, NULL);
874 			VERIFY(arp_probe_tcall != NULL);
875 		}
876 
877 		if (atv == NULL) {
878 			tv.tv_usec = 0;
879 			tv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
880 			atv = &tv;
881 		}
882 		if (arp_verbose) {
883 			log(LOG_DEBUG, "%s: probe scheduled in "
884 			    "T+%llus.%lluu\n", __func__,
885 			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
886 		}
887 		arp_probe_run = 1;
888 
889 		clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
890 		    mach_absolute_time(), &deadline);
891 		(void) thread_call_enter_delayed(arp_probe_tcall, deadline);
892 	}
893 }
894 
895 /*
896  * ifa_rtrequest() callback
897  */
898 static void
899 arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
900 {
901 #pragma unused(sa)
902 	struct sockaddr *gate = rt->rt_gateway;
903 	struct llinfo_arp *la = rt->rt_llinfo;
904 	static struct sockaddr_dl null_sdl =
905 	{ .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK };
906 	uint64_t timenow;
907 	char buf[MAX_IPv4_STR_LEN];
908 
909 	VERIFY(arpinit_done);
910 	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
911 	RT_LOCK_ASSERT_HELD(rt);
912 
913 	if (rt->rt_flags & RTF_GATEWAY) {
914 		return;
915 	}
916 
917 	timenow = net_uptime();
918 	switch (req) {
919 	case RTM_ADD:
920 		/*
921 		 * XXX: If this is a manually added route to an interface,
922 		 * such as an older version of routed or gated might provide,
923 		 * restore the cloning bit.
924 		 */
925 		if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
926 		    SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) {
927 			rt->rt_flags |= RTF_CLONING;
928 		}
929 
930 		if (rt->rt_flags & RTF_CLONING) {
931 			/*
932 			 * Case 1: This route should come from a route to iface.
933 			 */
934 			if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
935 				gate = rt->rt_gateway;
936 				SDL(gate)->sdl_type = rt->rt_ifp->if_type;
937 				SDL(gate)->sdl_index = rt->rt_ifp->if_index;
938 				/*
939 				 * In case we're called before 1.0 sec.
940 				 * has elapsed.
941 				 */
942 				rt_setexpire(rt, MAX(timenow, 1));
943 			}
944 			break;
945 		}
946 		/* Announce a new entry if requested. */
947 		if (rt->rt_flags & RTF_ANNOUNCE) {
948 			if (la != NULL) {
949 				arp_llreach_use(la); /* Mark use timestamp */
950 			}
951 			RT_UNLOCK(rt);
952 			dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
953 			    SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
954 			RT_LOCK(rt);
955 			arpstat.txannounces++;
956 		}
957 		OS_FALLTHROUGH;
958 	case RTM_RESOLVE:
959 		if (gate->sa_family != AF_LINK ||
960 		    gate->sa_len < sizeof(null_sdl)) {
961 			arpstat.invalidreqs++;
962 			log(LOG_ERR, "%s: route to %s has bad gateway address "
963 			    "(sa_family %u sa_len %u) on %s\n",
964 			    __func__, inet_ntop(AF_INET,
965 			    &SIN(rt_key(rt))->sin_addr.s_addr, buf,
966 			    sizeof(buf)), gate->sa_family, gate->sa_len,
967 			    if_name(rt->rt_ifp));
968 			break;
969 		}
970 		SDL(gate)->sdl_type = rt->rt_ifp->if_type;
971 		SDL(gate)->sdl_index = rt->rt_ifp->if_index;
972 
973 		if (la != NULL) {
974 			break; /* This happens on a route change */
975 		}
976 		/*
977 		 * Case 2:  This route may come from cloning, or a manual route
978 		 * add with a LL address.
979 		 */
980 		rt->rt_llinfo = la = arp_llinfo_alloc(Z_WAITOK);
981 
982 		rt->rt_llinfo_get_ri    = arp_llinfo_get_ri;
983 		rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
984 		rt->rt_llinfo_purge     = arp_llinfo_purge;
985 		rt->rt_llinfo_free      = arp_llinfo_free;
986 		rt->rt_llinfo_refresh   = arp_llinfo_refresh;
987 		rt->rt_flags |= RTF_LLINFO;
988 		la->la_rt = rt;
989 		LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
990 		arpstat.inuse++;
991 
992 		/* We have at least one entry; arm the timer if not already */
993 		arp_sched_timeout(NULL);
994 
995 		/*
996 		 * This keeps multicast addresses from showing up
997 		 * in `arp -a' listings as unresolved.  It's not actually
998 		 * functional.  The same goes for broadcast.  For an IPv4
999 		 * link-local address, keep the entry around even after
1000 		 * it has expired.
1001 		 */
1002 		if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
1003 			RT_UNLOCK(rt);
1004 			dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
1005 			    sizeof(struct sockaddr_dl));
1006 			RT_LOCK(rt);
1007 			rt_setexpire(rt, 0);
1008 		} else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
1009 		    rt->rt_ifp)) {
1010 			struct sockaddr_dl *gate_ll = SDL(gate);
1011 			size_t broadcast_len;
1012 			int ret = ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
1013 			    LLADDR(gate_ll), sizeof(gate_ll->sdl_data),
1014 			    &broadcast_len);
1015 			if (ret == 0 && broadcast_len <= UINT8_MAX) {
1016 				gate_ll->sdl_alen = (u_char)broadcast_len;
1017 				gate_ll->sdl_family = AF_LINK;
1018 				gate_ll->sdl_len = sizeof(struct sockaddr_dl);
1019 			}
1020 			/* In case we're called before 1.0 sec. has elapsed */
1021 			rt_setexpire(rt, MAX(timenow, 1));
1022 		} else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
1023 		    sin_addr.s_addr))) {
1024 			rt->rt_flags |= RTF_STATIC;
1025 		}
1026 
1027 		/* Set default maximum number of retries */
1028 		la->la_maxtries = arp_maxtries;
1029 
1030 		/* Become a regular mutex, just in case */
1031 		RT_CONVERT_LOCK(rt);
1032 		IFA_LOCK_SPIN(rt->rt_ifa);
1033 		if (SIN(rt_key(rt))->sin_addr.s_addr ==
1034 		    (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
1035 			IFA_UNLOCK(rt->rt_ifa);
1036 			/*
1037 			 * This test used to be
1038 			 *	if (loif.if_flags & IFF_UP)
1039 			 * It allowed local traffic to be forced through the
1040 			 * hardware by configuring the loopback down.  However,
1041 			 * it causes problems during network configuration
1042 			 * for boards that can't receive packets they send.
1043 			 * It is now necessary to clear "useloopback" and
1044 			 * remove the route to force traffic out to the
1045 			 * hardware.
1046 			 */
1047 			rt_setexpire(rt, 0);
1048 			ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)),
1049 			    SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
1050 			if (useloopback) {
1051 				if (rt->rt_ifp != lo_ifp) {
1052 					/*
1053 					 * Purge any link-layer info caching.
1054 					 */
1055 					if (rt->rt_llinfo_purge != NULL) {
1056 						rt->rt_llinfo_purge(rt);
1057 					}
1058 
1059 					/*
1060 					 * Adjust route ref count for the
1061 					 * interfaces.
1062 					 */
1063 					if (rt->rt_if_ref_fn != NULL) {
1064 						rt->rt_if_ref_fn(lo_ifp, 1);
1065 						rt->rt_if_ref_fn(rt->rt_ifp, -1);
1066 					}
1067 				}
1068 				rt->rt_ifp = lo_ifp;
1069 				/*
1070 				 * If rmx_mtu is not locked, update it
1071 				 * to the MTU used by the new interface.
1072 				 */
1073 				if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
1074 					rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
1075 				}
1076 			}
1077 		} else {
1078 			IFA_UNLOCK(rt->rt_ifa);
1079 		}
1080 		break;
1081 
1082 	case RTM_DELETE:
1083 		if (la == NULL) {
1084 			break;
1085 		}
1086 		/*
1087 		 * Unchain it but defer the actual freeing until the route
1088 		 * itself is to be freed.  rt->rt_llinfo still points to
1089 		 * llinfo_arp, and likewise, la->la_rt still points to this
1090 		 * route entry, except that RTF_LLINFO is now cleared.
1091 		 */
1092 		LIST_REMOVE(la, la_le);
1093 		la->la_le.le_next = NULL;
1094 		la->la_le.le_prev = NULL;
1095 		arpstat.inuse--;
1096 
1097 		/*
1098 		 * Purge any link-layer info caching.
1099 		 */
1100 		if (rt->rt_llinfo_purge != NULL) {
1101 			rt->rt_llinfo_purge(rt);
1102 		}
1103 
1104 		rt->rt_flags &= ~RTF_LLINFO;
1105 		(void) arp_llinfo_flushq(la);
1106 	}
1107 }
1108 
1109 /*
1110  * Convert a hardware address to a hex string for logging errors.
1111  */
1112 static const char *
1113 sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen)
1114 {
1115 	char *buf = orig_buf;
1116 	int i;
1117 	const u_char *lladdr = (u_char *)(size_t)sdl->sdl_data;
1118 	int maxbytes = buflen / 3;
1119 
1120 	if (maxbytes > sdl->sdl_alen) {
1121 		maxbytes = sdl->sdl_alen;
1122 	}
1123 	*buf = '\0';
1124 	for (i = 0; i < maxbytes; i++) {
1125 		snprintf(buf, 3, "%02x", lladdr[i]);
1126 		buf += 2;
1127 		*buf = (i == maxbytes - 1) ? '\0' : ':';
1128 		buf++;
1129 	}
1130 	return orig_buf;
1131 }
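/*
 * Usage sketch (mirrors the call sites in arp_ip_handle_input()
 * below); output is colon-separated hex, e.g. "00:11:22:33:44:55":
 *
 *	char hex[3 * MAX_HW_LEN];
 *
 *	log(LOG_ERR, "sender hw %s\n",
 *	    sdl_addr_to_hex(sender_hw, hex, sizeof(hex)));
 */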
1132 
1133 /*
1134  * arp_lookup_route will lookup the route for a given address.
1135  *
1136  * The address must be for a host on a local network on this interface.
1137  * If the returned route is non-NULL, the route is locked and the caller
1138  * is responsible for unlocking it and releasing its reference.
1139  */
1140 static errno_t
1141 arp_lookup_route(const struct in_addr *addr, int create, int proxy,
1142     route_t *route, unsigned int ifscope)
1143 {
1144 	struct sockaddr_inarp sin =
1145 	{ sizeof(sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
1146 	const char *why = NULL;
1147 	errno_t error = 0;
1148 	route_t rt;
1149 
1150 	*route = NULL;
1151 
1152 	sin.sin_addr.s_addr = addr->s_addr;
1153 	sin.sin_other = proxy ? SIN_PROXY : 0;
1154 
1155 	/*
1156 	 * If the destination is a link-local address, don't
1157 	 * constrain the lookup (don't scope it).
1158 	 */
1159 	if (IN_LINKLOCAL(ntohl(addr->s_addr))) {
1160 		ifscope = IFSCOPE_NONE;
1161 	}
1162 
1163 	rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope);
1164 	if (rt == NULL) {
1165 		return ENETUNREACH;
1166 	}
1167 
1168 	RT_LOCK(rt);
1169 
1170 	if (rt->rt_flags & RTF_GATEWAY) {
1171 		why = "host is not on local network";
1172 		error = ENETUNREACH;
1173 	} else if (!(rt->rt_flags & RTF_LLINFO)) {
1174 		why = "could not allocate llinfo";
1175 		error = ENOMEM;
1176 	} else if (rt->rt_gateway->sa_family != AF_LINK) {
1177 		why = "gateway route is not ours";
1178 		error = EPROTONOSUPPORT;
1179 	}
1180 
1181 	if (error != 0) {
1182 		if (create && (arp_verbose || log_arp_warnings)) {
1183 			char tmp[MAX_IPv4_STR_LEN];
1184 			log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
1185 			    __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
1186 			    sizeof(tmp)), why);
1187 		}
1188 
1189 		/*
1190 		 * If there are no references to this route, and it is
1191 		 * a cloned route, and not static, and ARP had created
1192 		 * the route, then purge it from the routing table as
1193 		 * it is probably bogus.
1194 		 */
1195 		if (rt->rt_refcnt == 1 &&
1196 		    (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
1197 		    RTF_WASCLONED) {
1198 			/*
1199 			 * Prevent another thread from modifying rt_key,
1200 			 * rt_gateway via rt_setgate() after rt_lock is
1201 			 * dropped by marking the route as defunct.
1202 			 */
1203 			rt->rt_flags |= RTF_CONDEMNED;
1204 			RT_UNLOCK(rt);
1205 			rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1206 			    rt_mask(rt), rt->rt_flags, NULL);
1207 			rtfree(rt);
1208 		} else {
1209 			RT_REMREF_LOCKED(rt);
1210 			RT_UNLOCK(rt);
1211 		}
1212 		return error;
1213 	}
1214 
1215 	/*
1216 	 * Caller releases reference and does RT_UNLOCK(rt).
1217 	 */
1218 	*route = rt;
1219 	return 0;
1220 }
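/*
 * A hedged caller sketch ("dst" here is a caller-supplied
 * struct in_addr): on success the route comes back locked with a
 * reference held, so the caller must drop both, as arp_lookup_ip()
 * does below:
 *
 *	route_t rt = NULL;
 *
 *	if (arp_lookup_route(&dst, 0, 0, &rt, ifp->if_index) == 0) {
 *		... use rt under RT_LOCK ...
 *		RT_REMREF_LOCKED(rt);
 *		RT_UNLOCK(rt);
 *	}
 */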
1221 
1222 boolean_t
1223 arp_is_entry_probing(route_t p_route)
1224 {
1225 	struct llinfo_arp *llinfo = p_route->rt_llinfo;
1226 
1227 	if (llinfo != NULL &&
1228 	    llinfo->la_llreach != NULL &&
1229 	    llinfo->la_llreach->lr_probes != 0) {
1230 		return TRUE;
1231 	}
1232 
1233 	return FALSE;
1234 }
1235 
1236 __attribute__((noinline))
1237 static void
1238 post_kev_in_arpfailure(struct ifnet *ifp)
1239 {
1240 	struct kev_msg ev_msg = {};
1241 	struct kev_in_arpfailure in_arpfailure = {};
1242 
1243 	in_arpfailure.link_data.if_family = ifp->if_family;
1244 	in_arpfailure.link_data.if_unit = ifp->if_unit;
1245 	strlcpy(in_arpfailure.link_data.if_name, ifp->if_name, IFNAMSIZ);
1246 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1247 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1248 	ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1249 	ev_msg.event_code = KEV_INET_ARPRTRFAILURE;
1250 	ev_msg.dv[0].data_ptr = &in_arpfailure;
1251 	ev_msg.dv[0].data_length = sizeof(struct kev_in_arpfailure);
1252 	dlil_post_complete_msg(NULL, &ev_msg);
1253 }
1254 
1255 __attribute__((noinline))
1256 static void
1257 arp_send_probe_notification(route_t route)
1258 {
1259 	route_event_enqueue_nwk_wq_entry(route, NULL,
1260 	    ROUTE_LLENTRY_PROBED, NULL, TRUE);
1261 
1262 	if (route->rt_flags & RTF_ROUTER) {
1263 		struct radix_node_head  *rnh = NULL;
1264 		struct route_event rt_ev;
1265 		route_event_init(&rt_ev, route, NULL, ROUTE_LLENTRY_PROBED);
1266 		/*
1267 		 * We already have a reference on rt. The function
1268 		 * frees it before returning.
1269 		 */
1270 		RT_UNLOCK(route);
1271 		lck_mtx_lock(rnh_lock);
1272 		rnh = rt_tables[AF_INET];
1273 
1274 		if (rnh != NULL) {
1275 			(void) rnh->rnh_walktree(rnh,
1276 			    route_event_walktree, (void *)&rt_ev);
1277 		}
1278 		lck_mtx_unlock(rnh_lock);
1279 		RT_LOCK(route);
1280 	}
1281 }
1282 
1283 /*
1284  * This is the ARP pre-output routine; care must be taken to ensure that
1285  * the "hint" route never gets freed via rtfree(), since the caller may
1286  * have stored it inside a struct route with a reference held for that
1287  * placeholder.
1288  */
1289 errno_t
1290 arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
1291     struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
1292     mbuf_t packet)
1293 {
1294 	route_t route = NULL;   /* output route */
1295 	errno_t result = 0;
1296 	struct sockaddr_dl *gateway;
1297 	struct llinfo_arp *llinfo = NULL;
1298 	boolean_t usable, probing = FALSE;
1299 	uint64_t timenow;
1300 	struct if_llreach *lr;
1301 	struct ifaddr *rt_ifa;
1302 	struct sockaddr *sa;
1303 	uint32_t rtflags;
1304 	struct sockaddr_dl sdl = {};
1305 	boolean_t send_probe_notif = FALSE;
1306 	boolean_t enqueued = FALSE;
1307 
1308 	if (ifp == NULL || net_dest == NULL) {
1309 		return EINVAL;
1310 	}
1311 
1312 	if (net_dest->sin_family != AF_INET) {
1313 		return EAFNOSUPPORT;
1314 	}
1315 
1316 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) {
1317 		return ENETDOWN;
1318 	}
1319 
1320 	/*
1321 	 * If we were given a route, verify the route and grab the gateway
1322 	 */
1323 	if (hint != NULL) {
1324 		/*
1325 		 * Callee holds a reference on the route and returns
1326 		 * with the route entry locked, upon success.
1327 		 */
1328 		result = route_to_gwroute((const struct sockaddr *)
1329 		    net_dest, hint, &route);
1330 		if (result != 0) {
1331 			return result;
1332 		}
1333 		if (route != NULL) {
1334 			RT_LOCK_ASSERT_HELD(route);
1335 		}
1336 	}
1337 
1338 	if ((packet != NULL && (packet->m_flags & M_BCAST)) ||
1339 	    in_broadcast(net_dest->sin_addr, ifp)) {
1340 		size_t broadcast_len;
1341 		bzero(ll_dest, ll_dest_len);
1342 		result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
1343 		    ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
1344 		    &broadcast_len);
1345 		if (result == 0 && broadcast_len <= UINT8_MAX) {
1346 			ll_dest->sdl_alen = (u_char)broadcast_len;
1347 			ll_dest->sdl_family = AF_LINK;
1348 			ll_dest->sdl_len = sizeof(struct sockaddr_dl);
1349 		}
1350 		goto release;
1351 	}
1352 	if ((packet != NULL && (packet->m_flags & M_MCAST)) ||
1353 	    ((ifp->if_flags & IFF_MULTICAST) &&
1354 	    IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) {
1355 		if (route != NULL) {
1356 			RT_UNLOCK(route);
1357 		}
1358 		result = dlil_resolve_multi(ifp,
1359 		    (const struct sockaddr *)net_dest,
1360 		    (struct sockaddr *)ll_dest, ll_dest_len);
1361 		if (route != NULL) {
1362 			RT_LOCK(route);
1363 		}
1364 		goto release;
1365 	}
1366 
1367 	/*
1368 	 * If we didn't find a route, or the route doesn't have
1369 	 * link layer information, trigger the creation of the
1370 	 * route and link layer information.
1371 	 */
1372 	if (route == NULL || route->rt_llinfo == NULL) {
1373 		/* Clean up now while we can */
1374 		if (route != NULL) {
1375 			if (route == hint) {
1376 				RT_REMREF_LOCKED(route);
1377 				RT_UNLOCK(route);
1378 			} else {
1379 				RT_UNLOCK(route);
1380 				rtfree(route);
1381 			}
1382 		}
1383 		/*
1384 		 * Callee holds a reference on the route and returns
1385 		 * with the route entry locked, upon success.
1386 		 */
1387 		result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
1388 		    ifp->if_index);
1389 		if (result == 0) {
1390 			RT_LOCK_ASSERT_HELD(route);
1391 		}
1392 	}
1393 
1394 	if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
1395 		/* In case result is 0 but no route, return an error */
1396 		if (result == 0) {
1397 			result = EHOSTUNREACH;
1398 		}
1399 
1400 		if (route != NULL && route->rt_llinfo == NULL) {
1401 			char tmp[MAX_IPv4_STR_LEN];
1402 			log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
1403 			    __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
1404 			    tmp, sizeof(tmp)));
1405 		}
1406 		goto release;
1407 	}
1408 
1409 	/*
1410 	 * Now that we have the right route, is it filled in?
1411 	 */
1412 	gateway = SDL(route->rt_gateway);
1413 	timenow = net_uptime();
1414 	VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1415 	VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1416 
1417 	usable = ((route->rt_expire == 0 || route->rt_expire > timenow) &&
1418 	    gateway != NULL && gateway->sdl_family == AF_LINK &&
1419 	    gateway->sdl_alen != 0);
1420 
1421 	if (usable) {
1422 		boolean_t unreachable = !arp_llreach_reachable(llinfo);
1423 
1424 		/* Entry is usable, so fill in info for caller */
1425 		bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
1426 		result = 0;
1427 		arp_llreach_use(llinfo);        /* Mark use timestamp */
1428 
1429 		lr = llinfo->la_llreach;
1430 		if (lr == NULL) {
1431 			goto release;
1432 		}
1433 		rt_ifa = route->rt_ifa;
1434 
1435 		/* Become a regular mutex, just in case */
1436 		RT_CONVERT_LOCK(route);
1437 		IFLR_LOCK_SPIN(lr);
1438 
1439 		if ((unreachable || (llinfo->la_flags & LLINFO_PROBING)) &&
1440 		    lr->lr_probes < arp_unicast_lim) {
1441 			/*
1442 			 * Mark the entry with the la_probeexp deadline to
1443 			 * trigger the probe timer to be scheduled (if not
1444 			 * already).  This gets cleared the moment we get
1445 			 * an ARP reply.
1446 			 */
1447 			probing = TRUE;
1448 			if (lr->lr_probes == 0) {
1449 				llinfo->la_probeexp = (timenow + arpt_probe);
1450 				llinfo->la_flags |= LLINFO_PROBING;
1451 				/*
1452 				 * Provide notification that ARP unicast
1453 				 * probing has started.
1454 				 * We only do it for the first unicast probe
1455 				 * attempt.
1456 				 */
1457 				send_probe_notif = TRUE;
1458 			}
1459 
1460 			/*
1461 			 * Start the unicast probe and anticipate a reply;
1462 			 * afterwards, return existing entry to caller and
1463 			 * let it be used anyway.  If peer is non-existent
1464 			 * we'll broadcast ARP next time around.
1465 			 */
1466 			lr->lr_probes++;
1467 			bzero(&sdl, sizeof(sdl));
1468 			sdl.sdl_alen = ifp->if_addrlen;
1469 			bcopy(&lr->lr_key.addr, LLADDR(&sdl),
1470 			    ifp->if_addrlen);
1471 			IFLR_UNLOCK(lr);
1472 			IFA_LOCK_SPIN(rt_ifa);
1473 			IFA_ADDREF_LOCKED(rt_ifa);
1474 			sa = rt_ifa->ifa_addr;
1475 			IFA_UNLOCK(rt_ifa);
1476 			rtflags = route->rt_flags;
1477 			RT_UNLOCK(route);
1478 			dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1479 			    (const struct sockaddr_dl *)&sdl,
1480 			    (const struct sockaddr *)net_dest, rtflags);
1481 			IFA_REMREF(rt_ifa);
1482 			RT_LOCK(route);
1483 			goto release;
1484 		} else {
1485 			IFLR_UNLOCK(lr);
1486 			if (!unreachable &&
1487 			    !(llinfo->la_flags & LLINFO_PROBING)) {
1488 				/*
1489 				 * Normal case where peer is still reachable,
1490 				 * we're not probing and if_addrlen is anything
1491 				 * but IF_LLREACH_MAXLEN.
1492 				 */
1493 				goto release;
1494 			}
1495 		}
1496 	}
1497 
1498 	if (ifp->if_flags & IFF_NOARP) {
1499 		result = ENOTSUP;
1500 		goto release;
1501 	}
1502 
1503 	/*
1504 	 * Route wasn't complete/valid; we need to send out ARP request.
1505 	 * If we've exceeded the limit of la_holdq, drop from the head
1506 	 * of the queue and add this packet to the tail.  If we end up with
1507 	 * RTF_REJECT below, we'll dequeue this from tail and have the
1508 	 * caller free the packet instead.  It's safe to do that since
1509 	 * we still hold the route's rt_lock.
1510 	 */
1511 	if (packet != NULL) {
1512 		enqueued = arp_llinfo_addq(llinfo, packet);
1513 	} else {
1514 		llinfo->la_prbreq_cnt++;
1515 	}
1516 	/*
1517 	 * Regardless of permanent vs. expirable entry, we need to
1518 	 * avoid having packets sit in la_holdq forever; thus mark the
1519 	 * entry with la_probeexp deadline to trigger the probe timer
1520 	 * to be scheduled (if not already).  This gets cleared the
1521 	 * moment we get an ARP reply.
1522 	 */
1523 	probing = TRUE;
1524 	if ((qlen(&llinfo->la_holdq) + llinfo->la_prbreq_cnt) == 1) {
1525 		llinfo->la_probeexp = (timenow + arpt_probe);
1526 		llinfo->la_flags |= LLINFO_PROBING;
1527 	}
1528 
1529 	if (route->rt_expire) {
1530 		route->rt_flags &= ~RTF_REJECT;
1531 		if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
1532 			rt_setexpire(route, timenow);
1533 			if (llinfo->la_asked++ < llinfo->la_maxtries) {
1534 				boolean_t sendkev = FALSE;
1535 
1536 				rt_ifa = route->rt_ifa;
1537 				lr = llinfo->la_llreach;
1538 				/* Become a regular mutex, just in case */
1539 				RT_CONVERT_LOCK(route);
1540 				/* Update probe count, if applicable */
1541 				if (lr != NULL) {
1542 					IFLR_LOCK_SPIN(lr);
1543 					lr->lr_probes++;
1544 					IFLR_UNLOCK(lr);
1545 				}
1546 				if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
1547 				    route->rt_flags & RTF_ROUTER &&
1548 				    llinfo->la_asked > 1) {
1549 					sendkev = TRUE;
1550 					llinfo->la_flags |= LLINFO_RTRFAIL_EVTSENT;
1551 				}
1552 				IFA_LOCK_SPIN(rt_ifa);
1553 				IFA_ADDREF_LOCKED(rt_ifa);
1554 				sa = rt_ifa->ifa_addr;
1555 				IFA_UNLOCK(rt_ifa);
1556 				arp_llreach_use(llinfo); /* Mark use tstamp */
1557 				rtflags = route->rt_flags;
1558 				RT_UNLOCK(route);
1559 				dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1560 				    NULL, (const struct sockaddr *)net_dest,
1561 				    rtflags);
1562 				IFA_REMREF(rt_ifa);
1563 				if (sendkev) {
1564 					post_kev_in_arpfailure(ifp);
1565 				}
1566 				result = EJUSTRETURN;
1567 				RT_LOCK(route);
1568 				goto release;
1569 			} else {
1570 				route->rt_flags |= RTF_REJECT;
1571 				rt_setexpire(route,
1572 				    route->rt_expire + arpt_down);
1573 				llinfo->la_asked = 0;
1574 				/*
1575 				 * Remove the packet that was just added above;
1576 				 * don't free it since we're not returning
1577 				 * EJUSTRETURN.  The caller will handle the
1578 				 * freeing.  Since we haven't dropped rt_lock
1579 				 * from the time of _addq() above, this packet
1580 				 * must be at the tail.
1581 				 */
1582 				if (packet != NULL && enqueued) {
1583 					classq_pkt_t pkt =
1584 					    CLASSQ_PKT_INITIALIZER(pkt);
1585 
1586 					_getq_tail(&llinfo->la_holdq, &pkt);
1587 					atomic_add_32(&arpstat.held, -1);
1588 					VERIFY(pkt.cp_mbuf == packet);
1589 				}
1590 				result = EHOSTUNREACH;
1591 				/*
1592 				 * Enqueue work item to invoke callback for this route entry
1593 				 */
1594 				route_event_enqueue_nwk_wq_entry(route, NULL,
1595 				    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
1596 				goto release;
1597 			}
1598 		}
1599 	}
1600 
1601 	/* The packet is now held inside la_holdq or dropped */
1602 	result = EJUSTRETURN;
1603 	if (packet != NULL && !enqueued) {
1604 		mbuf_free(packet);
1605 		packet = NULL;
1606 	}
1607 
1608 release:
1609 	if (result == EHOSTUNREACH) {
1610 		atomic_add_32(&arpstat.dropped, 1);
1611 	}
1612 
1613 	if (route != NULL) {
1614 		if (send_probe_notif) {
1615 			arp_send_probe_notification(route);
1616 		}
1617 
1618 		if (route == hint) {
1619 			RT_REMREF_LOCKED(route);
1620 			RT_UNLOCK(route);
1621 		} else {
1622 			RT_UNLOCK(route);
1623 			rtfree(route);
1624 		}
1625 	}
1626 	if (probing) {
1627 		/* Do this after we drop rt_lock to preserve ordering */
1628 		lck_mtx_lock(rnh_lock);
1629 		arp_sched_probe(NULL);
1630 		lck_mtx_unlock(rnh_lock);
1631 	}
1632 	return result;
1633 }
1634 
1635 errno_t
1636 arp_ip_handle_input(ifnet_t ifp, u_short arpop,
1637     const struct sockaddr_dl *sender_hw, const struct sockaddr_in *sender_ip,
1638     const struct sockaddr_in *target_ip)
1639 {
1640 	char ipv4str[MAX_IPv4_STR_LEN];
1641 	struct sockaddr_dl proxied = {};
1642 	struct sockaddr_dl *gateway, *target_hw = NULL;
1643 	struct ifaddr *ifa;
1644 	struct in_ifaddr *ia;
1645 	struct in_ifaddr *best_ia = NULL;
1646 	struct sockaddr_in best_ia_sin;
1647 	route_t route = NULL;
1648 	char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */
1649 	struct llinfo_arp *llinfo;
1650 	errno_t error;
1651 	int created_announcement = 0;
1652 	int bridged = 0, is_bridge = 0;
1653 	uint32_t rt_evcode = 0;
1654 
1655 	/*
1656 	 * Here and in other places within this routine where we don't
1657 	 * hold rnh_lock, we trade accuracy for speed in the common
1658 	 * scenarios and avoid the use of atomic updates.
1659 	 */
1660 	arpstat.received++;
1661 
1662 	/* Do not respond to requests for 0.0.0.0 */
1663 	if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) {
1664 		goto done;
1665 	}
1666 
1667 	if (ifp->if_bridge) {
1668 		bridged = 1;
1669 	}
1670 	if (ifp->if_type == IFT_BRIDGE) {
1671 		is_bridge = 1;
1672 	}
1673 
1674 	if (arpop == ARPOP_REPLY) {
1675 		arpstat.rxreplies++;
1676 	}
1677 
1678 	/*
1679 	 * Determine if this ARP is for us
1680 	 */
1681 	lck_rw_lock_shared(&in_ifaddr_rwlock);
1682 	TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
1683 		IFA_LOCK_SPIN(&ia->ia_ifa);
1684 		if (ia->ia_ifp == ifp &&
1685 		    ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
1686 			best_ia = ia;
1687 			best_ia_sin = best_ia->ia_addr;
1688 			IFA_ADDREF_LOCKED(&ia->ia_ifa);
1689 			IFA_UNLOCK(&ia->ia_ifa);
1690 			lck_rw_done(&in_ifaddr_rwlock);
1691 			goto match;
1692 		}
1693 		IFA_UNLOCK(&ia->ia_ifa);
1694 	}
1695 
1696 	TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
1697 		IFA_LOCK_SPIN(&ia->ia_ifa);
1698 		if (ia->ia_ifp == ifp &&
1699 		    ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1700 			best_ia = ia;
1701 			best_ia_sin = best_ia->ia_addr;
1702 			IFA_ADDREF_LOCKED(&ia->ia_ifa);
1703 			IFA_UNLOCK(&ia->ia_ifa);
1704 			lck_rw_done(&in_ifaddr_rwlock);
1705 			goto match;
1706 		}
1707 		IFA_UNLOCK(&ia->ia_ifa);
1708 	}
1709 
1710 #define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia)                                \
1711 	(ia->ia_ifp->if_bridge == ifp->if_softc &&                           \
1712 	bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
1713 	addr == ia->ia_addr.sin_addr.s_addr)
1714 	/*
1715 	 * Handle the case where the bridge shares its MAC address with
1716 	 * some of its members, so packets get claimed by the bridge
1717 	 * itself (bridge_input() does it first) even though they are
1718 	 * really destined for a bridge member.
1719 	 */
1720 	if (is_bridge) {
1721 		TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
1722 		    ia_hash) {
1723 			IFA_LOCK_SPIN(&ia->ia_ifa);
1724 			if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
1725 			    ifp, ia)) {
1726 				ifp = ia->ia_ifp;
1727 				best_ia = ia;
1728 				best_ia_sin = best_ia->ia_addr;
1729 				IFA_ADDREF_LOCKED(&ia->ia_ifa);
1730 				IFA_UNLOCK(&ia->ia_ifa);
1731 				lck_rw_done(&in_ifaddr_rwlock);
1732 				goto match;
1733 			}
1734 			IFA_UNLOCK(&ia->ia_ifa);
1735 		}
1736 	}
1737 #undef BDG_MEMBER_MATCHES_ARP
1738 	lck_rw_done(&in_ifaddr_rwlock);
1739 
1740 	/*
1741 	 * No match, use the first inet address on the receive interface
1742 	 * as a dummy address for the rest of the function; we may be
1743 	 * proxying for another address.
1744 	 */
1745 	ifnet_lock_shared(ifp);
1746 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1747 		IFA_LOCK_SPIN(ifa);
1748 		if (ifa->ifa_addr->sa_family != AF_INET) {
1749 			IFA_UNLOCK(ifa);
1750 			continue;
1751 		}
1752 		best_ia = (struct in_ifaddr *)ifa;
1753 		best_ia_sin = best_ia->ia_addr;
1754 		IFA_ADDREF_LOCKED(ifa);
1755 		IFA_UNLOCK(ifa);
1756 		ifnet_lock_done(ifp);
1757 		goto match;
1758 	}
1759 	ifnet_lock_done(ifp);
1760 
1761 	/*
1762 	 * If we're not a bridge member, or if we are but there's no
1763 	 * IPv4 address to use for the interface, drop the packet.
1764 	 */
1765 	if (!bridged || best_ia == NULL) {
1766 		goto done;
1767 	}
1768 
1769 match:
1770 	/* If the packet came from this interface, ignore it */
1771 	if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
1772 	    sender_hw->sdl_alen) == 0) {
1773 		goto done;
1774 	}
1775 
1776 	/* Check for a conflict */
1777 	if (!bridged &&
1778 	    sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
1779 		struct kev_msg ev_msg;
1780 		struct kev_in_collision *in_collision;
1781 		u_char storage[sizeof(struct kev_in_collision) + MAX_HW_LEN];
1782 
1783 		bzero(&ev_msg, sizeof(struct kev_msg));
1784 		bzero(storage, (sizeof(struct kev_in_collision) + MAX_HW_LEN));
1785 		in_collision = (struct kev_in_collision *)(void *)storage;
1786 		log(LOG_ERR, "%s duplicate IP address %s sent from "
1787 		    "address %s\n", if_name(ifp),
1788 		    inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
1789 		    sizeof(ipv4str)), sdl_addr_to_hex(sender_hw, buf,
1790 		    sizeof(buf)));
1791 
1792 		/* Send a kernel event so anyone can learn of the conflict */
1793 		in_collision->link_data.if_family = ifp->if_family;
1794 		in_collision->link_data.if_unit = ifp->if_unit;
1795 		strlcpy(&in_collision->link_data.if_name[0],
1796 		    ifp->if_name, IFNAMSIZ);
1797 		in_collision->ia_ipaddr = sender_ip->sin_addr;
1798 		in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
1799 		    sender_hw->sdl_alen : MAX_HW_LEN;
1800 		bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
1801 		    in_collision->hw_len);
1802 		ev_msg.vendor_code = KEV_VENDOR_APPLE;
1803 		ev_msg.kev_class = KEV_NETWORK_CLASS;
1804 		ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1805 		ev_msg.event_code = KEV_INET_ARPCOLLISION;
1806 		ev_msg.dv[0].data_ptr = in_collision;
1807 		ev_msg.dv[0].data_length =
1808 		    sizeof(struct kev_in_collision) + in_collision->hw_len;
1809 		ev_msg.dv[1].data_length = 0;
1810 		dlil_post_complete_msg(NULL, &ev_msg);
1811 		atomic_add_32(&arpstat.dupips, 1);
1812 		goto respond;
1813 	}
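	/*
	 * A userspace process can observe these collision events through
	 * the kernel event socket.  A minimal sketch, assuming the
	 * standard PF_SYSTEM/SYSPROTO_EVENT API from <sys/kern_event.h>:
	 *
	 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
	 *	struct kev_request kreq = {
	 *		.vendor_code  = KEV_VENDOR_APPLE,
	 *		.kev_class    = KEV_NETWORK_CLASS,
	 *		.kev_subclass = KEV_INET_SUBCLASS,
	 *	};
	 *	ioctl(fd, SIOCSKEVFILT, &kreq);
	 *	char buf[1024];
	 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
	 *	struct kern_event_msg *ev = (struct kern_event_msg *)buf;
	 *	if (n > 0 && ev->event_code == KEV_INET_ARPCOLLISION) {
	 *		; // event_data carries the kev_in_collision payload
	 *	}
	 */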
1814 
1815 	/*
1816 	 * Look up the routing entry. If it doesn't exist and we are the
1817 	 * target, and the sender isn't 0.0.0.0, go ahead and create one.
1818 	 * Callee holds a reference on the route and returns with the route
1819 	 * entry locked, upon success.
1820 	 */
1821 	error = arp_lookup_route(&sender_ip->sin_addr,
1822 	    (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
1823 	    sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);
1824 
1825 	if (error == 0) {
1826 		RT_LOCK_ASSERT_HELD(route);
1827 	}
1828 
1829 	if (error || route == NULL || route->rt_gateway == NULL) {
1830 		if (arpop != ARPOP_REQUEST) {
1831 			goto respond;
1832 		}
1833 
1834 		if (arp_sendllconflict && send_conflicting_probes != 0 &&
1835 		    (ifp->if_eflags & IFEF_ARPLL) &&
1836 		    IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
1837 		    sender_ip->sin_addr.s_addr == INADDR_ANY) {
1838 			/*
1839 			 * Verify this ARP probe doesn't conflict with
1840 			 * an IPv4LL we know of on another interface.
1841 			 */
1842 			if (route != NULL) {
1843 				RT_REMREF_LOCKED(route);
1844 				RT_UNLOCK(route);
1845 				route = NULL;
1846 			}
1847 			/*
1848 			 * Callee holds a reference on the route and returns
1849 			 * with the route entry locked, upon success.
1850 			 */
1851 			error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
1852 			    &route, ifp->if_index);
1853 
1854 			if (error != 0 || route == NULL ||
1855 			    route->rt_gateway == NULL) {
1856 				goto respond;
1857 			}
1858 
1859 			RT_LOCK_ASSERT_HELD(route);
1860 
1861 			gateway = SDL(route->rt_gateway);
1862 			if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
1863 			    (gateway->sdl_alen != sender_hw->sdl_alen ||
1864 			    bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
1865 			    gateway->sdl_alen) != 0)) {
1866 				/*
1867 				 * A node is probing for an IPv4LL we know
1868 				 * exists on a different interface. We respond
1869 				 * with a conflicting probe to force the new
1870 				 * device to pick a different IPv4LL address.
1871 				 */
1872 				if (arp_verbose || log_arp_warnings) {
1873 					log(LOG_INFO, "arp: %s on %s sent "
1874 					    "probe for %s, already on %s\n",
1875 					    sdl_addr_to_hex(sender_hw, buf,
1876 					    sizeof(buf)), if_name(ifp),
1877 					    inet_ntop(AF_INET,
1878 					    &target_ip->sin_addr, ipv4str,
1879 					    sizeof(ipv4str)),
1880 					    if_name(route->rt_ifp));
1881 					log(LOG_INFO, "arp: sending "
1882 					    "conflicting probe to %s on %s\n",
1883 					    sdl_addr_to_hex(sender_hw, buf,
1884 					    sizeof(buf)), if_name(ifp));
1885 				}
1886 				/* Mark use timestamp */
1887 				if (route->rt_llinfo != NULL) {
1888 					arp_llreach_use(route->rt_llinfo);
1889 				}
1890 				/* We're done with the route */
1891 				RT_REMREF_LOCKED(route);
1892 				RT_UNLOCK(route);
1893 				route = NULL;
1894 				/*
1895 				 * Send a conservative unicast "ARP probe".
1896 				 * This should push the other device to pick
1897 				 * a different address.  It will not force a
1898 				 * device that has already committed to that
1899 				 * address to give it up, and it does not
1900 				 * imply to the device that we own the
1901 				 * address.  The link address is always
1902 				 * present; it's never freed.
1903 				 */
1904 				ifnet_lock_shared(ifp);
1905 				ifa = ifp->if_lladdr;
1906 				IFA_ADDREF(ifa);
1907 				ifnet_lock_done(ifp);
1908 				dlil_send_arp_internal(ifp, ARPOP_REQUEST,
1909 				    SDL(ifa->ifa_addr),
1910 				    (const struct sockaddr *)sender_ip,
1911 				    sender_hw,
1912 				    (const struct sockaddr *)target_ip);
1913 				IFA_REMREF(ifa);
1914 				ifa = NULL;
1915 				atomic_add_32(&arpstat.txconflicts, 1);
1916 			}
1917 			goto respond;
1918 		} else if (keep_announcements != 0 &&
1919 		    target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1920 			/*
1921 			 * Don't create an entry if the address is link-local
1922 			 * and link-local ARP is disabled on this interface
1923 			 */
1924 			if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1925 			    (ifp->if_eflags & IFEF_ARPLL)) {
1926 				if (route != NULL) {
1927 					RT_REMREF_LOCKED(route);
1928 					RT_UNLOCK(route);
1929 					route = NULL;
1930 				}
1931 				/*
1932 				 * Callee holds a reference on the route and
1933 				 * returns with the route entry locked, upon
1934 				 * success.
1935 				 */
1936 				error = arp_lookup_route(&sender_ip->sin_addr,
1937 				    1, 0, &route, ifp->if_index);
1938 
1939 				if (error == 0) {
1940 					RT_LOCK_ASSERT_HELD(route);
1941 				}
1942 
1943 				if (error == 0 && route != NULL &&
1944 				    route->rt_gateway != NULL) {
1945 					created_announcement = 1;
1946 				}
1947 			}
1948 			if (created_announcement == 0) {
1949 				goto respond;
1950 			}
1951 		} else {
1952 			goto respond;
1953 		}
1954 	}
1955 
1956 	RT_LOCK_ASSERT_HELD(route);
1957 	VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1958 	VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1959 
1960 	gateway = SDL(route->rt_gateway);
1961 	if (!bridged && route->rt_ifp != ifp) {
1962 		if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1963 		    !(ifp->if_eflags & IFEF_ARPLL)) {
1964 			if (arp_verbose || log_arp_warnings) {
1965 				log(LOG_ERR, "arp: %s is on %s but got "
1966 				    "reply from %s on %s\n",
1967 				    inet_ntop(AF_INET, &sender_ip->sin_addr,
1968 				    ipv4str, sizeof(ipv4str)),
1969 				    if_name(route->rt_ifp),
1970 				    sdl_addr_to_hex(sender_hw, buf,
1971 				    sizeof(buf)), if_name(ifp));
1972 			}
1973 			goto respond;
1974 		} else {
1975 			/* Don't change a permanent address */
1976 			if (route->rt_expire == 0) {
1977 				goto respond;
1978 			}
1979 
1980 			/*
1981 			 * We're about to check and/or change the route's ifp
1982 			 * and ifa, so do the lock dance: drop rt_lock, hold
1983 			 * rnh_lock and re-hold rt_lock to avoid violating the
1984 			 * lock ordering.  We have an extra reference on the
1985 			 * route, so it won't go away while we do this.
1986 			 */
1987 			RT_UNLOCK(route);
1988 			lck_mtx_lock(rnh_lock);
1989 			RT_LOCK(route);
1990 			/*
1991 			 * Don't change the cloned route away from the
1992 			 * parent's interface if the address did resolve
1993 			 * or if the route is defunct.  rt_ifp on both
1994 			 * the parent and the clone can be freely
1995 			 * accessed now that we have acquired rnh_lock.
1996 			 */
1997 			gateway = SDL(route->rt_gateway);
1998 			if ((gateway->sdl_alen != 0 &&
1999 			    route->rt_parent != NULL &&
2000 			    route->rt_parent->rt_ifp == route->rt_ifp) ||
2001 			    (route->rt_flags & RTF_CONDEMNED)) {
2002 				RT_REMREF_LOCKED(route);
2003 				RT_UNLOCK(route);
2004 				route = NULL;
2005 				lck_mtx_unlock(rnh_lock);
2006 				goto respond;
2007 			}
2008 			if (route->rt_ifp != ifp) {
2009 				/*
2010 				 * Purge any cached link-layer information.
2011 				 */
2012 				if (route->rt_llinfo_purge != NULL) {
2013 					route->rt_llinfo_purge(route);
2014 				}
2015 
2016 				/* Adjust route ref count for the interfaces */
2017 				if (route->rt_if_ref_fn != NULL) {
2018 					route->rt_if_ref_fn(ifp, 1);
2019 					route->rt_if_ref_fn(route->rt_ifp, -1);
2020 				}
2021 			}
2022 			/* Move the existing route over to the receiving interface */
2023 			route->rt_ifp = ifp;
2024 			/*
2025 			 * If rmx_mtu is not locked, update it
2026 			 * to the MTU used by the new interface.
2027 			 */
2028 			if (!(route->rt_rmx.rmx_locks & RTV_MTU)) {
2029 				route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
2030 				if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
2031 					route->rt_rmx.rmx_mtu = IN6_LINKMTU(route->rt_ifp);
2032 					/* Further adjust the size for CLAT46 expansion */
2033 					route->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
2034 				}
2035 			}
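			/*
			 * Worked example (assuming CLAT46_HDR_EXPANSION_OVERHD
			 * is the IPv6-vs-IPv4 header size delta, i.e.
			 * 40 - 20 = 20 bytes): on a CLAT46 interface whose
			 * IPv6 link MTU is 1500, the route MTU comes out to
			 * 1500 - 20 = 1480, so a translated packet still fits
			 * after the IPv4 header is replaced by the larger
			 * IPv6 header.
			 */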
2036 
2037 			rtsetifa(route, &best_ia->ia_ifa);
2038 			gateway->sdl_index = ifp->if_index;
2039 			RT_UNLOCK(route);
2040 			lck_mtx_unlock(rnh_lock);
2041 			RT_LOCK(route);
2042 			/* Don't bother if the route is down */
2043 			if (!(route->rt_flags & RTF_UP)) {
2044 				goto respond;
2045 			}
2046 			/* Refresh gateway pointer */
2047 			gateway = SDL(route->rt_gateway);
2048 		}
2049 		RT_LOCK_ASSERT_HELD(route);
2050 	}
2051 
2052 	if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
2053 	    CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
2054 		if (route->rt_expire != 0 &&
2055 		    (arp_verbose || log_arp_warnings)) {
2056 			char buf2[3 * MAX_HW_LEN];
2057 			log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
2058 			    inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
2059 			    sizeof(ipv4str)),
2060 			    sdl_addr_to_hex(gateway, buf, sizeof(buf)),
2061 			    sdl_addr_to_hex(sender_hw, buf2, sizeof(buf2)),
2062 			    if_name(ifp));
2063 		} else if (route->rt_expire == 0) {
2064 			if (arp_verbose || log_arp_warnings) {
2065 				log(LOG_ERR, "arp: %s attempts to modify "
2066 				    "permanent entry for %s on %s\n",
2067 				    sdl_addr_to_hex(sender_hw, buf,
2068 				    sizeof(buf)),
2069 				    inet_ntop(AF_INET, &sender_ip->sin_addr,
2070 				    ipv4str, sizeof(ipv4str)),
2071 				    if_name(ifp));
2072 			}
2073 			goto respond;
2074 		}
2075 	}
2076 
2077 	/* Copy the sender hardware address into the route's gateway address */
2078 	gateway->sdl_alen = sender_hw->sdl_alen;
2079 	bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);
2080 
2081 	/* Update the expire time for the route and clear the reject flag */
2082 	if (route->rt_expire != 0) {
2083 		rt_setexpire(route, net_uptime() + arpt_keep);
2084 	}
2085 	route->rt_flags &= ~RTF_REJECT;
2086 
2087 	/* Cache the gateway (sender HW) address */
2088 	arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
2089 	    (arpop == ARPOP_REPLY), &rt_evcode);
2090 
2091 	llinfo = route->rt_llinfo;
2092 	/* Send a notification that the route is back up */
2093 	if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
2094 	    route->rt_flags & RTF_ROUTER &&
2095 	    llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
2096 		struct kev_msg ev_msg;
2097 		struct kev_in_arpalive in_arpalive;
2098 
2099 		llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
2100 		RT_UNLOCK(route);
2101 		bzero(&ev_msg, sizeof(ev_msg));
2102 		bzero(&in_arpalive, sizeof(in_arpalive));
2103 		in_arpalive.link_data.if_family = ifp->if_family;
2104 		in_arpalive.link_data.if_unit = ifp->if_unit;
2105 		strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
2106 		ev_msg.vendor_code = KEV_VENDOR_APPLE;
2107 		ev_msg.kev_class = KEV_NETWORK_CLASS;
2108 		ev_msg.kev_subclass = KEV_INET_SUBCLASS;
2109 		ev_msg.event_code = KEV_INET_ARPRTRALIVE;
2110 		ev_msg.dv[0].data_ptr = &in_arpalive;
2111 		ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
2112 		dlil_post_complete_msg(NULL, &ev_msg);
2113 		RT_LOCK(route);
2114 	}
2115 	/* Update the llinfo and send out all queued packets at once */
2116 	llinfo->la_asked = 0;
2117 	llinfo->la_flags &= ~LLINFO_PROBING;
2118 	llinfo->la_prbreq_cnt = 0;
2119 
2120 	if (rt_evcode) {
2121 		/*
2122 		 * Enqueue work item to invoke callback for this route entry
2123 		 */
2124 		route_event_enqueue_nwk_wq_entry(route, NULL, rt_evcode, NULL, TRUE);
2125 
2126 		if (route->rt_flags & RTF_ROUTER) {
2127 			struct radix_node_head  *rnh = NULL;
2128 			struct route_event rt_ev;
2129 			route_event_init(&rt_ev, route, NULL, rt_evcode);
2130 			/*
2131 			 * We already have a reference on the route.  The
2132 			 * function frees it before returning.
2133 			 */
2134 			RT_UNLOCK(route);
2135 			lck_mtx_lock(rnh_lock);
2136 			rnh = rt_tables[AF_INET];
2137 
2138 			if (rnh != NULL) {
2139 				(void) rnh->rnh_walktree(rnh, route_event_walktree,
2140 				    (void *)&rt_ev);
2141 			}
2142 			lck_mtx_unlock(rnh_lock);
2143 			RT_LOCK(route);
2144 		}
2145 	}
2146 
2147 	if (!qempty(&llinfo->la_holdq)) {
2148 		uint32_t held;
2149 		struct mbuf *m0;
2150 		classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
2151 
2152 		_getq_all(&llinfo->la_holdq, &pkt, NULL, &held, NULL);
2153 		m0 = pkt.cp_mbuf;
2154 		if (arp_verbose) {
2155 			log(LOG_DEBUG, "%s: sending %u held packets\n",
2156 			    __func__, held);
2157 		}
2158 		atomic_add_32(&arpstat.held, -held);
2159 		VERIFY(qempty(&llinfo->la_holdq));
2160 		RT_UNLOCK(route);
2161 		dlil_output(ifp, PF_INET, m0, (caddr_t)route,
2162 		    rt_key(route), 0, NULL);
2163 		RT_REMREF(route);
2164 		route = NULL;
2165 	}
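	/*
	 * Note: _getq_all() hands back the entire hold queue as a single
	 * packet chain (linked through m_nextpkt), so the one dlil_output()
	 * call above is expected to flush, in order, every datagram that
	 * was waiting on this resolution.
	 */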
2166 
2167 respond:
2168 	if (route != NULL) {
2169 		/* Mark use timestamp if we're going to send a reply */
2170 		if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) {
2171 			arp_llreach_use(route->rt_llinfo);
2172 		}
2173 		RT_REMREF_LOCKED(route);
2174 		RT_UNLOCK(route);
2175 		route = NULL;
2176 	}
2177 
2178 	if (arpop != ARPOP_REQUEST) {
2179 		goto done;
2180 	}
2181 
2182 	/* See comments at the beginning of this routine */
2183 	arpstat.rxrequests++;
2184 
2185 	/* If we are not the target, check if we should proxy */
2186 	if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
2187 		/*
2188 		 * Find a proxy route; callee holds a reference on the
2189 		 * route and returns with the route entry locked, upon
2190 		 * success.
2191 		 */
2192 		error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
2193 		    &route, ifp->if_index);
2194 
2195 		if (error == 0) {
2196 			RT_LOCK_ASSERT_HELD(route);
2197 			/*
2198 			 * Return proxied ARP replies only on the interface
2199 			 * or bridge cluster where this network resides.
2200 			 * Otherwise we may conflict with the host we are
2201 			 * proxying for.
2202 			 */
2203 			if (route->rt_ifp != ifp &&
2204 			    (route->rt_ifp->if_bridge != ifp->if_bridge ||
2205 			    ifp->if_bridge == NULL)) {
2206 				RT_REMREF_LOCKED(route);
2207 				RT_UNLOCK(route);
2208 				goto done;
2209 			}
2210 			proxied = *SDL(route->rt_gateway);
2211 			target_hw = &proxied;
2212 		} else {
2213 			/*
2214 			 * We don't have a route entry indicating we should
2215 			 * proxy for this target.  If we aren't supposed to
2216 			 * proxy for everything, we are done.
2217 			 */
2218 			if (!arp_proxyall) {
2219 				goto done;
2220 			}
2221 
2222 			/*
2223 			 * See if we have a route to the target IP before
2224 			 * we proxy it.
2225 			 */
2226 			route = rtalloc1_scoped((struct sockaddr *)
2227 			    (size_t)target_ip, 0, 0, ifp->if_index);
2228 			if (!route) {
2229 				goto done;
2230 			}
2231 
2232 			/*
2233 			 * Don't proxy for hosts already on the same interface.
2234 			 */
2235 			RT_LOCK(route);
2236 			if (route->rt_ifp == ifp) {
2237 				RT_UNLOCK(route);
2238 				rtfree(route);
2239 				goto done;
2240 			}
2241 		}
2242 		/* Mark use timestamp */
2243 		if (route->rt_llinfo != NULL) {
2244 			arp_llreach_use(route->rt_llinfo);
2245 		}
2246 		RT_REMREF_LOCKED(route);
2247 		RT_UNLOCK(route);
2248 	}
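	/*
	 * Note: arp_proxyall defaults to off and is presumably toggled
	 * through sysctl (registered elsewhere in this file, conventionally
	 * as net.link.ether.inet.proxyall).  When enabled, this host
	 * answers for any target it can route to via a different interface.
	 */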
2249 
2250 	dlil_send_arp(ifp, ARPOP_REPLY,
2251 	    target_hw, (const struct sockaddr *)target_ip,
2252 	    sender_hw, (const struct sockaddr *)sender_ip, 0);
2253 
2254 done:
2255 	if (best_ia != NULL) {
2256 		IFA_REMREF(&best_ia->ia_ifa);
2257 	}
2258 	return 0;
2259 }
2260 
2261 void
2262 arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
2263 {
2264 	struct sockaddr *sa;
2265 
2266 	IFA_LOCK(ifa);
2267 	ifa->ifa_rtrequest = arp_rtrequest;
2268 	ifa->ifa_flags |= RTF_CLONING;
2269 	sa = ifa->ifa_addr;
2270 	IFA_UNLOCK(ifa);
2271 	dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
2272 }
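/*
 * Note: the dlil_send_arp() call above passes the freshly configured
 * address as both the sender and the target, which makes it a gratuitous
 * ARP: it announces the address so neighbors refresh their caches, and
 * it surfaces any address conflict early.
 */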
2273 
2274 static int
2275 arp_getstat SYSCTL_HANDLER_ARGS
2276 {
2277 #pragma unused(oidp, arg1, arg2)
2278 	if (req->oldptr == USER_ADDR_NULL) {
2279 		req->oldlen = (size_t)sizeof(struct arpstat);
2280 	}
2281 
2282 	return SYSCTL_OUT(req, &arpstat, MIN(sizeof(arpstat), req->oldlen));
2283 }
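/*
 * Userspace example, a minimal sketch (assuming the handler above is
 * registered under "net.link.ether.inet.stats" via the usual SYSCTL_PROC
 * setup elsewhere in this file).  The field names used below all appear
 * in the arpstat updates earlier in this routine:
 *
 *	struct arpstat st;
 *	size_t len = sizeof(st);
 *	if (sysctlbyname("net.link.ether.inet.stats", &st, &len,
 *	    NULL, 0) == 0) {
 *		printf("received %u, held %u, dropped %u\n",
 *		    st.received, st.held, st.dropped);
 *	}
 *
 * Passing oldptr == NULL first (as handled above) is the standard way
 * to size the buffer before fetching the structure.
 */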
2284