/*
 * Copyright (c) 2011-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Link-layer Reachability Record
 *
 * Each interface maintains a red-black tree containing records for the
 * on-link nodes with which we are interested in communicating.  A record
 * gets allocated and inserted into the tree in either of two cases: upon
 * processing an ARP announcement or reply from a known node (i.e. an ARP
 * route entry exists for the node) when no link-layer reachability record
 * yet exists for it; and upon processing an ND6 RS/RA/NS/NA/redirect from
 * a node when no link-layer reachability record yet exists for it.
 *
 * Each newly created record is then referred to by the resolver route entry;
 * if a record already exists, its reference count is increased for the new
 * resolver entry that now refers to it.  A record is removed from the tree
 * and freed once its reference count drops to zero, i.e. when no resolver
 * entry refers to it any longer.
 *
 * A record contains the link-layer protocol (e.g. Ethertype IP/IPv6), the
 * HW address of the sender, the "last heard from" timestamp (lr_lastrcvd) and
 * the number of references made to it (lr_reqcnt).  Because the key for each
 * record in the red-black tree includes the link-layer protocol, the
 * namespace of the records is partitioned by link-layer protocol type,
 * i.e. an Ethertype IP link-layer record is only referred to by one or
 * more ARP entries, and an Ethertype IPv6 link-layer record is only
 * referred to by one or more ND6 entries.  Thus lr_reqcnt represents the
 * number of resolver entry references to the record for the same protocol
 * family.
 *
 * Upon receiving packets from the network, the protocol's input callback
 * (e.g. ether_inet{6}_input) informs the corresponding resolver (ARP/ND6)
 * about the (link-layer) origin of the packet.  This results in a search
 * for a matching record in the red-black tree of the interface on which
 * the packet arrived.  If there is no match, no further processing takes
 * place.  Otherwise, the lr_lastrcvd timestamp of the record is updated.
 *
 * When an IP/IPv6 packet is transmitted to the resolver (i.e. the destination
 * is on-link), ARP/ND6 records the "last spoken to" timestamp in the route
 * entry ({la,ln}_lastused).
 *
 * The reachability of the on-link node is determined by the following logic,
 * upon sending a packet through the resolver (an illustrative sketch follows
 * this list):
 *
 *   a) If the record is used by exactly one resolver entry (lr_reqcnt is 1),
 *	i.e. the target host does not have IP/IPv6 aliases that we know of,
 *	check if lr_lastrcvd is "recent."  If so, simply send the packet;
 *	otherwise, re-resolve the target node.
 *
 *   b) If the record is shared by multiple resolver entries (lr_reqcnt is
 *	greater than 1), i.e. the target host has more than one IP/IPv6 alias
 *	on the same network interface, we can't rely on lr_lastrcvd alone, as
 *	one of the IP/IPv6 aliases could have been silently moved to another
 *	node for which we don't have a link-layer record.  If lr_lastrcvd is
 *	not "recent", we re-resolve the target node.  Otherwise, we perform
 *	an additional check against {la,ln}_lastused to see whether it is also
 *	"recent", relative to lr_lastrcvd.  If so, simply send the packet;
 *	otherwise, re-resolve the target node.
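 *
 *	Illustrative sketch only (recent() is a hypothetical helper standing
 *	in for the "recent" checks; see iflr_reachable() and the resolvers
 *	for the actual code):
 *
 *		reachable = recent(lr_lastrcvd);
 *		if (lr_reqcnt > 1)
 *			reachable = reachable && recent({la,ln}_lastused);
 *		if (!reachable)
 *			re-resolve the target node;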
 *
 * The value of "recent" is configurable by adjusting the basetime value for
 * net.link.ether.inet.arp_llreach_base or net.inet6.icmp6.nd6_llreach_base.
 * The default basetime value is 30 seconds, and the actual expiration time
 * is calculated by multiplying the basetime value by a random factor,
 * which results in a number between 15 and 45 seconds.  Setting the basetime
 * value to 0 effectively disables this feature for the corresponding resolver.
 *
 * Assumptions:
 *
 * The above logic is based upon the following assumptions:
 *
 *   i) Network traffic is mostly bidirectional, i.e. the act of sending
 *	packets to an on-link node would most likely cause us to receive
 *	packets from that node.
 *
 *  ii) If the on-link node's IP/IPv6 address silently moves to another
 *	on-link node of which we are not aware, non-unicast packets from
 *	the old node would keep the record's lr_lastrcvd recent.
 *
 * We can mitigate the above by having the resolver check its {la,ln}_lastused
 * timestamp at all times, i.e. not only when lr_reqcnt is greater than 1; but
 * we currently optimize for the common cases.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/tree.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/protosw.h>

#include <dev/random/randomdev.h>

#include <net/if_dl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llreach.h>
#include <net/dlil.h>
#include <net/kpi_interface.h>
#include <net/route.h>

#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/zalloc.h>

#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>

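/* Zone for if_llreach structures; elements are zeroed when freed */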
static ZONE_DECLARE(iflr_zone, "if_llreach", sizeof(struct if_llreach),
    ZC_ZFREE_CLEARMEM);

static struct if_llreach *iflr_alloc(zalloc_flags_t);
static void iflr_free(struct if_llreach *);
static __inline int iflr_cmp(const struct if_llreach *,
    const struct if_llreach *);
static __inline int iflr_reachable(struct if_llreach *, int, u_int64_t);
static int sysctl_llreach_ifinfo SYSCTL_HANDLER_ARGS;

/* The following is protected by if_llreach_lock */
RB_GENERATE_PREV(ll_reach_tree, if_llreach, lr_link, iflr_cmp);

SYSCTL_DECL(_net_link_generic_system);

SYSCTL_NODE(_net_link_generic_system, OID_AUTO, llreach_info,
    CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_llreach_ifinfo,
    "Per-interface tree of source link-layer reachability records");

/*
 * Link-layer reachability is based on node constants in RFC 4861.
 */
#define LL_COMPUTE_RTIME(x)     ND_COMPUTE_RTIME(x)
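/*
 * Example (per the header comment above): with the default basetime of
 * 30 seconds, lr_reachable = LL_COMPUTE_RTIME(30 * 1000) evaluates to a
 * randomized value between 15 and 45 seconds.
 */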

void
ifnet_llreach_ifattach(struct ifnet *ifp, boolean_t reuse)
{
	lck_rw_lock_exclusive(&ifp->if_llreach_lock);
	/* Initialize link-layer source tree (if not already) */
	if (!reuse) {
		RB_INIT(&ifp->if_ll_srcs);
	}
	lck_rw_done(&ifp->if_llreach_lock);
}

void
ifnet_llreach_ifdetach(struct ifnet *ifp)
{
#pragma unused(ifp)
	/*
	 * Nothing to do for now; the link-layer source tree might
	 * contain entries at this point that are still referred
	 * to by route entries pointing to this ifp.
	 */
}

/*
 * Link-layer source tree comparison function.
 *
 * An ordered predicate is necessary; bcmp() is not documented to return
 * an indication of order, but memcmp() is, and it is an ISO C99 requirement.
 */
static __inline int
iflr_cmp(const struct if_llreach *a, const struct if_llreach *b)
{
	return memcmp(&a->lr_key, &b->lr_key, sizeof(a->lr_key));
}

static __inline int
iflr_reachable(struct if_llreach *lr, int cmp_delta, u_int64_t tval)
{
	u_int64_t now;
	u_int64_t expire;

	now = net_uptime();             /* current approx. uptime */
	/*
	 * No need for lr_lock; atomically read the last rcvd uptime.
	 */
	expire = lr->lr_lastrcvd + lr->lr_reachable;
	/*
	 * If we haven't heard back from the node for over
	 * lr_reachable seconds, consider that the host is no
	 * longer reachable.
	 */
	if (!cmp_delta) {
		return expire >= now;
	}
	/*
	 * If the caller supplied a reference time, consider the
	 * host reachable if the record hasn't expired (see above)
	 * and if the reference time is within the past lr_reachable
	 * seconds.
	 */
	return (expire >= now) && (now - tval) < lr->lr_reachable;
}

int
ifnet_llreach_reachable(struct if_llreach *lr)
{
	/*
	 * Check whether the cache is too old to be trusted.
	 */
	return iflr_reachable(lr, 0, 0);
}

int
ifnet_llreach_reachable_delta(struct if_llreach *lr, u_int64_t tval)
{
	/*
	 * Check whether the cache is too old to be trusted.
	 */
	return iflr_reachable(lr, 1, tval);
}

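/*
 * Update the "last heard from" timestamp of the existing record, if any,
 * for the (llproto, addr) sender on this interface.
 */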
void
ifnet_llreach_set_reachable(struct ifnet *ifp, u_int16_t llproto, void *addr,
    unsigned int alen)
{
	struct if_llreach find, *lr;

	VERIFY(alen == IF_LLREACH_MAXLEN);      /* for now */

	find.lr_key.proto = llproto;
	bcopy(addr, &find.lr_key.addr, IF_LLREACH_MAXLEN);

	lck_rw_lock_shared(&ifp->if_llreach_lock);
	lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
	if (lr == NULL) {
		lck_rw_done(&ifp->if_llreach_lock);
		return;
	}
	/*
	 * No need for lr_lock; atomically update the last rcvd uptime.
	 */
	lr->lr_lastrcvd = net_uptime();
	lck_rw_done(&ifp->if_llreach_lock);
}

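/*
 * Find or create a reachability record for (llproto, addr) on this
 * interface; returns the record with a reference held for the caller,
 * or NULL if llreach_base is 0 (feature disabled).
 */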
struct if_llreach *
ifnet_llreach_alloc(struct ifnet *ifp, u_int16_t llproto, void *addr,
    unsigned int alen, u_int32_t llreach_base)
{
	struct if_llreach find, *lr;
	struct timeval cnow;

	if (llreach_base == 0) {
		return NULL;
	}

	VERIFY(alen == IF_LLREACH_MAXLEN);      /* for now */

	find.lr_key.proto = llproto;
	bcopy(addr, &find.lr_key.addr, IF_LLREACH_MAXLEN);

	lck_rw_lock_shared(&ifp->if_llreach_lock);
	lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
	if (lr != NULL) {
found:
		IFLR_LOCK(lr);
		VERIFY(lr->lr_reqcnt >= 1);
		lr->lr_reqcnt++;
		VERIFY(lr->lr_reqcnt != 0);
		IFLR_ADDREF_LOCKED(lr);         /* for caller */
		lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */
		IFLR_UNLOCK(lr);
		lck_rw_done(&ifp->if_llreach_lock);
		return lr;
	}

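	/*
	 * Upgrade to writer; if the upgrade fails, the shared lock is
	 * dropped, so reacquire the lock exclusively from scratch.
	 */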
	if (!lck_rw_lock_shared_to_exclusive(&ifp->if_llreach_lock)) {
		lck_rw_lock_exclusive(&ifp->if_llreach_lock);
	}

	LCK_RW_ASSERT(&ifp->if_llreach_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* in case things have changed while becoming writer */
	lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
	if (lr != NULL) {
		goto found;
	}

	lr = iflr_alloc(Z_WAITOK);

	IFLR_LOCK(lr);
	lr->lr_reqcnt++;
	VERIFY(lr->lr_reqcnt == 1);
	IFLR_ADDREF_LOCKED(lr);                 /* for RB tree */
	IFLR_ADDREF_LOCKED(lr);                 /* for caller */
	lr->lr_lastrcvd = net_uptime();         /* current approx. uptime */
	lr->lr_baseup = lr->lr_lastrcvd;        /* base uptime */
	getmicrotime(&cnow);
	lr->lr_basecal = cnow.tv_sec;           /* base calendar time */
	lr->lr_basereachable = llreach_base;
	lr->lr_reachable = LL_COMPUTE_RTIME(lr->lr_basereachable * 1000);
	lr->lr_debug |= IFD_ATTACHED;
	lr->lr_ifp = ifp;
	lr->lr_key.proto = llproto;
	bcopy(addr, &lr->lr_key.addr, IF_LLREACH_MAXLEN);
	lr->lr_rssi = IFNET_RSSI_UNKNOWN;
	lr->lr_lqm = IFNET_LQM_THRESH_UNKNOWN;
	lr->lr_npm = IFNET_NPM_THRESH_UNKNOWN;
	RB_INSERT(ll_reach_tree, &ifp->if_ll_srcs, lr);
	IFLR_UNLOCK(lr);
	lck_rw_done(&ifp->if_llreach_lock);

	return lr;
}

void
ifnet_llreach_free(struct if_llreach *lr)
{
	struct ifnet *ifp;

	/* no need to lock here; lr_ifp never changes */
	ifp = lr->lr_ifp;

	lck_rw_lock_exclusive(&ifp->if_llreach_lock);
	IFLR_LOCK(lr);
	if (lr->lr_reqcnt == 0) {
		panic("%s: lr=%p negative reqcnt", __func__, lr);
		/* NOTREACHED */
	}
	--lr->lr_reqcnt;
	if (lr->lr_reqcnt > 0) {
		IFLR_UNLOCK(lr);
		lck_rw_done(&ifp->if_llreach_lock);
		IFLR_REMREF(lr);                /* for caller */
		return;
	}
	if (!(lr->lr_debug & IFD_ATTACHED)) {
		panic("%s: Attempt to detach an unattached llreach lr=%p",
		    __func__, lr);
		/* NOTREACHED */
	}
	lr->lr_debug &= ~IFD_ATTACHED;
	RB_REMOVE(ll_reach_tree, &ifp->if_ll_srcs, lr);
	IFLR_UNLOCK(lr);
	lck_rw_done(&ifp->if_llreach_lock);

	IFLR_REMREF(lr);                        /* for RB tree */
	IFLR_REMREF(lr);                        /* for caller */
}

u_int64_t
ifnet_llreach_up2calexp(struct if_llreach *lr, u_int64_t uptime)
{
	u_int64_t calendar = 0;

	if (uptime != 0) {
		struct timeval cnow;
		u_int64_t unow;

		getmicrotime(&cnow);    /* current calendar time */
		unow = net_uptime();    /* current approx. uptime */
		/*
		 * Take into account possible calendar time changes;
		 * adjust base calendar value if necessary, i.e.
		 * the calendar skew should equate to the uptime skew.
		 */
		lr->lr_basecal += (cnow.tv_sec - lr->lr_basecal) -
		    (unow - lr->lr_baseup);

		calendar = lr->lr_basecal + lr->lr_reachable +
		    (uptime - lr->lr_baseup);
	}

	return calendar;
}

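/*
 * Convert an uptime timestamp to the corresponding expiration uptime,
 * i.e. lr_reachable seconds past the given timestamp.
 */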
u_int64_t
ifnet_llreach_up2upexp(struct if_llreach *lr, u_int64_t uptime)
{
	return lr->lr_reachable + uptime;
}

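/*
 * Look up the default router for the given address family on this
 * interface and, if its gateway route is up, export the gateway's
 * link-layer reachability info to the caller.
 */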
int
ifnet_llreach_get_defrouter(struct ifnet *ifp, sa_family_t af,
    struct ifnet_llreach_info *iflri)
{
	struct radix_node_head *rnh;
	struct sockaddr_storage dst_ss, mask_ss;
	struct rtentry *rt;
	int error = ESRCH;

	VERIFY(ifp != NULL && iflri != NULL &&
	    (af == AF_INET || af == AF_INET6));

	bzero(iflri, sizeof(*iflri));

	if ((rnh = rt_tables[af]) == NULL) {
		return error;
	}

	bzero(&dst_ss, sizeof(dst_ss));
	bzero(&mask_ss, sizeof(mask_ss));
	dst_ss.ss_family = af;
	dst_ss.ss_len = (af == AF_INET) ? sizeof(struct sockaddr_in) :
	    sizeof(struct sockaddr_in6);

	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, SA(&dst_ss), SA(&mask_ss), rnh, ifp->if_index);
	if (rt != NULL) {
		struct rtentry *gwrt;

		RT_LOCK(rt);
		if ((rt->rt_flags & RTF_GATEWAY) &&
		    (gwrt = rt->rt_gwroute) != NULL &&
		    rt_key(rt)->sa_family == rt_key(gwrt)->sa_family &&
		    (gwrt->rt_flags & RTF_UP)) {
			RT_UNLOCK(rt);
			RT_LOCK(gwrt);
			if (gwrt->rt_llinfo_get_iflri != NULL) {
				(*gwrt->rt_llinfo_get_iflri)(gwrt, iflri);
				error = 0;
			}
			RT_UNLOCK(gwrt);
		} else {
			RT_UNLOCK(rt);
		}
		rtfree_locked(rt);
	}
	lck_mtx_unlock(rnh_lock);

	return error;
}

static struct if_llreach *
iflr_alloc(zalloc_flags_t how)
{
	struct if_llreach *lr = zalloc_flags(iflr_zone, how | Z_ZERO);

	if (lr) {
		lck_mtx_init(&lr->lr_lock, &ifnet_lock_group, &ifnet_lock_attr);
		lr->lr_debug |= IFD_ALLOC;
	}
	return lr;
}

static void
iflr_free(struct if_llreach *lr)
{
	IFLR_LOCK(lr);
	if (lr->lr_debug & IFD_ATTACHED) {
		panic("%s: attached lr=%p is being freed", __func__, lr);
		/* NOTREACHED */
	} else if (!(lr->lr_debug & IFD_ALLOC)) {
		panic("%s: lr %p cannot be freed", __func__, lr);
		/* NOTREACHED */
	} else if (lr->lr_refcnt != 0) {
		panic("%s: non-zero refcount lr=%p", __func__, lr);
		/* NOTREACHED */
	} else if (lr->lr_reqcnt != 0) {
		panic("%s: non-zero reqcnt lr=%p", __func__, lr);
		/* NOTREACHED */
	}
	lr->lr_debug &= ~IFD_ALLOC;
	IFLR_UNLOCK(lr);

	lck_mtx_destroy(&lr->lr_lock, &ifnet_lock_group);
	zfree(iflr_zone, lr);
}

void
iflr_addref(struct if_llreach *lr, int locked)
{
	if (!locked) {
		IFLR_LOCK(lr);
	} else {
		IFLR_LOCK_ASSERT_HELD(lr);
	}

	if (++lr->lr_refcnt == 0) {
		panic("%s: lr=%p wraparound refcnt", __func__, lr);
		/* NOTREACHED */
	}
	if (!locked) {
		IFLR_UNLOCK(lr);
	}
}

void
iflr_remref(struct if_llreach *lr)
{
	IFLR_LOCK(lr);
	if (lr->lr_refcnt == 0) {
		panic("%s: lr=%p negative refcnt", __func__, lr);
		/* NOTREACHED */
	}
	--lr->lr_refcnt;
	if (lr->lr_refcnt > 0) {
		IFLR_UNLOCK(lr);
		return;
	}
	IFLR_UNLOCK(lr);

	iflr_free(lr);  /* deallocate it */
}

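/*
 * Export an if_llreach record to a struct rt_reach_info, by way of the
 * if_llreach_info representation.
 */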
void
ifnet_lr2ri(struct if_llreach *lr, struct rt_reach_info *ri)
{
	struct if_llreach_info lri;

	IFLR_LOCK_ASSERT_HELD(lr);

	bzero(ri, sizeof(*ri));
	ifnet_lr2lri(lr, &lri);
	ri->ri_refcnt = lri.lri_refcnt;
	ri->ri_probes = lri.lri_probes;
	ri->ri_rcv_expire = lri.lri_expire;
	ri->ri_rssi = lri.lri_rssi;
	ri->ri_lqm = lri.lri_lqm;
	ri->ri_npm = lri.lri_npm;
}

void
ifnet_lr2iflri(struct if_llreach *lr, struct ifnet_llreach_info *iflri)
{
	IFLR_LOCK_ASSERT_HELD(lr);

	bzero(iflri, sizeof(*iflri));
	/*
	 * Note here we return request count, not actual memory refcnt.
	 */
	iflri->iflri_refcnt = lr->lr_reqcnt;
	iflri->iflri_probes = lr->lr_probes;
	iflri->iflri_rcv_expire = ifnet_llreach_up2upexp(lr, lr->lr_lastrcvd);
	iflri->iflri_curtime = net_uptime();
	switch (lr->lr_key.proto) {
	case ETHERTYPE_IP:
		iflri->iflri_netproto = PF_INET;
		break;
	case ETHERTYPE_IPV6:
		iflri->iflri_netproto = PF_INET6;
		break;
	default:
		/*
		 * This shouldn't be possible for the time being,
		 * since link-layer reachability records are only
		 * kept for ARP and ND6.
		 */
		iflri->iflri_netproto = PF_UNSPEC;
		break;
	}
	bcopy(&lr->lr_key.addr, &iflri->iflri_addr, IF_LLREACH_MAXLEN);
	iflri->iflri_rssi = lr->lr_rssi;
	iflri->iflri_lqm = lr->lr_lqm;
	iflri->iflri_npm = lr->lr_npm;
}

void
ifnet_lr2lri(struct if_llreach *lr, struct if_llreach_info *lri)
{
	IFLR_LOCK_ASSERT_HELD(lr);

	bzero(lri, sizeof(*lri));
	/*
	 * Note here we return request count, not actual memory refcnt.
	 */
	lri->lri_refcnt = lr->lr_reqcnt;
	lri->lri_ifindex = lr->lr_ifp->if_index;
	lri->lri_probes = lr->lr_probes;
	lri->lri_expire = ifnet_llreach_up2calexp(lr, lr->lr_lastrcvd);
	lri->lri_proto = lr->lr_key.proto;
	bcopy(&lr->lr_key.addr, &lri->lri_addr, IF_LLREACH_MAXLEN);
	lri->lri_rssi = lr->lr_rssi;
	lri->lri_lqm = lr->lr_lqm;
	lri->lri_npm = lr->lr_npm;
}

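/*
 * Sysctl handler: given an ifindex, walk that interface's link-layer
 * source tree and copy out one if_llreach_info record per entry.
 */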
static int
sysctl_llreach_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int             *name, retval = 0;
	unsigned int    namelen;
	uint32_t        ifindex;
	struct if_llreach *lr;
	struct if_llreach_info lri = {};
	struct ifnet    *ifp;

	name = (int *)arg1;
	namelen = (unsigned int)arg2;

	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	if (namelen != 1) {
		return EINVAL;
	}

	ifindex = name[0];
	ifnet_head_lock_shared();
	if (ifindex <= 0 || ifindex > (u_int)if_index) {
		printf("%s: ifindex %u out of range\n", __func__, ifindex);
		ifnet_head_done();
		return ENOENT;
	}

	ifp = ifindex2ifnet[ifindex];
	ifnet_head_done();
	if (ifp == NULL) {
		printf("%s: no ifp for ifindex %u\n", __func__, ifindex);
		return ENOENT;
	}

	lck_rw_lock_shared(&ifp->if_llreach_lock);
	RB_FOREACH(lr, ll_reach_tree, &ifp->if_ll_srcs) {
		/* Export to if_llreach_info structure */
		IFLR_LOCK(lr);
		ifnet_lr2lri(lr, &lri);
		IFLR_UNLOCK(lr);

		if ((retval = SYSCTL_OUT(req, &lri, sizeof(lri))) != 0) {
			break;
		}
	}
	lck_rw_done(&ifp->if_llreach_lock);

	return retval;
}
649