/*
 * Copyright (c) 2011-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Link-layer Reachability Record
 *
 * Each interface maintains a red-black tree that contains records for
 * the on-link nodes we are interested in communicating with.  Each
 * record gets allocated and inserted into the tree in the following manner:
 * upon processing an ARP announcement or reply from a known node (i.e. there
 * exists an ARP route entry for the node), if a link-layer reachability
 * record for the node doesn't yet exist; and upon processing an ND6 RS/RA/
 * NS/NA/redirect from a node, if a link-layer reachability record for the
 * node doesn't yet exist.
 *
 * Each newly created record is then referred to by the resolver route entry;
 * if a record already exists, its reference count gets increased for the new
 * resolver entry which now refers to it.  A record gets removed from the tree
 * and freed once its reference count drops to zero, i.e. when there is no
 * more resolver entry referring to it.
 *
 * A record contains the link-layer protocol (e.g. Ethertype IP/IPv6), the
 * HW address of the sender, the "last heard from" timestamp (lr_lastrcvd) and
 * the number of references made to it (lr_reqcnt).  Because the key for each
 * record in the red-black tree includes the link-layer protocol, the
 * namespace for the records is partitioned based on the type of link-layer
 * protocol, i.e. an Ethertype IP link-layer record is only referred to by one
 * or more ARP entries; an Ethertype IPv6 link-layer record is only referred to
 * by one or more ND6 entries.  Therefore, lr_reqcnt represents the number of
 * resolver entry references to the record for the same protocol family.
 *
 * Upon receiving packets from the network, the protocol's input callback
 * (e.g. ether_inet{6}_input) informs the corresponding resolver (ARP/ND6)
 * about the (link-layer) origin of the packet.  This results in searching
 * for a matching record in the red-black tree for the interface on which the
 * packet arrived.  If there's no match, no further processing takes place.
 * Otherwise, the lr_lastrcvd timestamp of the record is updated.
 *
 * When an IP/IPv6 packet is transmitted to the resolver (i.e. the destination
 * is on-link), ARP/ND6 records the "last spoken to" timestamp in the route
 * entry ({la,ln}_lastused).
 *
 * The reachability of the on-link node is determined by the following logic,
 * upon sending a packet through the resolver (a sketch of a caller applying
 * these rules follows this comment block):
 *
 *   a) If the record is used by exactly one resolver entry (lr_reqcnt
 *	is 1), i.e. the target host does not have IP/IPv6 aliases that we know
 *	of, check if lr_lastrcvd is "recent."  If so, simply send the packet;
 *	otherwise, re-resolve the target node.
 *
 *   b) If the record is shared by multiple resolver entries (lr_reqcnt is
 *	greater than 1), i.e. the target host has more than one IP/IPv6 alias
 *	on the same network interface, we can't rely on lr_lastrcvd alone, as
 *	one of the IP/IPv6 aliases could have been silently moved to another
 *	node for which we don't have a link-layer record.  If lr_lastrcvd is
 *	not "recent", we re-resolve the target node.  Otherwise, we perform
 *	an additional check against {la,ln}_lastused to see whether it is also
 *	"recent", relative to lr_lastrcvd.  If so, simply send the packet;
 *	otherwise, re-resolve the target node.
 *
 * The value for "recent" is configurable by adjusting the basetime value for
 * net.link.ether.inet.arp_llreach_base or net.inet6.icmp6.nd6_llreach_base.
 * The default basetime value is 30 seconds, and the actual expiration time
 * is calculated by multiplying the basetime value with some random factor,
 * which results in a number between 15 and 45 seconds.  Setting the basetime
 * value to 0 effectively disables this feature for the corresponding resolver.
 *
 * Assumptions:
 *
 * The above logic is based upon the following assumptions:
 *
 *   i) Network traffic is mostly bi-directional, i.e. the act of sending
 *	packets to an on-link node would most likely cause us to receive
 *	packets from that node.
 *
 *  ii) If the on-link node's IP/IPv6 address silently moves to another
 *	on-link node that we are not aware of, non-unicast packets
 *	from the old node would cause the record's lr_lastrcvd to be
 *	kept recent.
 *
 * We can mitigate the above by having the resolver check its {la,ln}_lastused
 * timestamp at all times, i.e. not only when lr_reqcnt is greater than 1; but
 * we currently optimize for the common cases.
 */
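
/*
 * Illustrative sketch (not part of this file's code): how a resolver
 * might apply rules (a) and (b) above before transmitting.  The helper
 * name and the "lastused" argument are hypothetical stand-ins for the
 * ARP/ND6 route entry fields ({la,ln}_lastused); locking is elided for
 * brevity.
 *
 *	static boolean_t
 *	resolver_target_reachable(struct if_llreach *lr, u_int64_t lastused)
 *	{
 *		if (lr == NULL)
 *			return FALSE;      // no record; must (re-)resolve
 *		if (lr->lr_reqcnt == 1)
 *			// rule (a): single alias, trust lr_lastrcvd alone
 *			return ifnet_llreach_reachable(lr);
 *		// rule (b): shared record; also require that we spoke to
 *		// the node recently (lastused within lr_reachable seconds)
 *		return ifnet_llreach_reachable_delta(lr, lastused);
 *	}
 */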

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/tree.h>
#include <sys/mcache.h>
#include <sys/protosw.h>

#include <dev/random/randomdev.h>

#include <net/if_dl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llreach.h>
#include <net/dlil.h>
#include <net/kpi_interface.h>
#include <net/route.h>
#include <net/net_sysctl.h>

#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/uipc_domain.h>
#include <kern/zalloc.h>

#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>

static KALLOC_TYPE_DEFINE(iflr_zone, struct if_llreach, NET_KT_DEFAULT);

static struct if_llreach *iflr_alloc(zalloc_flags_t);
static void iflr_free(struct if_llreach *);
static __inline int iflr_cmp(const struct if_llreach *,
    const struct if_llreach *);
static __inline int iflr_reachable(struct if_llreach *, int, u_int64_t);
static int sysctl_llreach_ifinfo SYSCTL_HANDLER_ARGS;

/* The following is protected by if_llreach_lock */
RB_GENERATE_PREV(ll_reach_tree, if_llreach, lr_link, iflr_cmp);

SYSCTL_DECL(_net_link_generic_system);

SYSCTL_NODE(_net_link_generic_system, OID_AUTO, llreach_info,
    CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_llreach_ifinfo,
    "Per-interface tree of source link-layer reachability records");

/*
 * Link-layer reachability is based on node constants in RFC 4861.
 */
#define LL_COMPUTE_RTIME(x)     ND_COMPUTE_RTIME(x)

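/*
 * For example, with the default basetime of 30 seconds, the computation
 * in ifnet_llreach_alloc() below is LL_COMPUTE_RTIME(30 * 1000), which,
 * given the RFC 4861 random factor bounds of 0.5 and 1.5, yields an
 * lr_reachable value of roughly 15 to 45 seconds -- the range quoted in
 * the header comment above.
 */
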
void
ifnet_llreach_ifattach(struct ifnet *ifp, boolean_t reuse)
{
	lck_rw_lock_exclusive(&ifp->if_llreach_lock);
	/* Initialize link-layer source tree (if not already) */
	if (!reuse) {
		RB_INIT(&ifp->if_ll_srcs);
	}
	lck_rw_done(&ifp->if_llreach_lock);
}

void
ifnet_llreach_ifdetach(struct ifnet *ifp)
{
#pragma unused(ifp)
	/*
	 * Nothing to do for now; the link-layer source tree might
	 * still contain entries at this point that are referred
	 * to by route entries pointing to this ifp.
	 */
}

/*
 * Link-layer source tree comparison function.
 *
 * An ordered predicate is necessary; bcmp() is not documented to return
 * an indication of ordering, while memcmp() is, as required by ISO C99.
 */
static __inline int
iflr_cmp(const struct if_llreach *a, const struct if_llreach *b)
{
	return memcmp(&a->lr_key, &b->lr_key, sizeof(a->lr_key));
}

static __inline int
iflr_reachable(struct if_llreach *lr, int cmp_delta, u_int64_t tval)
{
	u_int64_t now;
	u_int64_t expire;

	now = net_uptime();             /* current approx. uptime */
	/*
	 * No need for lr_lock; atomically read the last rcvd uptime.
	 */
	expire = lr->lr_lastrcvd + lr->lr_reachable;
	/*
	 * If we haven't heard back from the on-link host for over
	 * lr_reachable seconds, consider that the host is no
	 * longer reachable.
	 */
	if (!cmp_delta) {
		return expire >= now;
	}
	/*
	 * If the caller supplied a reference time, consider the
	 * host reachable if the record hasn't expired (see above)
	 * and if the reference time is within the past lr_reachable
	 * seconds.
	 */
	return (expire >= now) && (now - tval) < lr->lr_reachable;
}

int
ifnet_llreach_reachable(struct if_llreach *lr)
{
	/*
	 * Check whether the cache is too old to be trusted.
	 */
	return iflr_reachable(lr, 0, 0);
}

int
ifnet_llreach_reachable_delta(struct if_llreach *lr, u_int64_t tval)
{
	/*
	 * Check whether the cache is too old to be trusted.
	 */
	return iflr_reachable(lr, 1, tval);
}

void
ifnet_llreach_set_reachable(struct ifnet *ifp, u_int16_t llproto,
    void *__sized_by(alen) addr,
    unsigned int alen)
{
	struct if_llreach find, *lr;

	VERIFY(alen == IF_LLREACH_MAXLEN);      /* for now */

	find.lr_key.proto = llproto;
	bcopy(addr, &find.lr_key.addr, IF_LLREACH_MAXLEN);

	lck_rw_lock_shared(&ifp->if_llreach_lock);
	lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
	if (lr == NULL) {
		lck_rw_done(&ifp->if_llreach_lock);
		return;
	}
	/*
	 * No need for lr_lock; atomically update the last rcvd uptime.
	 */
	lr->lr_lastrcvd = net_uptime();
	lck_rw_done(&ifp->if_llreach_lock);
}
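
/*
 * Illustrative input-path sketch: a protocol input callback noting the
 * link-layer origin of a received frame (the surrounding callback and
 * the ether_shost field access are assumptions for illustration):
 *
 *	// e.g. on the IPv4 input path, for the frame's source address
 *	ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, eh->ether_shost,
 *	    IF_LLREACH_MAXLEN);
 *	// a miss in the tree is a no-op; a hit refreshes lr_lastrcvd
 */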

struct if_llreach *
ifnet_llreach_alloc(struct ifnet *ifp, u_int16_t llproto,
    void *__sized_by(alen) addr,
    unsigned int alen, u_int32_t llreach_base)
{
	struct if_llreach find, *lr;
	struct timeval cnow;

	if (llreach_base == 0) {
		return NULL;
	}

	VERIFY(alen == IF_LLREACH_MAXLEN);      /* for now */

	find.lr_key.proto = llproto;
	bcopy(addr, &find.lr_key.addr, IF_LLREACH_MAXLEN);

	lck_rw_lock_shared(&ifp->if_llreach_lock);
	lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
	if (lr != NULL) {
found:
		IFLR_LOCK(lr);
		VERIFY(lr->lr_reqcnt >= 1);
		lr->lr_reqcnt++;
		VERIFY(lr->lr_reqcnt != 0);
		IFLR_ADDREF_LOCKED(lr);         /* for caller */
		lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */
		IFLR_UNLOCK(lr);
		lck_rw_done(&ifp->if_llreach_lock);
		return lr;
	}

	if (!lck_rw_lock_shared_to_exclusive(&ifp->if_llreach_lock)) {
		lck_rw_lock_exclusive(&ifp->if_llreach_lock);
	}

	LCK_RW_ASSERT(&ifp->if_llreach_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* in case things have changed while becoming writer */
	lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
	if (lr != NULL) {
		goto found;
	}

	lr = iflr_alloc(Z_WAITOK);

	IFLR_LOCK(lr);
	lr->lr_reqcnt++;
	VERIFY(lr->lr_reqcnt == 1);
	IFLR_ADDREF_LOCKED(lr);                 /* for RB tree */
	IFLR_ADDREF_LOCKED(lr);                 /* for caller */
	lr->lr_lastrcvd = net_uptime();         /* current approx. uptime */
	lr->lr_baseup = lr->lr_lastrcvd;        /* base uptime */
	getmicrotime(&cnow);
	lr->lr_basecal = cnow.tv_sec;           /* base calendar time */
	lr->lr_basereachable = llreach_base;
	lr->lr_reachable = LL_COMPUTE_RTIME(lr->lr_basereachable * 1000);
	lr->lr_debug |= IFD_ATTACHED;
	lr->lr_ifp = ifp;
	lr->lr_key.proto = llproto;
	bcopy(addr, &lr->lr_key.addr, IF_LLREACH_MAXLEN);
	lr->lr_rssi = IFNET_RSSI_UNKNOWN;
	lr->lr_lqm = IFNET_LQM_THRESH_UNKNOWN;
	lr->lr_npm = IFNET_NPM_THRESH_UNKNOWN;
	RB_INSERT(ll_reach_tree, &ifp->if_ll_srcs, lr);
	IFLR_UNLOCK(lr);
	lck_rw_done(&ifp->if_llreach_lock);

	return lr;
}
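
/*
 * Lifecycle sketch (illustrative): a resolver entry holds one lr_reqcnt
 * request and one IFLR reference, both taken by ifnet_llreach_alloc()
 * and both dropped by a single ifnet_llreach_free() call.  The lladdr
 * and llreach_base arguments here are hypothetical:
 *
 *	struct if_llreach *lr;
 *
 *	lr = ifnet_llreach_alloc(ifp, ETHERTYPE_IP, lladdr,
 *	    IF_LLREACH_MAXLEN, llreach_base);   // reqcnt++, refcnt++
 *	if (lr != NULL) {
 *		// ... record lr in the resolver route entry; later,
 *		// when that entry goes away:
 *		ifnet_llreach_free(lr);   // reqcnt--, refcnt--;
 *		                          // tree removal + free at zero
 *	}
 */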

void
ifnet_llreach_free(struct if_llreach *lr)
{
	struct ifnet *ifp;

	/* no need to lock here; lr_ifp never changes */
	ifp = lr->lr_ifp;

	lck_rw_lock_exclusive(&ifp->if_llreach_lock);
	IFLR_LOCK(lr);
	if (lr->lr_reqcnt == 0) {
		panic("%s: lr=%p negative reqcnt", __func__, lr);
		/* NOTREACHED */
	}
	--lr->lr_reqcnt;
	if (lr->lr_reqcnt > 0) {
		IFLR_UNLOCK(lr);
		lck_rw_done(&ifp->if_llreach_lock);
		IFLR_REMREF(lr);                /* for caller */
		return;
	}
	if (!(lr->lr_debug & IFD_ATTACHED)) {
		panic("%s: Attempt to detach an unattached llreach lr=%p",
		    __func__, lr);
		/* NOTREACHED */
	}
	lr->lr_debug &= ~IFD_ATTACHED;
	RB_REMOVE(ll_reach_tree, &ifp->if_ll_srcs, lr);
	IFLR_UNLOCK(lr);
	lck_rw_done(&ifp->if_llreach_lock);

	IFLR_REMREF(lr);                        /* for RB tree */
	IFLR_REMREF(lr);                        /* for caller */
}

u_int64_t
ifnet_llreach_up2calexp(struct if_llreach *lr, u_int64_t uptime)
{
	u_int64_t calendar = 0;

	if (uptime != 0) {
		struct timeval cnow;
		u_int64_t unow;

		getmicrotime(&cnow);    /* current calendar time */
		unow = net_uptime();    /* current approx. uptime */
		/*
		 * Take into account possible calendar time changes;
		 * adjust base calendar value if necessary, i.e.
		 * the calendar skew should equate to the uptime skew.
		 */
		lr->lr_basecal += (cnow.tv_sec - lr->lr_basecal) -
		    (unow - lr->lr_baseup);

		calendar = lr->lr_basecal + lr->lr_reachable +
		    (uptime - lr->lr_baseup);
	}

	return calendar;
}
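
/*
 * The adjustment above simplifies to lr_basecal = cnow.tv_sec -
 * (unow - lr_baseup), i.e. the base calendar time is re-derived so
 * that the calendar delta since the base always equals the uptime
 * delta.  Worked numbers (illustrative): with lr_baseup = 100 and
 * lr_basecal = 1000, if the wall clock is later stepped forward by
 * 50 seconds, then at unow = 110 we observe cnow.tv_sec = 1060 and
 * lr_basecal becomes 1000 + (1060 - 1000) - (110 - 100) = 1050.
 */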

u_int64_t
ifnet_llreach_up2upexp(struct if_llreach *lr, u_int64_t uptime)
{
	return lr->lr_reachable + uptime;
}

int
ifnet_llreach_get_defrouter(struct ifnet *ifp, sa_family_t af,
    struct ifnet_llreach_info *iflri)
{
	struct radix_node_head *rnh;
	struct sockaddr_storage dst_ss, mask_ss;
	struct rtentry *rt;
	int error = ESRCH;

	VERIFY(ifp != NULL && iflri != NULL &&
	    (af == AF_INET || af == AF_INET6));

	bzero(iflri, sizeof(*iflri));

	if ((rnh = rt_tables[af]) == NULL) {
		return error;
	}

	bzero(&dst_ss, sizeof(dst_ss));
	bzero(&mask_ss, sizeof(mask_ss));
	dst_ss.ss_family = af;
	dst_ss.ss_len = (af == AF_INET) ? sizeof(struct sockaddr_in) :
	    sizeof(struct sockaddr_in6);

	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, SA(&dst_ss), SA(&mask_ss), rnh, ifp->if_index);
	if (rt != NULL) {
		struct rtentry *gwrt;

		RT_LOCK(rt);
		if ((rt->rt_flags & RTF_GATEWAY) &&
		    (gwrt = rt->rt_gwroute) != NULL &&
		    rt_key(rt)->sa_family == rt_key(gwrt)->sa_family &&
		    (gwrt->rt_flags & RTF_UP)) {
			RT_UNLOCK(rt);
			RT_LOCK(gwrt);
			if (gwrt->rt_llinfo_get_iflri != NULL) {
				(*gwrt->rt_llinfo_get_iflri)(gwrt, iflri);
				error = 0;
			}
			RT_UNLOCK(gwrt);
		} else {
			RT_UNLOCK(rt);
		}
		rtfree_locked(rt);
	}
	lck_mtx_unlock(rnh_lock);

	return error;
}

static struct if_llreach *
iflr_alloc(zalloc_flags_t how)
{
	struct if_llreach *lr = zalloc_flags(iflr_zone, how | Z_ZERO);

	if (lr) {
		lck_mtx_init(&lr->lr_lock, &ifnet_lock_group, &ifnet_lock_attr);
		lr->lr_debug |= IFD_ALLOC;
	}
	return lr;
}

static void
iflr_free(struct if_llreach *lr)
{
	IFLR_LOCK(lr);
	if (lr->lr_debug & IFD_ATTACHED) {
		panic("%s: attached lr=%p is being freed", __func__, lr);
		/* NOTREACHED */
	} else if (!(lr->lr_debug & IFD_ALLOC)) {
		panic("%s: lr %p cannot be freed", __func__, lr);
		/* NOTREACHED */
	} else if (lr->lr_refcnt != 0) {
		panic("%s: non-zero refcount lr=%p", __func__, lr);
		/* NOTREACHED */
	} else if (lr->lr_reqcnt != 0) {
		panic("%s: non-zero reqcnt lr=%p", __func__, lr);
		/* NOTREACHED */
	}
	lr->lr_debug &= ~IFD_ALLOC;
	IFLR_UNLOCK(lr);

	lck_mtx_destroy(&lr->lr_lock, &ifnet_lock_group);
	zfree(iflr_zone, lr);
}

void
iflr_addref(struct if_llreach *lr, int locked)
{
	if (!locked) {
		IFLR_LOCK(lr);
	} else {
		IFLR_LOCK_ASSERT_HELD(lr);
	}

	if (++lr->lr_refcnt == 0) {
		panic("%s: lr=%p wraparound refcnt", __func__, lr);
		/* NOTREACHED */
	}
	if (!locked) {
		IFLR_UNLOCK(lr);
	}
}

void
iflr_remref(struct if_llreach *lr)
{
	IFLR_LOCK(lr);
	if (lr->lr_refcnt == 0) {
		panic("%s: lr=%p negative refcnt", __func__, lr);
		/* NOTREACHED */
	}
	--lr->lr_refcnt;
	if (lr->lr_refcnt > 0) {
		IFLR_UNLOCK(lr);
		return;
	}
	IFLR_UNLOCK(lr);

	iflr_free(lr);  /* deallocate it */
}

void
ifnet_lr2ri(struct if_llreach *lr, struct rt_reach_info *ri)
{
	struct if_llreach_info lri;

	IFLR_LOCK_ASSERT_HELD(lr);

	bzero(ri, sizeof(*ri));
	ifnet_lr2lri(lr, &lri);
	ri->ri_refcnt = lri.lri_refcnt;
	ri->ri_probes = lri.lri_probes;
	ri->ri_rcv_expire = lri.lri_expire;
	ri->ri_rssi = lri.lri_rssi;
	ri->ri_lqm = lri.lri_lqm;
	ri->ri_npm = lri.lri_npm;
}

void
ifnet_lr2iflri(struct if_llreach *lr, struct ifnet_llreach_info *iflri)
{
	IFLR_LOCK_ASSERT_HELD(lr);

	bzero(iflri, sizeof(*iflri));
	/*
	 * Note here we return request count, not actual memory refcnt.
	 */
	iflri->iflri_refcnt = lr->lr_reqcnt;
	iflri->iflri_probes = lr->lr_probes;
	iflri->iflri_rcv_expire = ifnet_llreach_up2upexp(lr, lr->lr_lastrcvd);
	iflri->iflri_curtime = net_uptime();
	switch (lr->lr_key.proto) {
	case ETHERTYPE_IP:
		iflri->iflri_netproto = PF_INET;
		break;
	case ETHERTYPE_IPV6:
		iflri->iflri_netproto = PF_INET6;
		break;
	default:
		/*
		 * This shouldn't be possible for the time being,
		 * since link-layer reachability records are only
		 * kept for ARP and ND6.
		 */
		iflri->iflri_netproto = PF_UNSPEC;
		break;
	}
	bcopy(&lr->lr_key.addr, &iflri->iflri_addr, IF_LLREACH_MAXLEN);
	iflri->iflri_rssi = lr->lr_rssi;
	iflri->iflri_lqm = lr->lr_lqm;
	iflri->iflri_npm = lr->lr_npm;
}

void
ifnet_lr2lri(struct if_llreach *lr, struct if_llreach_info *lri)
{
	IFLR_LOCK_ASSERT_HELD(lr);

	bzero(lri, sizeof(*lri));
	/*
	 * Note here we return request count, not actual memory refcnt.
	 */
	lri->lri_refcnt = lr->lr_reqcnt;
	lri->lri_ifindex = lr->lr_ifp->if_index;
	lri->lri_probes = lr->lr_probes;
	lri->lri_expire = ifnet_llreach_up2calexp(lr, lr->lr_lastrcvd);
	lri->lri_proto = lr->lr_key.proto;
	bcopy(&lr->lr_key.addr, &lri->lri_addr, IF_LLREACH_MAXLEN);
	lri->lri_rssi = lr->lr_rssi;
	lri->lri_lqm = lr->lr_lqm;
	lri->lri_npm = lr->lr_npm;
}

static int
sysctl_llreach_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	DECLARE_SYSCTL_HANDLER_ARG_ARRAY(int, 1, name, namelen);
	int             retval = 0;
	uint32_t        ifindex;
	struct if_llreach *lr;
	struct if_llreach_info lri = {};
	struct ifnet    *ifp;

	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	ifindex = name[0];
	ifnet_head_lock_shared();
	if (ifindex <= 0 || ifindex > (u_int)if_index) {
		printf("%s: ifindex %u out of range\n", __func__, ifindex);
		ifnet_head_done();
		return ENOENT;
	}

	ifp = ifindex2ifnet[ifindex];
	ifnet_head_done();
	if (ifp == NULL) {
		printf("%s: no ifp for ifindex %u\n", __func__, ifindex);
		return ENOENT;
	}

	lck_rw_lock_shared(&ifp->if_llreach_lock);
	RB_FOREACH(lr, ll_reach_tree, &ifp->if_ll_srcs) {
		/* Export to if_llreach_info structure */
		IFLR_LOCK(lr);
		ifnet_lr2lri(lr, &lri);
		IFLR_UNLOCK(lr);

		if ((retval = SYSCTL_OUT(req, &lri, sizeof(lri))) != 0) {
			break;
		}
	}
	lck_rw_done(&ifp->if_llreach_lock);

	return retval;
}
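
/*
 * Illustrative userland sketch (not part of this file): reading the
 * per-interface records exported by the handler above.  The node is
 * queried by appending the interface index to its MIB name; this uses
 * the standard sysctl(3) API, and error handling is elided:
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME, len = 0;
 *
 *	sysctlnametomib("net.link.generic.system.llreach_info",
 *	    mib, &miblen);
 *	mib[miblen] = if_nametoindex("en0");    // becomes name[0] above
 *	sysctl(mib, (u_int)miblen + 1, NULL, &len, NULL, 0);  // size probe
 *	// ...allocate len bytes, call sysctl() again, then walk the
 *	// returned array of struct if_llreach_info records.
 */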