xref: /xnu-8019.80.24/bsd/net/rtsock.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1988, 1991, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)rtsock.c	8.5 (Berkeley) 11/2/94
61  */
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kauth.h>
66 #include <sys/kernel.h>
67 #include <sys/sysctl.h>
68 #include <sys/proc.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/domain.h>
74 #include <sys/protosw.h>
75 #include <sys/syslog.h>
76 #include <sys/mcache.h>
77 #include <kern/locks.h>
78 #include <sys/codesign.h>
79 
80 #include <net/if.h>
81 #include <net/route.h>
82 #include <net/dlil.h>
83 #include <net/raw_cb.h>
84 #include <netinet/in.h>
85 #include <netinet/in_var.h>
86 #include <netinet/in_arp.h>
87 #include <netinet/ip.h>
88 #include <netinet/ip6.h>
89 #include <netinet6/nd6.h>
90 
/* Global routing statistics; maintained by the routing code elsewhere */
extern struct rtstat rtstat;
/* Statically-initialized PF_ROUTE domain; pointer filled in at init time */
extern struct domain routedomain_s;
static struct domain *routedomain = NULL;

MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");

/* Canonical src/dst sockaddrs stamped onto every routing-socket message */
static struct sockaddr route_dst = { .sa_len = 2, .sa_family = PF_ROUTE, .sa_data = { 0, } };
static struct sockaddr route_src = { .sa_len = 2, .sa_family = PF_ROUTE, .sa_data = { 0, } };
static struct sockaddr sa_zero   = { .sa_len = sizeof(sa_zero), .sa_family = AF_INET, .sa_data = { 0, } };

/*
 * Counters of currently-attached routing sockets, broken down by the
 * protocol they were opened with.  Updated with atomic_add_32() in
 * rts_attach()/rts_detach(); any_count is consulted to decide whether
 * anyone is listening before a message is broadcast.
 */
struct route_cb {
	u_int32_t       ip_count;       /* attached w/ AF_INET */
	u_int32_t       ip6_count;      /* attached w/ AF_INET6 */
	u_int32_t       any_count;      /* total attached */
};

static struct route_cb route_cb;

/*
 * Per-request cursor threaded through the sysctl dump walkers
 * (sysctl_dumpentry() and friends).
 */
struct walkarg {
	int     w_tmemsize;             /* size of w_tmem scratch buffer */
	int     w_op, w_arg;            /* sysctl operation and its argument */
	caddr_t w_tmem;                 /* scratch buffer for message assembly */
	struct sysctl_req *w_req;       /* request that results are copied to */
};
115 
/* Protocol-switch (pr_usrreqs) handlers for PF_ROUTE sockets */
static void route_dinit(struct domain *);
static int rts_abort(struct socket *);
static int rts_attach(struct socket *, int, struct proc *);
static int rts_bind(struct socket *, struct sockaddr *, struct proc *);
static int rts_connect(struct socket *, struct sockaddr *, struct proc *);
static int rts_detach(struct socket *);
static int rts_disconnect(struct socket *);
static int rts_peeraddr(struct socket *, struct sockaddr **);
static int rts_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static int rts_shutdown(struct socket *);
static int rts_sockaddr(struct socket *, struct sockaddr **);

/* Message processing and sysctl dump machinery */
static int route_output(struct mbuf *, struct socket *);
static int rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
    struct sockaddr *, unsigned int);
static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
static struct mbuf *rt_msg1(u_char, struct rt_addrinfo *);
static int rt_msg2(u_char, struct rt_addrinfo *, caddr_t, struct walkarg *,
    kauth_cred_t *);
static int sysctl_dumpentry(struct radix_node *rn, void *vw);
static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
static int sysctl_iflist(int af, struct walkarg *w);
static int sysctl_iflist2(int af, struct walkarg *w);
static int sysctl_rtstat(struct sysctl_req *);
static int sysctl_rttrash(struct sysctl_req *);
static int sysctl_rtsock SYSCTL_HANDLER_ARGS;

/* net.routetable: read-only dump of the routing tables */
SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_rtsock, "");

/* net.route: container for tunable routing knobs */
SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "routing");

/* Align x down to 1024 (only power of 2) assuming x is positive */
#define ALIGN_BYTES(x) do {                                             \
	x = (uint32_t)P2ALIGN(x, 1024);                         \
} while(0)

/* Round a up to the next multiple of 4 (uint32_t alignment); 0 maps to 4 */
#define ROUNDUP32(a)                                                    \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) :        \
	sizeof (uint32_t))

/* Advance cursor x past sockaddr n, keeping 32-bit alignment */
#define ADVANCE32(x, n)                                                 \
	(x += ROUNDUP32((n)->sa_len))

/* True when the route has an interface address with a usable sockaddr */
#define RT_HAS_IFADDR(rt)                                               \
	((rt)->rt_ifa != NULL && (rt)->rt_ifa->ifa_addr != NULL)
165 
166 /*
167  * It really doesn't make any sense at all for this code to share much
168  * with raw_usrreq.c, since its functionality is so restricted.  XXX
169  */
170 static int
rts_abort(struct socket * so)171 rts_abort(struct socket *so)
172 {
173 	return raw_usrreqs.pru_abort(so);
174 }
175 
176 /* pru_accept is EOPNOTSUPP */
177 
/*
 * pru_attach for PF_ROUTE sockets: allocate the raw-socket control
 * block, attach it, bump the listener counters for the requested
 * protocol, and mark the socket connected so send/recv work at once.
 * Called with the socket lock held.  Returns 0 or an errno.
 */
static int
rts_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(p)
	struct rawcb *rp;
	int error;

	VERIFY(so->so_pcb == NULL);

	MALLOC(rp, struct rawcb *, sizeof(*rp), M_PCB, M_WAITOK | M_ZERO);
	if (rp == NULL) {
		return ENOBUFS;
	}

	so->so_pcb = (caddr_t)rp;
	/* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
	error = raw_attach(so, proto);
	rp = sotorawcb(so);
	if (error) {
		/* undo the allocation and detach the pcb from the socket */
		FREE(rp, M_PCB);
		so->so_pcb = NULL;
		so->so_flags |= SOF_PCBCLEARING;
		return error;
	}

	/* account for this listener under its address family */
	switch (rp->rcb_proto.sp_protocol) {
	case AF_INET:
		atomic_add_32(&route_cb.ip_count, 1);
		break;
	case AF_INET6:
		atomic_add_32(&route_cb.ip6_count, 1);
		break;
	}
	rp->rcb_faddr = &route_src;
	atomic_add_32(&route_cb.any_count, 1);
	/* the socket is already locked when we enter rts_attach */
	soisconnected(so);
	/* by default, deliver copies of our own messages back to us */
	so->so_options |= SO_USELOOPBACK;
	return 0;
}
218 
219 static int
rts_bind(struct socket * so,struct sockaddr * nam,struct proc * p)220 rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
221 {
222 	return raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */
223 }
224 
225 static int
rts_connect(struct socket * so,struct sockaddr * nam,struct proc * p)226 rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
227 {
228 	return raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */
229 }
230 
231 /* pru_connect2 is EOPNOTSUPP */
232 /* pru_control is EOPNOTSUPP */
233 
234 static int
rts_detach(struct socket * so)235 rts_detach(struct socket *so)
236 {
237 	struct rawcb *rp = sotorawcb(so);
238 
239 	VERIFY(rp != NULL);
240 
241 	switch (rp->rcb_proto.sp_protocol) {
242 	case AF_INET:
243 		atomic_add_32(&route_cb.ip_count, -1);
244 		break;
245 	case AF_INET6:
246 		atomic_add_32(&route_cb.ip6_count, -1);
247 		break;
248 	}
249 	atomic_add_32(&route_cb.any_count, -1);
250 	return raw_usrreqs.pru_detach(so);
251 }
252 
253 static int
rts_disconnect(struct socket * so)254 rts_disconnect(struct socket *so)
255 {
256 	return raw_usrreqs.pru_disconnect(so);
257 }
258 
259 /* pru_listen is EOPNOTSUPP */
260 
261 static int
rts_peeraddr(struct socket * so,struct sockaddr ** nam)262 rts_peeraddr(struct socket *so, struct sockaddr **nam)
263 {
264 	return raw_usrreqs.pru_peeraddr(so, nam);
265 }
266 
267 /* pru_rcvd is EOPNOTSUPP */
268 /* pru_rcvoob is EOPNOTSUPP */
269 
270 static int
rts_send(struct socket * so,int flags,struct mbuf * m,struct sockaddr * nam,struct mbuf * control,struct proc * p)271 rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
272     struct mbuf *control, struct proc *p)
273 {
274 	return raw_usrreqs.pru_send(so, flags, m, nam, control, p);
275 }
276 
277 /* pru_sense is null */
278 
279 static int
rts_shutdown(struct socket * so)280 rts_shutdown(struct socket *so)
281 {
282 	return raw_usrreqs.pru_shutdown(so);
283 }
284 
285 static int
rts_sockaddr(struct socket * so,struct sockaddr ** nam)286 rts_sockaddr(struct socket *so, struct sockaddr **nam)
287 {
288 	return raw_usrreqs.pru_sockaddr(so, nam);
289 }
290 
/*
 * User-request dispatch table for PF_ROUTE sockets.  Unlisted
 * operations (accept, listen, rcvd, ...) fall back to the protocol
 * switch defaults, which return EOPNOTSUPP.
 */
static struct pr_usrreqs route_usrreqs = {
	.pru_abort =            rts_abort,
	.pru_attach =           rts_attach,
	.pru_bind =             rts_bind,
	.pru_connect =          rts_connect,
	.pru_detach =           rts_detach,
	.pru_disconnect =       rts_disconnect,
	.pru_peeraddr =         rts_peeraddr,
	.pru_send =             rts_send,
	.pru_shutdown =         rts_shutdown,
	.pru_sockaddr =         rts_sockaddr,
	.pru_sosend =           sosend,
	.pru_soreceive =        soreceive,
};
305 
/*
 * Process one routing message (RTM_ADD/DELETE/GET/CHANGE/LOCK) sent on
 * a routing socket.  The message is copied out of the mbuf chain,
 * validated, the requested table operation is performed under rnh_lock,
 * and the (possibly updated) message is echoed back to listeners via
 * raw_input(), or only to the sender for RTM_GET_SILENT.
 *
 * Locking: called with the socket lock held; the socket is unlocked for
 * the duration of the table operation (rnh_lock is taken instead) and
 * relocked before the reply is delivered.  Returns 0 or an errno; an
 * errno is also reflected into rtm_errno of the echoed message.
 */
/*ARGSUSED*/
static int
route_output(struct mbuf *m, struct socket *so)
{
	struct rt_msghdr *rtm = NULL;
	struct rtentry *rt = NULL;
	struct rtentry *saved_nrt = NULL;
	struct radix_node_head *rnh;
	struct rt_addrinfo info;
	int len, error = 0;
	sa_family_t dst_sa_family = 0;
	struct ifnet *ifp = NULL;
	struct sockaddr_in dst_in, gate_in;
	int sendonlytoself = 0;
	unsigned int ifscope = IFSCOPE_NONE;
	struct rawcb *rp = NULL;
	boolean_t is_router = FALSE;
/* record the error and jump to the common reply/cleanup path */
#define senderr(e) { error = (e); goto flush; }
	if (m == NULL || ((m->m_len < sizeof(intptr_t)) &&
	    (m = m_pullup(m, sizeof(intptr_t))) == NULL)) {
		return ENOBUFS;
	}
	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * Unlock the socket (but keep a reference) it won't be
	 * accessed until raw_input appends to it.
	 */
	socket_unlock(so, 0);
	lck_mtx_lock(rnh_lock);

	/* the message must be at least a header and self-consistent in length */
	len = m->m_pkthdr.len;
	if (len < sizeof(*rtm) ||
	    len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EINVAL);
	}
	R_Malloc(rtm, struct rt_msghdr *, len);
	if (rtm == NULL) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(ENOBUFS);
	}
	/* work on a contiguous private copy of the message */
	m_copydata(m, 0, len, (caddr_t)rtm);
	if (rtm->rtm_version != RTM_VERSION) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EPROTONOSUPPORT);
	}

	/*
	 * Silent version of RTM_GET for Reachability APIs. We may change
	 * all RTM_GETs to be silent in the future, so this is private for now.
	 */
	if (rtm->rtm_type == RTM_GET_SILENT) {
		if (!(so->so_options & SO_USELOOPBACK)) {
			senderr(EINVAL);
		}
		sendonlytoself = 1;
		rtm->rtm_type = RTM_GET;
	}

	/*
	 * Perform permission checking, only privileged sockets
	 * may perform operations other than RTM_GET
	 */
	if (rtm->rtm_type != RTM_GET && !(so->so_state & SS_PRIV)) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EPERM);
	}

	rtm->rtm_pid = proc_selfpid();
	info.rti_addrs = rtm->rtm_addrs;
	/* parse the sockaddrs that trail the header into info.rti_info[] */
	if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EINVAL);
	}
	if (info.rti_info[RTAX_DST] == NULL ||
	    info.rti_info[RTAX_DST]->sa_family >= AF_MAX ||
	    (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX)) {
		senderr(EINVAL);
	}

	/*
	 * Sanitize the destination: an AF_INET sockaddr of unexpected
	 * length is rebuilt into a well-formed local copy so later code
	 * can assume a full sockaddr_in.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET &&
	    info.rti_info[RTAX_DST]->sa_len != sizeof(struct sockaddr_in)) {
		/* At minimum, we need up to sin_addr */
		if (info.rti_info[RTAX_DST]->sa_len <
		    offsetof(struct sockaddr_in, sin_zero)) {
			senderr(EINVAL);
		}
		bzero(&dst_in, sizeof(dst_in));
		dst_in.sin_len = sizeof(dst_in);
		dst_in.sin_family = AF_INET;
		dst_in.sin_port = SIN(info.rti_info[RTAX_DST])->sin_port;
		dst_in.sin_addr = SIN(info.rti_info[RTAX_DST])->sin_addr;
		info.rti_info[RTAX_DST] = (struct sockaddr *)&dst_in;
		dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
	} else if (info.rti_info[RTAX_DST]->sa_family == AF_INET6 &&
	    info.rti_info[RTAX_DST]->sa_len < sizeof(struct sockaddr_in6)) {
		senderr(EINVAL);
	}

	/* same sanitization for the gateway, when one was supplied */
	if (info.rti_info[RTAX_GATEWAY] != NULL) {
		if (info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET &&
		    info.rti_info[RTAX_GATEWAY]->sa_len != sizeof(struct sockaddr_in)) {
			/* At minimum, we need up to sin_addr */
			if (info.rti_info[RTAX_GATEWAY]->sa_len <
			    offsetof(struct sockaddr_in, sin_zero)) {
				senderr(EINVAL);
			}
			bzero(&gate_in, sizeof(gate_in));
			gate_in.sin_len = sizeof(gate_in);
			gate_in.sin_family = AF_INET;
			gate_in.sin_port = SIN(info.rti_info[RTAX_GATEWAY])->sin_port;
			gate_in.sin_addr = SIN(info.rti_info[RTAX_GATEWAY])->sin_addr;
			info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gate_in;
		} else if (info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET6 &&
		    info.rti_info[RTAX_GATEWAY]->sa_len < sizeof(struct sockaddr_in6)) {
			senderr(EINVAL);
		}
	}

	/* intern the genmask in the mask tree so it can be shared by reference */
	if (info.rti_info[RTAX_GENMASK]) {
		struct radix_node *t;
		t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1);
		if (t != NULL && Bcmp(info.rti_info[RTAX_GENMASK],
		    t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0) {
			info.rti_info[RTAX_GENMASK] =
			    (struct sockaddr *)(t->rn_key);
		} else {
			senderr(ENOBUFS);
		}
	}

	/*
	 * If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
	 */
	if (rtm->rtm_flags & RTF_IFSCOPE) {
		if (info.rti_info[RTAX_DST]->sa_family != AF_INET &&
		    info.rti_info[RTAX_DST]->sa_family != AF_INET6) {
			senderr(EINVAL);
		}
		ifscope = rtm->rtm_index;
	}
	/*
	 * Block changes on INTCOPROC interfaces.
	 */
	if (ifscope) {
		unsigned int intcoproc_scope = 0;
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (IFNET_IS_INTCOPROC(ifp)) {
				intcoproc_scope = ifp->if_index;
				break;
			}
		}
		ifnet_head_done();
		/* only the kernel (pid 0) may touch routes scoped to coprocessor interfaces */
		if (intcoproc_scope == ifscope && proc_getpid(current_proc()) != 0) {
			senderr(EINVAL);
		}
	}

	/*
	 * RTF_PROXY can only be set internally from within the kernel.
	 */
	if (rtm->rtm_flags & RTF_PROXY) {
		senderr(EINVAL);
	}

	/*
	 * For AF_INET, always zero out the embedded scope ID.  If this is
	 * a scoped request, it must be done explicitly by setting RTF_IFSCOPE
	 * flag and the corresponding rtm_index value.  This is to prevent
	 * false interpretation of the scope ID because it's using the sin_zero
	 * field, which might not be properly cleared by the requestor.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET) {
		sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE);
	}
	if (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET) {
		sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);
	}
	/*
	 * For IPv6 link-local style addresses with a KAME-embedded scope
	 * in the address bytes, lift the scope out into sin6_scope_id.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET6 &&
	    IN6_IS_SCOPE_EMBED(&SIN6(info.rti_info[RTAX_DST])->sin6_addr) &&
	    !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(&SIN6(info.rti_info[RTAX_DST])->sin6_addr) &&
	    SIN6(info.rti_info[RTAX_DST])->sin6_scope_id == 0) {
		SIN6(info.rti_info[RTAX_DST])->sin6_scope_id = ntohs(SIN6(info.rti_info[RTAX_DST])->sin6_addr.s6_addr16[1]);
		SIN6(info.rti_info[RTAX_DST])->sin6_addr.s6_addr16[1] = 0;
	}

	switch (rtm->rtm_type) {
	case RTM_ADD:
		if (info.rti_info[RTAX_GATEWAY] == NULL) {
			senderr(EINVAL);
		}

		error = rtrequest_scoped_locked(RTM_ADD,
		    info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
		    info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
		    ifscope);
		if (error == 0 && saved_nrt != NULL) {
			RT_LOCK(saved_nrt);
			/*
			 * If the route request specified an interface with
			 * IFA and/or IFP, we set the requested interface on
			 * the route with rt_setif.  It would be much better
			 * to do this inside rtrequest, but that would
			 * require passing the desired interface, in some
			 * form, to rtrequest.  Since rtrequest is called in
			 * so many places (roughly 40 in our source), adding
			 * a parameter is too much for us to swallow; this is
			 * something for the FreeBSD developers to tackle.
			 * Instead, we let rtrequest compute whatever
			 * interface it wants, then come in behind it and
			 * stick in the interface that we really want.  This
			 * works reasonably well except when rtrequest can't
			 * figure out what interface to use (with
			 * ifa_withroute) and returns ENETUNREACH.  Ideally
			 * it shouldn't matter if rtrequest can't figure out
			 * the interface if we're going to explicitly set it
			 * ourselves anyway.  But practically we can't
			 * recover here because rtrequest will not do any of
			 * the work necessary to add the route if it can't
			 * find an interface.  As long as there is a default
			 * route that leads to some interface, rtrequest will
			 * find an interface, so this problem should be
			 * rarely encountered.
			 * [email protected]
			 */
			rt_setif(saved_nrt,
			    info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
			    info.rti_info[RTAX_GATEWAY], ifscope);
			(void)rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, saved_nrt);
			saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
			saved_nrt->rt_rmx.rmx_locks |=
			    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
			saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
			RT_REMREF_LOCKED(saved_nrt);
			RT_UNLOCK(saved_nrt);
		}
		break;

	case RTM_DELETE:
		error = rtrequest_scoped_locked(RTM_DELETE,
		    info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
		    info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
		    ifscope);
		if (error == 0) {
			/* report the just-deleted entry back to the caller */
			rt = saved_nrt;
			RT_LOCK(rt);
			goto report;
		}
		break;

	case RTM_GET:
	case RTM_CHANGE:
	case RTM_LOCK:
		rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family];
		if (rnh == NULL) {
			senderr(EAFNOSUPPORT);
		}
		/*
		 * Lookup the best match based on the key-mask pair;
		 * callee adds a reference and checks for root node.
		 */
		rt = rt_lookup(TRUE, info.rti_info[RTAX_DST],
		    info.rti_info[RTAX_NETMASK], rnh, ifscope);
		if (rt == NULL) {
			senderr(ESRCH);
		}
		RT_LOCK(rt);

		/*
		 * Holding rnh_lock here prevents the possibility of
		 * ifa from changing (e.g. in_ifinit), so it is safe
		 * to access its ifa_addr (down below) without locking.
		 */
		switch (rtm->rtm_type) {
		case RTM_GET: {
			kauth_cred_t cred;
			kauth_cred_t* credp;
			struct ifaddr *ifa2;
report:
			cred = kauth_cred_proc_ref(current_proc());
			credp = &cred;

			ifa2 = NULL;
			RT_LOCK_ASSERT_HELD(rt);
			/* describe the found route in info for rt_msg2() */
			info.rti_info[RTAX_DST] = rt_key(rt);
			dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
			if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
				ifp = rt->rt_ifp;
				if (ifp != NULL) {
					ifnet_lock_shared(ifp);
					/* hold the link-level ifaddr across message assembly */
					ifa2 = ifp->if_lladdr;
					info.rti_info[RTAX_IFP] =
					    ifa2->ifa_addr;
					IFA_ADDREF(ifa2);
					ifnet_lock_done(ifp);
					info.rti_info[RTAX_IFA] =
					    rt->rt_ifa->ifa_addr;
					rtm->rtm_index = ifp->if_index;
				} else {
					info.rti_info[RTAX_IFP] = NULL;
					info.rti_info[RTAX_IFA] = NULL;
				}
			} else if ((ifp = rt->rt_ifp) != NULL) {
				rtm->rtm_index = ifp->if_index;
			}
			if (ifa2 != NULL) {
				IFA_LOCK(ifa2);
			}
			/* first pass: compute the reply length only */
			len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, credp);
			if (ifa2 != NULL) {
				IFA_UNLOCK(ifa2);
			}
			struct rt_msghdr *out_rtm;
			R_Malloc(out_rtm, struct rt_msghdr *, len);
			if (out_rtm == NULL) {
				RT_UNLOCK(rt);
				if (ifa2 != NULL) {
					IFA_REMREF(ifa2);
				}
				senderr(ENOBUFS);
			}
			Bcopy(rtm, out_rtm, sizeof(struct rt_msghdr));
			if (ifa2 != NULL) {
				IFA_LOCK(ifa2);
			}
			/* second pass: fill the reply into the new buffer */
			(void) rt_msg2(out_rtm->rtm_type, &info, (caddr_t)out_rtm,
			    NULL, &cred);
			if (ifa2 != NULL) {
				IFA_UNLOCK(ifa2);
			}
			R_Free(rtm);
			rtm = out_rtm;
			rtm->rtm_flags = rt->rt_flags;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_addrs = info.rti_addrs;
			if (ifa2 != NULL) {
				IFA_REMREF(ifa2);
			}

			kauth_cred_unref(&cred);
			break;
		}

		case RTM_CHANGE:
			is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;

			if (info.rti_info[RTAX_GATEWAY] != NULL &&
			    (error = rt_setgate(rt, rt_key(rt),
			    info.rti_info[RTAX_GATEWAY]))) {
				int tmp = error;
				RT_UNLOCK(rt);
				senderr(tmp);
			}
			/*
			 * If they tried to change things but didn't specify
			 * the required gateway, then just use the old one.
			 * This can happen if the user tries to change the
			 * flags on the default route without changing the
			 * default gateway. Changing flags still doesn't work.
			 */
			if ((rt->rt_flags & RTF_GATEWAY) &&
			    info.rti_info[RTAX_GATEWAY] == NULL) {
				info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			}

			/*
			 * On Darwin, we call rt_setif which contains the
			 * equivalent to the code found at this very spot
			 * in BSD.
			 */
			rt_setif(rt,
			    info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
			    info.rti_info[RTAX_GATEWAY], ifscope);

			if ((error = rt_setmetrics(rtm->rtm_inits,
			    &rtm->rtm_rmx, rt))) {
				int tmp = error;
				RT_UNLOCK(rt);
				senderr(tmp);
			}
			if (info.rti_info[RTAX_GENMASK]) {
				rt->rt_genmask = info.rti_info[RTAX_GENMASK];
			}

			/*
			 * Enqueue work item to invoke callback for this route entry
			 * This may not be needed always, but for now issue it anytime
			 * RTM_CHANGE gets called.
			 */
			route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_ENTRY_REFRESH, NULL, TRUE);
			/*
			 * If the route is for a router, walk the tree to send refresh
			 * event to protocol cloned entries
			 */
			if (is_router) {
				struct route_event rt_ev;
				route_event_init(&rt_ev, rt, NULL, ROUTE_ENTRY_REFRESH);
				RT_UNLOCK(rt);
				(void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
				RT_LOCK(rt);
			}
			OS_FALLTHROUGH;
		case RTM_LOCK:
			rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
			rt->rt_rmx.rmx_locks |=
			    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
			break;
		}
		RT_UNLOCK(rt);
		break;
	default:
		senderr(EOPNOTSUPP);
	}
flush:
	/* reflect success/failure into the message we echo back */
	if (rtm != NULL) {
		if (error) {
			rtm->rtm_errno = error;
		} else {
			rtm->rtm_flags |= RTF_DONE;
		}
	}
	if (rt != NULL) {
		RT_LOCK_ASSERT_NOTHELD(rt);
		rtfree_locked(rt);
	}
	lck_mtx_unlock(rnh_lock);

	/* relock the socket now */
	socket_lock(so, 0);
	/*
	 * Check to see if we don't want our own messages.
	 */
	if (!(so->so_options & SO_USELOOPBACK)) {
		if (route_cb.any_count <= 1) {
			/* no other listeners: nothing to broadcast */
			if (rtm != NULL) {
				R_Free(rtm);
			}
			m_freem(m);
			return error;
		}
		/* There is another listener, so construct message */
		rp = sotorawcb(so);
	}
	if (rtm != NULL) {
		/* copy the (possibly rewritten) reply back into the mbuf chain */
		m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
		if (m->m_pkthdr.len < rtm->rtm_msglen) {
			m_freem(m);
			m = NULL;
		} else if (m->m_pkthdr.len > rtm->rtm_msglen) {
			/* trim any excess beyond the message length */
			m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
		}
		R_Free(rtm);
	}
	if (sendonlytoself && m != NULL) {
		/* RTM_GET_SILENT: deliver only to the requesting socket */
		error = 0;
		if (sbappendaddr(&so->so_rcv, &route_src, m,
		    NULL, &error) != 0) {
			sorwakeup(so);
		}
		if (error) {
			return error;
		}
	} else {
		struct sockproto route_proto = { .sp_family = PF_ROUTE, .sp_protocol = 0 };
		if (rp != NULL) {
			rp->rcb_proto.sp_family = 0; /* Avoid us */
		}
		if (dst_sa_family != 0) {
			route_proto.sp_protocol = dst_sa_family;
		}
		if (m != NULL) {
			/* raw_input() takes its own locks; drop ours around it */
			socket_unlock(so, 0);
			raw_input(m, &route_proto, &route_src, &route_dst);
			socket_lock(so, 0);
		}
		if (rp != NULL) {
			rp->rcb_proto.sp_family = PF_ROUTE;
		}
	}
	return error;
}
794 
795 void
rt_setexpire(struct rtentry * rt,uint64_t expiry)796 rt_setexpire(struct rtentry *rt, uint64_t expiry)
797 {
798 	/* set both rt_expire and rmx_expire */
799 	rt->rt_expire = expiry;
800 	if (expiry) {
801 		rt->rt_rmx.rmx_expire =
802 		    (int32_t)(expiry + rt->base_calendartime -
803 		    rt->base_uptime);
804 	} else {
805 		rt->rt_rmx.rmx_expire = 0;
806 	}
807 }
808 
/*
 * Apply caller-supplied metrics to a route.  `which' is the RTV_* bit
 * mask (rtm_inits) naming the metrics to take from `in'.  The special
 * bit RTV_REFRESH_HOST must appear alone and triggers a link-layer
 * refresh of a host route instead of a metric update.  Returns 0 or an
 * errno.  Called with the route entry locked.
 */
static int
rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out)
{
	if (!(which & RTV_REFRESH_HOST)) {
		struct timeval caltime;
		getmicrotime(&caltime);
/* copy metric e from `in' when its selector bit f is set in `which' */
#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
		metric(RTV_RPIPE, rmx_recvpipe);
		metric(RTV_SPIPE, rmx_sendpipe);
		metric(RTV_SSTHRESH, rmx_ssthresh);
		metric(RTV_RTT, rmx_rtt);
		metric(RTV_RTTVAR, rmx_rttvar);
		metric(RTV_HOPCOUNT, rmx_hopcount);
		metric(RTV_MTU, rmx_mtu);
		metric(RTV_EXPIRE, rmx_expire);
#undef metric
		if (out->rt_rmx.rmx_expire > 0) {
			/* account for system time change */
			getmicrotime(&caltime);
			out->base_calendartime +=
			    NET_CALCULATE_CLOCKSKEW(caltime,
			    out->base_calendartime,
			    net_uptime(), out->base_uptime);
			/* convert the calendar-based rmx_expire to uptime-based rt_expire */
			rt_setexpire(out,
			    out->rt_rmx.rmx_expire -
			    out->base_calendartime +
			    out->base_uptime);
		} else {
			rt_setexpire(out, 0);
		}

		/* both expiration representations must agree on zero/non-zero */
		VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
		VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
	} else {
		/* Only RTV_REFRESH_HOST must be set */
		if ((which & ~RTV_REFRESH_HOST) ||
		    (out->rt_flags & RTF_STATIC) ||
		    !(out->rt_flags & RTF_LLINFO)) {
			return EINVAL;
		}

		if (out->rt_llinfo_refresh == NULL) {
			return ENOTSUP;
		}

		/* ask the link layer (e.g. ARP/ND) to revalidate this entry */
		out->rt_llinfo_refresh(out);
	}
	return 0;
}
858 
/*
 * Copy a route's metrics into `out', converting the uptime-based
 * rt_expire back to a calendar-time rmx_expire for userland.  Note the
 * side effect: `in->base_calendartime' is adjusted here to compensate
 * for any wall-clock changes since the route was stamped.  Called with
 * the route entry locked.
 */
static void
rt_getmetrics(struct rtentry *in, struct rt_metrics *out)
{
	struct timeval caltime;

	/* both expiration representations must agree on zero/non-zero */
	VERIFY(in->rt_expire == 0 || in->rt_rmx.rmx_expire != 0);
	VERIFY(in->rt_expire != 0 || in->rt_rmx.rmx_expire == 0);

	*out = in->rt_rmx;

	if (in->rt_expire != 0) {
		/* account for system time change */
		getmicrotime(&caltime);

		in->base_calendartime +=
		    NET_CALCULATE_CLOCKSKEW(caltime,
		    in->base_calendartime, net_uptime(), in->base_uptime);

		/* uptime-relative expiry -> absolute calendar time */
		out->rmx_expire = (int32_t)(in->base_calendartime +
		    in->rt_expire - in->base_uptime);
	} else {
		out->rmx_expire = 0;
	}
}
883 
884 /*
885  * Set route's interface given info.rti_info[RTAX_IFP],
886  * info.rti_info[RTAX_IFA], and gateway.
887  */
static void
rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr,
    struct sockaddr *Gate, unsigned int ifscope)
{
	struct ifaddr *ifa = NULL;
	struct ifnet *ifp = NULL;
	void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *);

	/* Caller must hold rnh_lock and the route's lock. */
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK_ASSERT_HELD(rt);

	/* Don't update a defunct route */
	if (rt->rt_flags & RTF_CONDEMNED) {
		return;
	}

	/* Add an extra ref for ourselves */
	RT_ADDREF_LOCKED(rt);

	/* Become a regular mutex, just in case */
	RT_CONVERT_LOCK(rt);

	/*
	 * New gateway could require new ifaddr, ifp; flags may also
	 * be different; ifp may be specified by ll sockaddr when
	 * protocol address is ambiguous.
	 */
	if (Ifpaddr && (ifa = ifa_ifwithnet_scoped(Ifpaddr, ifscope)) &&
	    (ifp = ifa->ifa_ifp) && (Ifaaddr || Gate)) {
		/* Re-resolve the ifaddr on that ifp from the protocol address. */
		IFA_REMREF(ifa);
		ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, ifp);
	} else {
		/* Drop any partial result from the combined lookup above. */
		if (ifa != NULL) {
			IFA_REMREF(ifa);
			ifa = NULL;
		}
		if (Ifpaddr && (ifp = if_withname(Ifpaddr))) {
			if (Gate) {
				ifa = ifaof_ifpforaddr(Gate, ifp);
			} else {
				/* No gateway: fall back to ifp's first address. */
				ifnet_lock_shared(ifp);
				ifa = TAILQ_FIRST(&ifp->if_addrhead);
				if (ifa != NULL) {
					IFA_ADDREF(ifa);
				}
				ifnet_lock_done(ifp);
			}
		} else if (Ifaaddr &&
		    (ifa = ifa_ifwithaddr_scoped(Ifaaddr, ifscope))) {
			ifp = ifa->ifa_ifp;
		} else if (Gate != NULL) {
			/*
			 * Safe to drop rt_lock and use rt_key, since holding
			 * rnh_lock here prevents another thread from calling
			 * rt_setgate() on this route.  We cannot hold the
			 * lock across ifa_ifwithroute since the lookup done
			 * by that routine may point to the same route.
			 */
			RT_UNLOCK(rt);
			if ((ifa = ifa_ifwithroute_scoped_locked(rt->rt_flags,
			    rt_key(rt), Gate, ifscope)) != NULL) {
				ifp = ifa->ifa_ifp;
			}
			RT_LOCK(rt);
			/* Don't update a defunct route */
			if (rt->rt_flags & RTF_CONDEMNED) {
				if (ifa != NULL) {
					IFA_REMREF(ifa);
				}
				/* Release extra ref */
				RT_REMREF_LOCKED(rt);
				return;
			}
		}
	}

	/* trigger route cache reevaluation */
	if (rt_key(rt)->sa_family == AF_INET) {
		routegenid_inet_update();
	} else if (rt_key(rt)->sa_family == AF_INET6) {
		routegenid_inet6_update();
	}

	if (ifa != NULL) {
		struct ifaddr *oifa = rt->rt_ifa;
		if (oifa != ifa) {
			/*
			 * Switching ifaddr: give the old one a chance to
			 * tear down its per-route state (RTM_DELETE), then
			 * install the new one.
			 */
			if (oifa != NULL) {
				IFA_LOCK_SPIN(oifa);
				ifa_rtrequest = oifa->ifa_rtrequest;
				IFA_UNLOCK(oifa);
				if (ifa_rtrequest != NULL) {
					ifa_rtrequest(RTM_DELETE, rt, Gate);
				}
			}
			rtsetifa(rt, ifa);

			if (rt->rt_ifp != ifp) {
				/*
				 * Purge any link-layer info caching.
				 */
				if (rt->rt_llinfo_purge != NULL) {
					rt->rt_llinfo_purge(rt);
				}

				/*
				 * Adjust route ref count for the interfaces.
				 */
				if (rt->rt_if_ref_fn != NULL) {
					rt->rt_if_ref_fn(ifp, 1);
					rt->rt_if_ref_fn(rt->rt_ifp, -1);
				}
			}
			rt->rt_ifp = ifp;
			/*
			 * If this is the (non-scoped) default route, record
			 * the interface index used for the primary ifscope.
			 */
			if (rt_primary_default(rt, rt_key(rt))) {
				set_primary_ifscope(rt_key(rt)->sa_family,
				    rt->rt_ifp->if_index);
			}
			/*
			 * If rmx_mtu is not locked, update it
			 * to the MTU used by the new interface.
			 */
			if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
				rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
				if (rt_key(rt)->sa_family == AF_INET &&
				    INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
					rt->rt_rmx.rmx_mtu = IN6_LINKMTU(rt->rt_ifp);
					/* Further adjust the size for CLAT46 expansion */
					rt->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
				}
			}

			/* Let the new ifaddr set up per-route state (RTM_ADD). */
			if (rt->rt_ifa != NULL) {
				IFA_LOCK_SPIN(rt->rt_ifa);
				ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
				IFA_UNLOCK(rt->rt_ifa);
				if (ifa_rtrequest != NULL) {
					ifa_rtrequest(RTM_ADD, rt, Gate);
				}
			}
			IFA_REMREF(ifa);
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return;
		}
		/* Same ifaddr as before: drop the lookup reference. */
		IFA_REMREF(ifa);
		ifa = NULL;
	}

	/* XXX: to reset gateway to correct value, at RTM_CHANGE */
	if (rt->rt_ifa != NULL) {
		IFA_LOCK_SPIN(rt->rt_ifa);
		ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
		IFA_UNLOCK(rt->rt_ifa);
		if (ifa_rtrequest != NULL) {
			ifa_rtrequest(RTM_ADD, rt, Gate);
		}
	}

	/*
	 * Workaround for local address routes pointing to the loopback
	 * interface added by configd, until <rdar://problem/12970142>.
	 */
	if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
	    (rt->rt_flags & RTF_HOST) && rt->rt_ifa->ifa_ifp == rt->rt_ifp) {
		ifa = ifa_ifwithaddr(rt_key(rt));
		if (ifa != NULL) {
			if (ifa != rt->rt_ifa) {
				rtsetifa(rt, ifa);
			}
			IFA_REMREF(ifa);
		}
	}

	/* Release extra ref */
	RT_REMREF_LOCKED(rt);
}
1069 
1070 /*
1071  * Extract the addresses of the passed sockaddrs.
1072  * Do a little sanity checking so as to avoid bad memory references.
1073  * This data is derived straight from userland.
1074  */
1075 static int
rt_xaddrs(caddr_t cp,caddr_t cplim,struct rt_addrinfo * rtinfo)1076 rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
1077 {
1078 	struct sockaddr *sa;
1079 	int i;
1080 
1081 	bzero(rtinfo->rti_info, sizeof(rtinfo->rti_info));
1082 	for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
1083 		if ((rtinfo->rti_addrs & (1 << i)) == 0) {
1084 			continue;
1085 		}
1086 		sa = (struct sockaddr *)cp;
1087 		/*
1088 		 * It won't fit.
1089 		 */
1090 		if ((cp + sa->sa_len) > cplim) {
1091 			return EINVAL;
1092 		}
1093 		if (sa->sa_len > sizeof(struct sockaddr_storage)) {
1094 			return EINVAL;
1095 		}
1096 		/*
1097 		 * there are no more.. quit now
1098 		 * If there are more bits, they are in error.
1099 		 * I've seen this. route(1) can evidently generate these.
1100 		 * This causes kernel to core dump.
1101 		 * for compatibility, If we see this, point to a safe address.
1102 		 */
1103 		if (sa->sa_len == 0) {
1104 			rtinfo->rti_info[i] = &sa_zero;
1105 			return 0; /* should be EINVAL but for compat */
1106 		}
1107 		if (sa->sa_len < offsetof(struct sockaddr, sa_data)) {
1108 			return EINVAL;
1109 		}
1110 		/* accept it */
1111 		rtinfo->rti_info[i] = sa;
1112 		ADVANCE32(cp, sa);
1113 	}
1114 	return 0;
1115 }
1116 
/*
 * Build a routing-socket message of the given type into a freshly
 * allocated mbuf: a type-specific header followed by the sockaddrs
 * present in rtinfo, each padded to a 32-bit boundary.  Updates
 * rtinfo->rti_addrs with the mask of addresses actually appended.
 * Returns NULL on allocation failure or length mismatch.
 */
static struct mbuf *
rt_msg1(u_char type, struct rt_addrinfo *rtinfo)
{
	struct rt_msghdr *rtm;
	struct mbuf *m;
	int i;
	int len, dlen, off;

	/* Pick the header size that matches the message type. */
	switch (type) {
	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof(struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof(struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof(struct if_msghdr);
		break;

	default:
		len = sizeof(struct rt_msghdr);
	}
	/* Header must be contiguous; upgrade to a cluster if it won't fit. */
	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m && len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			m = NULL;
		}
	}
	if (m == NULL) {
		return NULL;
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	rtm = mtod(m, struct rt_msghdr *);
	bzero((caddr_t)rtm, len);
	/* Addresses are appended after the header. */
	off = len;
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa, *hint;
		uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

		/*
		 * Make sure to accomodate the largest possible size of sa_len.
		 */
		_CASSERT(sizeof(ssbuf) == (SOCK_MAXADDRLEN + 1));

		if ((sa = rtinfo->rti_info[i]) == NULL) {
			continue;
		}

		switch (i) {
		case RTAX_DST:
		case RTAX_NETMASK:
			/* Use DST (or IFA) to interpret the scrubbed address. */
			if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL) {
				hint = rtinfo->rti_info[RTAX_IFA];
			}

			/* Scrub away any trace of embedded interface scope */
			sa = rtm_scrub(type, i, hint, sa, &ssbuf,
			    sizeof(ssbuf), NULL);
			break;

		default:
			break;
		}

		rtinfo->rti_addrs |= (1 << i);
		dlen = sa->sa_len;
		m_copyback(m, off, dlen, (caddr_t)sa);
		/* len tracks actual bytes; off advances by the padded size. */
		len = off + dlen;
		off += ROUNDUP32(dlen);
	}
	/* m_copyback may have grown the mbuf on failure paths; verify. */
	if (m->m_pkthdr.len != len) {
		m_freem(m);
		return NULL;
	}
	rtm->rtm_msglen = (u_short)len;
	rtm->rtm_version = RTM_VERSION;
	rtm->rtm_type = type;
	return m;
}
1203 
/*
 * Build a routing message into a flat buffer (or just size it).
 *
 * If cp is NULL the function only computes and returns the message
 * length.  When a walkarg is supplied and cp is NULL, the message is
 * additionally rendered into the walker's temporary buffer (w_tmem),
 * growing it if needed, via a single "goto again" second pass.
 * credp controls credential-based scrubbing of gateway/ifp addresses.
 * Returns the total message length in bytes.
 */
static int
rt_msg2(u_char type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w,
    kauth_cred_t* credp)
{
	int i;
	int len, dlen, rlen, second_time = 0;
	caddr_t cp0;

	rtinfo->rti_addrs = 0;
again:
	/* Pick the header size that matches the message type. */
	switch (type) {
	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof(struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof(struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof(struct if_msghdr);
		break;

	case RTM_IFINFO2:
		len = sizeof(struct if_msghdr2);
		break;

	case RTM_NEWMADDR2:
		len = sizeof(struct ifma_msghdr2);
		break;

	case RTM_GET_EXT:
		len = sizeof(struct rt_msghdr_ext);
		break;

	case RTM_GET2:
		len = sizeof(struct rt_msghdr2);
		break;

	default:
		len = sizeof(struct rt_msghdr);
	}
	/* Remember the header start; addresses are written after it. */
	cp0 = cp;
	if (cp0) {
		cp += len;
	}
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa, *hint;
		uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

		/*
		 * Make sure to accomodate the largest possible size of sa_len.
		 */
		_CASSERT(sizeof(ssbuf) == (SOCK_MAXADDRLEN + 1));

		if ((sa = rtinfo->rti_info[i]) == NULL) {
			continue;
		}

		switch (i) {
		case RTAX_DST:
		case RTAX_NETMASK:
			/* Use DST (or IFA) to interpret the scrubbed address. */
			if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL) {
				hint = rtinfo->rti_info[RTAX_IFA];
			}

			/* Scrub away any trace of embedded interface scope */
			sa = rtm_scrub(type, i, hint, sa, &ssbuf,
			    sizeof(ssbuf), NULL);
			break;
		case RTAX_GATEWAY:
		case RTAX_IFP:
			/* May hide link-layer info depending on credentials. */
			sa = rtm_scrub(type, i, NULL, sa, &ssbuf,
			    sizeof(ssbuf), credp);
			break;

		default:
			break;
		}

		rtinfo->rti_addrs |= (1 << i);
		dlen = sa->sa_len;
		rlen = ROUNDUP32(dlen);
		if (cp) {
			bcopy((caddr_t)sa, cp, (size_t)dlen);
			/* Zero the alignment padding so no stack leaks out. */
			if (dlen != rlen) {
				bzero(cp + dlen, rlen - dlen);
			}
			cp += rlen;
		}
		len += rlen;
	}
	/*
	 * Sizing pass done: if the caller supplied a walker, make sure its
	 * scratch buffer can hold the message, then render into it once.
	 * second_time guards against looping more than twice.
	 */
	if (cp == NULL && w != NULL && !second_time) {
		struct walkarg *rw = w;

		if (rw->w_req != NULL) {
			if (rw->w_tmemsize < len) {
				if (rw->w_tmem != NULL) {
					kfree_data(rw->w_tmem, rw->w_tmemsize);
				}
				rw->w_tmem = (caddr_t) kalloc_data(len, Z_ZERO | Z_WAITOK);
				if (rw->w_tmem != NULL) {
					rw->w_tmemsize = len;
				}
			}
			if (rw->w_tmem != NULL) {
				cp = rw->w_tmem;
				second_time = 1;
				goto again;
			}
		}
	}
	/* If we rendered into a buffer, finalize the common header fields. */
	if (cp) {
		struct rt_msghdr *rtm = (struct rt_msghdr *)(void *)cp0;

		rtm->rtm_version = RTM_VERSION;
		rtm->rtm_type = type;
		rtm->rtm_msglen = (u_short)len;
	}
	return len;
}
1327 
1328 /*
1329  * This routine is called to generate a message from the routing
1330  * socket indicating that a redirect has occurred, a routing lookup
1331  * has failed, or that a protocol has detected timeouts to a particular
1332  * destination.
1333  */
1334 void
rt_missmsg(u_char type,struct rt_addrinfo * rtinfo,int flags,int error)1335 rt_missmsg(u_char type, struct rt_addrinfo *rtinfo, int flags, int error)
1336 {
1337 	struct rt_msghdr *rtm;
1338 	struct mbuf *m;
1339 	struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
1340 	struct sockproto route_proto = { .sp_family = PF_ROUTE, .sp_protocol = 0 };
1341 
1342 	if (route_cb.any_count == 0) {
1343 		return;
1344 	}
1345 	m = rt_msg1(type, rtinfo);
1346 	if (m == NULL) {
1347 		return;
1348 	}
1349 	rtm = mtod(m, struct rt_msghdr *);
1350 	rtm->rtm_flags = RTF_DONE | flags;
1351 	rtm->rtm_errno = error;
1352 	rtm->rtm_addrs = rtinfo->rti_addrs;
1353 	route_proto.sp_family = sa ? sa->sa_family : 0;
1354 	raw_input(m, &route_proto, &route_src, &route_dst);
1355 }
1356 
1357 /*
1358  * This routine is called to generate a message from the routing
1359  * socket indicating that the status of a network interface has changed.
1360  */
1361 void
rt_ifmsg(struct ifnet * ifp)1362 rt_ifmsg(struct ifnet *ifp)
1363 {
1364 	struct if_msghdr *ifm;
1365 	struct mbuf *m;
1366 	struct rt_addrinfo info;
1367 	struct  sockproto route_proto = { .sp_family = PF_ROUTE, .sp_protocol = 0 };
1368 
1369 	if (route_cb.any_count == 0) {
1370 		return;
1371 	}
1372 	bzero((caddr_t)&info, sizeof(info));
1373 	m = rt_msg1(RTM_IFINFO, &info);
1374 	if (m == NULL) {
1375 		return;
1376 	}
1377 	ifm = mtod(m, struct if_msghdr *);
1378 	ifm->ifm_index = ifp->if_index;
1379 	ifm->ifm_flags = (u_short)ifp->if_flags;
1380 	if_data_internal_to_if_data(ifp, &ifp->if_data, &ifm->ifm_data);
1381 	ifm->ifm_addrs = 0;
1382 	raw_input(m, &route_proto, &route_src, &route_dst);
1383 }
1384 
1385 /*
1386  * This is called to generate messages from the routing socket
1387  * indicating a network interface has had addresses associated with it.
1388  * if we ever reverse the logic and replace messages TO the routing
1389  * socket indicate a request to configure interfaces, then it will
1390  * be unnecessary as the routing socket will automatically generate
1391  * copies of it.
1392  *
1393  * Since this is coming from the interface, it is expected that the
1394  * interface will be locked.  Caller must hold rnh_lock and rt_lock.
1395  */
void
rt_newaddrmsg(u_char cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
	struct rt_addrinfo info;
	struct sockaddr *sa = 0;
	int pass;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifa->ifa_ifp;
	struct sockproto route_proto = { .sp_family = PF_ROUTE, .sp_protocol = 0 };

	/* Caller must hold rnh_lock and the route's lock. */
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (route_cb.any_count == 0) {
		return;
	}

	/* Become a regular mutex, just in case */
	RT_CONVERT_LOCK(rt);
	/*
	 * Two passes so the two messages are emitted in the right order:
	 * for RTM_ADD the address message precedes the route message,
	 * for RTM_DELETE the route message precedes the address message.
	 */
	for (pass = 1; pass < 3; pass++) {
		bzero((caddr_t)&info, sizeof(info));
		if ((cmd == RTM_ADD && pass == 1) ||
		    (cmd == RTM_DELETE && pass == 2)) {
			struct ifa_msghdr *ifam;
			u_char ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;

			/* Lock ifp for if_lladdr */
			ifnet_lock_shared(ifp);
			IFA_LOCK(ifa);
			info.rti_info[RTAX_IFA] = sa = ifa->ifa_addr;
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold its
			 * lock.  The link address is always present; it's
			 * never freed.
			 */
			info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;
			info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
			info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
			if ((m = rt_msg1(ncmd, &info)) == NULL) {
				IFA_UNLOCK(ifa);
				ifnet_lock_done(ifp);
				continue;
			}
			IFA_UNLOCK(ifa);
			ifnet_lock_done(ifp);
			ifam = mtod(m, struct ifa_msghdr *);
			ifam->ifam_index = ifp->if_index;
			/* Re-acquire briefly just for the metric/flags read. */
			IFA_LOCK_SPIN(ifa);
			ifam->ifam_metric = ifa->ifa_metric;
			ifam->ifam_flags = ifa->ifa_flags;
			IFA_UNLOCK(ifa);
			ifam->ifam_addrs = info.rti_addrs;
		}
		if ((cmd == RTM_ADD && pass == 2) ||
		    (cmd == RTM_DELETE && pass == 1)) {
			struct rt_msghdr *rtm;

			if (rt == NULL) {
				continue;
			}
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_DST] = sa = rt_key(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if ((m = rt_msg1(cmd, &info)) == NULL) {
				continue;
			}
			rtm = mtod(m, struct rt_msghdr *);
			rtm->rtm_index = ifp->if_index;
			rtm->rtm_flags |= rt->rt_flags;
			rtm->rtm_errno = error;
			rtm->rtm_addrs = info.rti_addrs;
		}
		/* Per-AF protocol tag lets raw_input filter listeners. */
		route_proto.sp_protocol = sa ? sa->sa_family : 0;
		raw_input(m, &route_proto, &route_src, &route_dst);
	}
}
1473 
1474 /*
1475  * This is the analogue to the rt_newaddrmsg which performs the same
1476  * function but for multicast group memberhips.  This is easier since
1477  * there is no route state to worry about.
1478  */
void
rt_newmaddrmsg(u_char cmd, struct ifmultiaddr *ifma)
{
	struct rt_addrinfo info;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifma->ifma_ifp;
	struct ifma_msghdr *ifmam;
	struct sockproto route_proto = { .sp_family = PF_ROUTE, .sp_protocol = 0 };

	/* No routing-socket listeners: nothing to deliver. */
	if (route_cb.any_count == 0) {
		return;
	}

	/* Lock ifp for if_lladdr */
	ifnet_lock_shared(ifp);
	bzero((caddr_t)&info, sizeof(info));
	IFMA_LOCK(ifma);
	info.rti_info[RTAX_IFA] = ifma->ifma_addr;
	/* lladdr doesn't need lock */
	info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;

	/*
	 * If a link-layer address is present, present it as a ``gateway''
	 * (similarly to how ARP entries, e.g., are presented).
	 */
	info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ?
	    ifma->ifma_ll->ifma_addr : NULL;
	if ((m = rt_msg1(cmd, &info)) == NULL) {
		IFMA_UNLOCK(ifma);
		ifnet_lock_done(ifp);
		return;
	}
	ifmam = mtod(m, struct ifma_msghdr *);
	ifmam->ifmam_index = ifp->if_index;
	ifmam->ifmam_addrs = info.rti_addrs;
	/* Tag with the group's address family before dropping the locks. */
	route_proto.sp_protocol = ifma->ifma_addr->sa_family;
	IFMA_UNLOCK(ifma);
	ifnet_lock_done(ifp);
	raw_input(m, &route_proto, &route_src, &route_dst);
}
1519 
1520 const char *
rtm2str(int cmd)1521 rtm2str(int cmd)
1522 {
1523 	const char *c = "RTM_?";
1524 
1525 	switch (cmd) {
1526 	case RTM_ADD:
1527 		c = "RTM_ADD";
1528 		break;
1529 	case RTM_DELETE:
1530 		c = "RTM_DELETE";
1531 		break;
1532 	case RTM_CHANGE:
1533 		c = "RTM_CHANGE";
1534 		break;
1535 	case RTM_GET:
1536 		c = "RTM_GET";
1537 		break;
1538 	case RTM_LOSING:
1539 		c = "RTM_LOSING";
1540 		break;
1541 	case RTM_REDIRECT:
1542 		c = "RTM_REDIRECT";
1543 		break;
1544 	case RTM_MISS:
1545 		c = "RTM_MISS";
1546 		break;
1547 	case RTM_LOCK:
1548 		c = "RTM_LOCK";
1549 		break;
1550 	case RTM_OLDADD:
1551 		c = "RTM_OLDADD";
1552 		break;
1553 	case RTM_OLDDEL:
1554 		c = "RTM_OLDDEL";
1555 		break;
1556 	case RTM_RESOLVE:
1557 		c = "RTM_RESOLVE";
1558 		break;
1559 	case RTM_NEWADDR:
1560 		c = "RTM_NEWADDR";
1561 		break;
1562 	case RTM_DELADDR:
1563 		c = "RTM_DELADDR";
1564 		break;
1565 	case RTM_IFINFO:
1566 		c = "RTM_IFINFO";
1567 		break;
1568 	case RTM_NEWMADDR:
1569 		c = "RTM_NEWMADDR";
1570 		break;
1571 	case RTM_DELMADDR:
1572 		c = "RTM_DELMADDR";
1573 		break;
1574 	case RTM_GET_SILENT:
1575 		c = "RTM_GET_SILENT";
1576 		break;
1577 	case RTM_IFINFO2:
1578 		c = "RTM_IFINFO2";
1579 		break;
1580 	case RTM_NEWMADDR2:
1581 		c = "RTM_NEWMADDR2";
1582 		break;
1583 	case RTM_GET2:
1584 		c = "RTM_GET2";
1585 		break;
1586 	case RTM_GET_EXT:
1587 		c = "RTM_GET_EXT";
1588 		break;
1589 	}
1590 
1591 	return c;
1592 }
1593 
1594 /*
1595  * This is used in dumping the kernel table via sysctl().
1596  */
/*
 * Radix-tree walker callback: format one route entry as an RTM_GET
 * (or RTM_GET2) message and copy it out through the sysctl request
 * carried in the walkarg.  Returns 0 to continue the walk, or a
 * SYSCTL_OUT error to abort it.
 */
static int
sysctl_dumpentry(struct radix_node *rn, void *vw)
{
	struct walkarg *w = vw;
	struct rtentry *rt = (struct rtentry *)rn;
	int error = 0, size;
	struct rt_addrinfo info;
	kauth_cred_t cred;
	kauth_cred_t *credp;

	/* Credentials of the requesting process drive address scrubbing. */
	cred = kauth_cred_proc_ref(current_proc());
	credp = &cred;

	RT_LOCK(rt);
	/* For flag-filtered dumps, skip routes whose flags don't match. */
	if ((w->w_op == NET_RT_FLAGS || w->w_op == NET_RT_FLAGS_PRIV) &&
	    !(rt->rt_flags & w->w_arg)) {
		goto done;
	}

	/*
	 * If the matching route has RTF_LLINFO set, then we can skip scrubbing the MAC
	 * only if the outgoing interface is not loopback and the process has entitlement
	 * for neighbor cache read.
	 */
	if (w->w_op == NET_RT_FLAGS_PRIV && (rt->rt_flags & RTF_LLINFO)) {
		if (rt->rt_ifp != lo_ifp &&
		    (route_op_entitlement_check(NULL, cred, ROUTE_OP_READ, TRUE) == 0)) {
			/* NULL credp means "do not scrub link-layer addresses". */
			credp = NULL;
		}
	}

	/* Gather the sockaddrs the message will carry. */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
	if (RT_HAS_IFADDR(rt)) {
		info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
	}

	if (w->w_op != NET_RT_DUMP2) {
		/* rt_msg2 renders into w->w_tmem when w_req is set. */
		size = rt_msg2(RTM_GET, &info, NULL, w, credp);
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct rt_msghdr *rtm =
			    (struct rt_msghdr *)(void *)w->w_tmem;

			rtm->rtm_flags = rt->rt_flags;
			rtm->rtm_use = rt->rt_use;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_index = rt->rt_ifp->if_index;
			rtm->rtm_pid = 0;
			rtm->rtm_seq = 0;
			rtm->rtm_errno = 0;
			rtm->rtm_addrs = info.rti_addrs;
			error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
		}
	} else {
		/* NET_RT_DUMP2 uses the extended rt_msghdr2 layout. */
		size = rt_msg2(RTM_GET2, &info, NULL, w, credp);
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct rt_msghdr2 *rtm =
			    (struct rt_msghdr2 *)(void *)w->w_tmem;

			rtm->rtm_flags = rt->rt_flags;
			rtm->rtm_use = rt->rt_use;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_index = rt->rt_ifp->if_index;
			rtm->rtm_refcnt = rt->rt_refcnt;
			if (rt->rt_parent) {
				rtm->rtm_parentflags = rt->rt_parent->rt_flags;
			} else {
				rtm->rtm_parentflags = 0;
			}
			rtm->rtm_reserved = 0;
			rtm->rtm_addrs = info.rti_addrs;
			error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
		}
	}

done:
	RT_UNLOCK(rt);
	kauth_cred_unref(&cred);
	return error;
}
1680 
1681 /*
1682  * This is used for dumping extended information from route entries.
1683  */
/*
 * Radix-tree walker callback for the extended dump (NET_RT_DUMPX*):
 * like sysctl_dumpentry but emits rt_msghdr_ext records, which also
 * carry link reachability info (rtm_ri) when the route provides it.
 */
static int
sysctl_dumpentry_ext(struct radix_node *rn, void *vw)
{
	struct walkarg *w = vw;
	struct rtentry *rt = (struct rtentry *)rn;
	int error = 0, size;
	struct rt_addrinfo info;
	kauth_cred_t cred;

	cred = kauth_cred_proc_ref(current_proc());

	RT_LOCK(rt);
	/* For flag-filtered dumps, skip routes whose flags don't match. */
	if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg)) {
		goto done;
	}
	bzero(&info, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

	/* rt_msg2 renders into w->w_tmem when w_req is set. */
	size = rt_msg2(RTM_GET_EXT, &info, NULL, w, &cred);
	if (w->w_req != NULL && w->w_tmem != NULL) {
		struct rt_msghdr_ext *ertm =
		    (struct rt_msghdr_ext *)(void *)w->w_tmem;

		ertm->rtm_flags = rt->rt_flags;
		ertm->rtm_use = rt->rt_use;
		rt_getmetrics(rt, &ertm->rtm_rmx);
		ertm->rtm_index = rt->rt_ifp->if_index;
		ertm->rtm_pid = 0;
		ertm->rtm_seq = 0;
		ertm->rtm_errno = 0;
		ertm->rtm_addrs = info.rti_addrs;
		if (rt->rt_llinfo_get_ri == NULL) {
			/* No link info provider: report unknown/off values. */
			bzero(&ertm->rtm_ri, sizeof(ertm->rtm_ri));
			ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN;
			ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF;
			ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN;
		} else {
			rt->rt_llinfo_get_ri(rt, &ertm->rtm_ri);
		}
		error = SYSCTL_OUT(w->w_req, (caddr_t)ertm, size);
	}

done:
	RT_UNLOCK(rt);
	kauth_cred_unref(&cred);
	return error;
}
1734 
1735 /*
1736  * rdar://9307819
1737  * To avoid to call copyout() while holding locks and to cause problems
1738  * in the paging path, sysctl_iflist() and sysctl_iflist2() contstruct
1739  * the list in two passes. In the first pass we compute the total
1740  * length of the data we are going to copyout, then we release
1741  * all locks to allocate a temporary buffer that gets filled
1742  * in the second pass.
1743  *
1744  * Note that we are verifying the assumption that _MALLOC returns a buffer
1745  * that is at least 32 bits aligned and that the messages and addresses are
1746  * 32 bits aligned.
1747  */
/*
 * NET_RT_IFLIST sysctl: dump every interface (RTM_IFINFO) and its
 * addresses (RTM_NEWADDR), optionally restricted to one address family
 * (af) or one interface index (w->w_arg).  Two passes as described in
 * the comment above: pass 0 sizes the output, pass 1 renders it into a
 * temporary buffer which is then copied out in a single SYSCTL_OUT.
 */
static int
sysctl_iflist(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct  rt_addrinfo info;
	int     len = 0, error = 0;
	int     pass = 0;
	int     total_len = 0, total_buffer_len = 0, current_len = 0;
	char    *total_buffer = NULL, *cp = NULL;
	kauth_cred_t cred;

	/* Credentials of the requesting process drive address scrubbing. */
	cred = kauth_cred_proc_ref(current_proc());

	bzero((caddr_t)&info, sizeof(info));

	for (pass = 0; pass < 2; pass++) {
		ifnet_head_lock_shared();

		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (error) {
				break;
			}
			/* Honor a single-interface filter if requested. */
			if (w->w_arg && w->w_arg != ifp->if_index) {
				continue;
			}
			ifnet_lock_shared(ifp);
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold the ifa
			 * lock.  The link address is always present; it's
			 * never freed.
			 */
			ifa = ifp->if_lladdr;
			info.rti_info[RTAX_IFP] = ifa->ifa_addr;
			len = rt_msg2(RTM_IFINFO, &info, NULL, NULL, &cred);
			if (pass == 0) {
				/* Sizing pass: just accumulate the length. */
				total_len += len;
			} else {
				struct if_msghdr *ifm;

				/*
				 * The interface list may have grown between
				 * passes; bail out rather than overrun.
				 */
				if (current_len + len > total_len) {
					ifnet_lock_done(ifp);
					error = ENOBUFS;
					break;
				}
				info.rti_info[RTAX_IFP] = ifa->ifa_addr;
				len = rt_msg2(RTM_IFINFO, &info,
				    (caddr_t)cp, NULL, &cred);
				info.rti_info[RTAX_IFP] = NULL;

				ifm = (struct if_msghdr *)(void *)cp;
				ifm->ifm_index = ifp->if_index;
				ifm->ifm_flags = (u_short)ifp->if_flags;
				if_data_internal_to_if_data(ifp, &ifp->if_data,
				    &ifm->ifm_data);
				ifm->ifm_addrs = info.rti_addrs;
				/*
				 * <rdar://problem/32940901>
				 * Round bytes only for non-platform
				 */
				if (!csproc_get_platform_binary(w->w_req->p)) {
					ALIGN_BYTES(ifm->ifm_data.ifi_ibytes);
					ALIGN_BYTES(ifm->ifm_data.ifi_obytes);
				}

				cp += len;
				VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
				current_len += len;
			}
			/* Walk the remaining (protocol) addresses on this ifp. */
			while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
				IFA_LOCK(ifa);
				/* Honor the address-family filter. */
				if (af && af != ifa->ifa_addr->sa_family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				/* CLAT46 synthesized addresses are not exported. */
				if (ifa->ifa_addr->sa_family == AF_INET6 &&
				    (((struct in6_ifaddr *)ifa)->ia6_flags &
				    IN6_IFF_CLAT46) != 0) {
					IFA_UNLOCK(ifa);
					continue;
				}
				info.rti_info[RTAX_IFA] = ifa->ifa_addr;
				info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
				info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
				len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifa_msghdr *ifam;

					if (current_len + len > total_len) {
						IFA_UNLOCK(ifa);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWADDR, &info,
					    (caddr_t)cp, NULL, &cred);

					ifam = (struct ifa_msghdr *)(void *)cp;
					ifam->ifam_index =
					    ifa->ifa_ifp->if_index;
					ifam->ifam_flags = ifa->ifa_flags;
					ifam->ifam_metric = ifa->ifa_metric;
					ifam->ifam_addrs = info.rti_addrs;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof(u_int32_t)));
					current_len += len;
				}
				IFA_UNLOCK(ifa);
			}
			ifnet_lock_done(ifp);
			/* Clear per-interface entries before the next ifp. */
			info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
			    info.rti_info[RTAX_BRD] = NULL;
		}

		ifnet_head_done();

		if (error != 0) {
			if (error == ENOBUFS) {
				printf("%s: current_len (%d) + len (%d) > "
				    "total_len (%d)\n", __func__, current_len,
				    len, total_len);
			}
			break;
		}

		if (pass == 0) {
			/* Better to return zero length buffer than ENOBUFS */
			if (total_len == 0) {
				total_len = 1;
			}
			/* Pad by 1/8 to absorb growth between the passes. */
			total_len += total_len >> 3;
			total_buffer_len = total_len;
			total_buffer = (char *) kalloc_data(total_len, Z_ZERO | Z_WAITOK);
			if (total_buffer == NULL) {
				printf("%s: kalloc_data(%d) failed\n", __func__,
				    total_len);
				error = ENOBUFS;
				break;
			}
			cp = total_buffer;
			VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
		} else {
			/* Rendering pass done: copy everything out at once. */
			error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
			if (error) {
				break;
			}
		}
	}

	if (total_buffer != NULL) {
		kfree_data(total_buffer, total_buffer_len);
	}

	kauth_cred_unref(&cred);
	return error;
}
1909 
1910 static int
sysctl_iflist2(int af,struct walkarg * w)1911 sysctl_iflist2(int af, struct walkarg *w)
1912 {
1913 	struct ifnet *ifp;
1914 	struct ifaddr *ifa;
1915 	struct  rt_addrinfo info;
1916 	int     len = 0, error = 0;
1917 	int     pass = 0;
1918 	int     total_len = 0, total_buffer_len = 0, current_len = 0;
1919 	char    *total_buffer = NULL, *cp = NULL;
1920 	kauth_cred_t cred;
1921 
1922 	cred = kauth_cred_proc_ref(current_proc());
1923 
1924 	bzero((caddr_t)&info, sizeof(info));
1925 
1926 	for (pass = 0; pass < 2; pass++) {
1927 		struct ifmultiaddr *ifma;
1928 
1929 		ifnet_head_lock_shared();
1930 
1931 		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1932 			if (error) {
1933 				break;
1934 			}
1935 			if (w->w_arg && w->w_arg != ifp->if_index) {
1936 				continue;
1937 			}
1938 			ifnet_lock_shared(ifp);
1939 			/*
1940 			 * Holding ifnet lock here prevents the link address
1941 			 * from changing contents, so no need to hold the ifa
1942 			 * lock.  The link address is always present; it's
1943 			 * never freed.
1944 			 */
1945 			ifa = ifp->if_lladdr;
1946 			info.rti_info[RTAX_IFP] = ifa->ifa_addr;
1947 			len = rt_msg2(RTM_IFINFO2, &info, NULL, NULL, &cred);
1948 			if (pass == 0) {
1949 				total_len += len;
1950 			} else {
1951 				struct if_msghdr2 *ifm;
1952 
1953 				if (current_len + len > total_len) {
1954 					ifnet_lock_done(ifp);
1955 					error = ENOBUFS;
1956 					break;
1957 				}
1958 				info.rti_info[RTAX_IFP] = ifa->ifa_addr;
1959 				len = rt_msg2(RTM_IFINFO2, &info,
1960 				    (caddr_t)cp, NULL, &cred);
1961 				info.rti_info[RTAX_IFP] = NULL;
1962 
1963 				ifm = (struct if_msghdr2 *)(void *)cp;
1964 				ifm->ifm_addrs = info.rti_addrs;
1965 				ifm->ifm_flags = (u_short)ifp->if_flags;
1966 				ifm->ifm_index = ifp->if_index;
1967 				ifm->ifm_snd_len = IFCQ_LEN(ifp->if_snd);
1968 				ifm->ifm_snd_maxlen = IFCQ_MAXLEN(ifp->if_snd);
1969 				ifm->ifm_snd_drops =
1970 				    (int)ifp->if_snd->ifcq_dropcnt.packets;
1971 				ifm->ifm_timer = ifp->if_timer;
1972 				if_data_internal_to_if_data64(ifp,
1973 				    &ifp->if_data, &ifm->ifm_data);
1974 				/*
1975 				 * <rdar://problem/32940901>
1976 				 * Round bytes only for non-platform
1977 				 */
1978 				if (!csproc_get_platform_binary(w->w_req->p)) {
1979 					ALIGN_BYTES(ifm->ifm_data.ifi_ibytes);
1980 					ALIGN_BYTES(ifm->ifm_data.ifi_obytes);
1981 				}
1982 
1983 				cp += len;
1984 				VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
1985 				current_len += len;
1986 			}
1987 			while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
1988 				IFA_LOCK(ifa);
1989 				if (af && af != ifa->ifa_addr->sa_family) {
1990 					IFA_UNLOCK(ifa);
1991 					continue;
1992 				}
1993 				if (ifa->ifa_addr->sa_family == AF_INET6 &&
1994 				    (((struct in6_ifaddr *)ifa)->ia6_flags &
1995 				    IN6_IFF_CLAT46) != 0) {
1996 					IFA_UNLOCK(ifa);
1997 					continue;
1998 				}
1999 
2000 				info.rti_info[RTAX_IFA] = ifa->ifa_addr;
2001 				info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
2002 				info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
2003 				len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
2004 				    &cred);
2005 				if (pass == 0) {
2006 					total_len += len;
2007 				} else {
2008 					struct ifa_msghdr *ifam;
2009 
2010 					if (current_len + len > total_len) {
2011 						IFA_UNLOCK(ifa);
2012 						error = ENOBUFS;
2013 						break;
2014 					}
2015 					len = rt_msg2(RTM_NEWADDR, &info,
2016 					    (caddr_t)cp, NULL, &cred);
2017 
2018 					ifam = (struct ifa_msghdr *)(void *)cp;
2019 					ifam->ifam_index =
2020 					    ifa->ifa_ifp->if_index;
2021 					ifam->ifam_flags = ifa->ifa_flags;
2022 					ifam->ifam_metric = ifa->ifa_metric;
2023 					ifam->ifam_addrs = info.rti_addrs;
2024 
2025 					cp += len;
2026 					VERIFY(IS_P2ALIGNED(cp,
2027 					    sizeof(u_int32_t)));
2028 					current_len += len;
2029 				}
2030 				IFA_UNLOCK(ifa);
2031 			}
2032 			if (error) {
2033 				ifnet_lock_done(ifp);
2034 				break;
2035 			}
2036 
2037 			for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
2038 			    ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
2039 				struct ifaddr *ifa0;
2040 
2041 				IFMA_LOCK(ifma);
2042 				if (af && af != ifma->ifma_addr->sa_family) {
2043 					IFMA_UNLOCK(ifma);
2044 					continue;
2045 				}
2046 				bzero((caddr_t)&info, sizeof(info));
2047 				info.rti_info[RTAX_IFA] = ifma->ifma_addr;
2048 				/*
2049 				 * Holding ifnet lock here prevents the link
2050 				 * address from changing contents, so no need
2051 				 * to hold the ifa0 lock.  The link address is
2052 				 * always present; it's never freed.
2053 				 */
2054 				ifa0 = ifp->if_lladdr;
2055 				info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
2056 				if (ifma->ifma_ll != NULL) {
2057 					info.rti_info[RTAX_GATEWAY] =
2058 					    ifma->ifma_ll->ifma_addr;
2059 				}
2060 				len = rt_msg2(RTM_NEWMADDR2, &info, NULL, NULL,
2061 				    &cred);
2062 				if (pass == 0) {
2063 					total_len += len;
2064 				} else {
2065 					struct ifma_msghdr2 *ifmam;
2066 
2067 					if (current_len + len > total_len) {
2068 						IFMA_UNLOCK(ifma);
2069 						error = ENOBUFS;
2070 						break;
2071 					}
2072 					len = rt_msg2(RTM_NEWMADDR2, &info,
2073 					    (caddr_t)cp, NULL, &cred);
2074 
2075 					ifmam =
2076 					    (struct ifma_msghdr2 *)(void *)cp;
2077 					ifmam->ifmam_addrs = info.rti_addrs;
2078 					ifmam->ifmam_flags = 0;
2079 					ifmam->ifmam_index =
2080 					    ifma->ifma_ifp->if_index;
2081 					ifmam->ifmam_refcount =
2082 					    ifma->ifma_reqcnt;
2083 
2084 					cp += len;
2085 					VERIFY(IS_P2ALIGNED(cp,
2086 					    sizeof(u_int32_t)));
2087 					current_len += len;
2088 				}
2089 				IFMA_UNLOCK(ifma);
2090 			}
2091 			ifnet_lock_done(ifp);
2092 			info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
2093 			    info.rti_info[RTAX_BRD] = NULL;
2094 		}
2095 		ifnet_head_done();
2096 
2097 		if (error) {
2098 			if (error == ENOBUFS) {
2099 				printf("%s: current_len (%d) + len (%d) > "
2100 				    "total_len (%d)\n", __func__, current_len,
2101 				    len, total_len);
2102 			}
2103 			break;
2104 		}
2105 
2106 		if (pass == 0) {
2107 			/* Better to return zero length buffer than ENOBUFS */
2108 			if (total_len == 0) {
2109 				total_len = 1;
2110 			}
2111 			total_len += total_len >> 3;
2112 			total_buffer_len = total_len;
2113 			total_buffer = (char *) kalloc_data(total_len, Z_ZERO | Z_WAITOK);
2114 			if (total_buffer == NULL) {
2115 				printf("%s: kalloc_data(%d) failed\n", __func__,
2116 				    total_len);
2117 				error = ENOBUFS;
2118 				break;
2119 			}
2120 			cp = total_buffer;
2121 			VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
2122 		} else {
2123 			error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
2124 			if (error) {
2125 				break;
2126 			}
2127 		}
2128 	}
2129 
2130 	if (total_buffer != NULL) {
2131 		kfree_data(total_buffer, total_buffer_len);
2132 	}
2133 
2134 	kauth_cred_unref(&cred);
2135 	return error;
2136 }
2137 
2138 
2139 static int
sysctl_rtstat(struct sysctl_req * req)2140 sysctl_rtstat(struct sysctl_req *req)
2141 {
2142 	return SYSCTL_OUT(req, &rtstat, sizeof(struct rtstat));
2143 }
2144 
2145 static int
sysctl_rttrash(struct sysctl_req * req)2146 sysctl_rttrash(struct sysctl_req *req)
2147 {
2148 	return SYSCTL_OUT(req, &rttrash, sizeof(rttrash));
2149 }
2150 
/*
 * Top-level PF_ROUTE sysctl handler.  Dispatches on the requested
 * operation (NET_RT_*): route-table dumps walk the radix trees under
 * rnh_lock, interface lists go through sysctl_iflist()/sysctl_iflist2(),
 * and the stat/trash operations copy out a single object.
 *
 * The sysctl name words after the OID prefix are:
 *   name[0] = address family filter (0 == all families)
 *   name[1] = operation (NET_RT_*)
 *   name[2] = operation-specific argument (e.g. interface index)
 */
static int
sysctl_rtsock SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int     *name = (int *)arg1;
	u_int   namelen = arg2;
	struct radix_node_head *rnh;
	int     i, error = EINVAL;	/* unknown w_op falls through as EINVAL */
	u_char  af;
	struct  walkarg w;

	/* Skip the leading OID component; three words must remain. */
	name++;
	namelen--;
	if (req->newptr) {
		/* This node is read-only; writes are rejected. */
		return EPERM;
	}
	if (namelen != 3) {
		return EINVAL;
	}
	af = (u_char)name[0];
	Bzero(&w, sizeof(w));
	w.w_op = name[1];
	w.w_arg = name[2];
	w.w_req = req;

	switch (w.w_op) {
	case NET_RT_DUMP:
	case NET_RT_DUMP2:
	case NET_RT_FLAGS:
	case NET_RT_FLAGS_PRIV:
		/*
		 * Walk every per-AF radix tree (or just the requested one
		 * when af != 0) under rnh_lock; stop on the first error
		 * reported by sysctl_dumpentry.
		 */
		lck_mtx_lock(rnh_lock);
		for (i = 1; i <= AF_MAX; i++) {
			if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
			    (error = rnh->rnh_walktree(rnh,
			    sysctl_dumpentry, &w))) {
				break;
			}
		}
		lck_mtx_unlock(rnh_lock);
		break;
	case NET_RT_DUMPX:
	case NET_RT_DUMPX_FLAGS:
		/* Same walk, but emits extended (rt_msghdr_ext) records. */
		lck_mtx_lock(rnh_lock);
		for (i = 1; i <= AF_MAX; i++) {
			if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
			    (error = rnh->rnh_walktree(rnh,
			    sysctl_dumpentry_ext, &w))) {
				break;
			}
		}
		lck_mtx_unlock(rnh_lock);
		break;
	case NET_RT_IFLIST:
		error = sysctl_iflist(af, &w);
		break;
	case NET_RT_IFLIST2:
		error = sysctl_iflist2(af, &w);
		break;
	case NET_RT_STAT:
		error = sysctl_rtstat(req);
		break;
	case NET_RT_TRASH:
		error = sysctl_rttrash(req);
		break;
	}
	/* Release the scratch buffer the walkers may have allocated. */
	if (w.w_tmem != NULL) {
		kfree_data(w.w_tmem, w.w_tmemsize);
	}
	return error;
}
2221 
2222 /*
2223  * Definitions of protocols supported in the ROUTE domain.
2224  */
static struct protosw routesw[] = {
	{
		/* Raw socket carrying atomic, address-tagged messages. */
		.pr_type =              SOCK_RAW,
		.pr_protocol =          0,	/* wildcard protocol */
		.pr_flags =             PR_ATOMIC | PR_ADDR,
		.pr_output =            route_output,
		.pr_ctlinput =          raw_ctlinput,
		.pr_usrreqs =           &route_usrreqs,
	}
};
2235 
2236 static int route_proto_count = (sizeof(routesw) / sizeof(struct protosw));
2237 
/* Static descriptor for the PF_ROUTE domain; initialized by route_dinit. */
struct domain routedomain_s = {
	.dom_family =           PF_ROUTE,
	.dom_name =             "route",
	.dom_init =             route_dinit,
};
2243 
2244 static void
route_dinit(struct domain * dp)2245 route_dinit(struct domain *dp)
2246 {
2247 	struct protosw *pr;
2248 	int i;
2249 
2250 	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
2251 	VERIFY(routedomain == NULL);
2252 
2253 	routedomain = dp;
2254 
2255 	for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++) {
2256 		net_add_proto(pr, dp, 1);
2257 	}
2258 
2259 	route_init();
2260 }
2261