xref: /xnu-10063.121.3/bsd/netinet/in_pcb.c (revision 2c2f96dc2b9a4408a43d3150ae9c105355ca3daa)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1991, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)in_pcb.c	8.4 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/in_pcb.c,v 1.59.2.17 2001/08/13 16:26:17 ume Exp $
62  */
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/domain.h>
69 #include <sys/protosw.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/proc.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/mcache.h>
76 #include <sys/kauth.h>
77 #include <sys/priv.h>
78 #include <sys/proc_uuid_policy.h>
79 #include <sys/syslog.h>
80 #include <sys/priv.h>
81 #include <sys/file_internal.h>
82 #include <net/dlil.h>
83 
84 #include <libkern/OSAtomic.h>
85 #include <kern/locks.h>
86 
87 #include <machine/limits.h>
88 
89 #include <kern/zalloc.h>
90 
91 #include <net/if.h>
92 #include <net/if_types.h>
93 #include <net/route.h>
94 #include <net/flowhash.h>
95 #include <net/flowadv.h>
96 #include <net/nat464_utils.h>
97 #include <net/ntstat.h>
98 #include <net/nwk_wq.h>
99 #include <net/restricted_in_port.h>
100 
101 #include <netinet/in.h>
102 #include <netinet/in_pcb.h>
103 #include <netinet/inp_log.h>
104 #include <netinet/in_var.h>
105 #include <netinet/ip_var.h>
106 
107 #include <netinet/ip6.h>
108 #include <netinet6/ip6_var.h>
109 
110 #include <sys/kdebug.h>
111 #include <sys/random.h>
112 
113 #include <dev/random/randomdev.h>
114 #include <mach/boolean.h>
115 
116 #include <atm/atm_internal.h>
117 #include <pexpert/pexpert.h>
118 
119 #if NECP
120 #include <net/necp.h>
121 #endif
122 
123 #include <sys/stat.h>
124 #include <sys/ubc.h>
125 #include <sys/vnode.h>
126 
127 #include <os/log.h>
128 
129 #if SKYWALK
130 #include <skywalk/namespace/flowidns.h>
131 #endif /* SKYWALK */
132 
133 #include <IOKit/IOBSD.h>
134 
135 #include <net/sockaddr_utils.h>
136 
137 extern const char *proc_name_address(struct proc *);
138 
139 static LCK_GRP_DECLARE(inpcb_lock_grp, "inpcb");
140 static LCK_ATTR_DECLARE(inpcb_lock_attr, 0, 0);
141 static LCK_MTX_DECLARE_ATTR(inpcb_lock, &inpcb_lock_grp, &inpcb_lock_attr);
142 static LCK_MTX_DECLARE_ATTR(inpcb_timeout_lock, &inpcb_lock_grp, &inpcb_lock_attr);
143 
144 static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head);
145 
146 static u_int16_t inpcb_timeout_run = 0; /* INPCB timer is scheduled to run */
147 static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */
148 static boolean_t inpcb_ticking = FALSE;         /* "slow" timer is scheduled */
149 static boolean_t inpcb_fast_timer_on = FALSE;
150 
151 #define INPCB_GCREQ_THRESHOLD   50000
152 
153 static thread_call_t inpcb_thread_call, inpcb_fast_thread_call;
154 static void inpcb_sched_timeout(void);
155 static void inpcb_sched_lazy_timeout(void);
156 static void _inpcb_sched_timeout(unsigned int);
157 static void inpcb_timeout(void *, void *);
158 const int inpcb_timeout_lazy = 10;      /* 10 seconds leeway for lazy timers */
159 extern int tvtohz(struct timeval *);
160 
161 #if CONFIG_PROC_UUID_POLICY
162 static void inp_update_cellular_policy(struct inpcb *, boolean_t);
163 #if NECP
164 static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t);
165 #endif /* NECP */
166 #endif /* !CONFIG_PROC_UUID_POLICY */
167 
168 #define DBG_FNC_PCB_LOOKUP      NETDBG_CODE(DBG_NETTCP, (6 << 8))
169 #define DBG_FNC_PCB_HLOOKUP     NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))
170 
171 int allow_udp_port_exhaustion = 0;
172 
173 /*
174  * These configure the range of local port addresses assigned to
175  * "unspecified" outgoing connections/packets/whatever.
176  */
177 int     ipport_lowfirstauto  = IPPORT_RESERVED - 1;     /* 1023 */
178 int     ipport_lowlastauto = IPPORT_RESERVEDSTART;      /* 600 */
179 int     ipport_firstauto = IPPORT_HIFIRSTAUTO;          /* 49152 */
180 int     ipport_lastauto  = IPPORT_HILASTAUTO;           /* 65535 */
181 int     ipport_hifirstauto = IPPORT_HIFIRSTAUTO;        /* 49152 */
182 int     ipport_hilastauto  = IPPORT_HILASTAUTO;         /* 65535 */
183 
184 #define RANGECHK(var, min, max) \
185 	if ((var) < (min)) { (var) = (min); } \
186 	else if ((var) > (max)) { (var) = (max); }
187 
188 static int
189 sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
190 {
191 #pragma unused(arg1, arg2)
192 	int error;
193 	int new_value = *(int *)oidp->oid_arg1;
194 #if (DEBUG | DEVELOPMENT)
195 	int old_value = *(int *)oidp->oid_arg1;
196 	/*
197 	 * For unit testing allow a non-superuser process with the
198 	 * proper entitlement to modify the variables
199 	 */
200 	if (req->newptr) {
201 		if (proc_suser(current_proc()) != 0 &&
202 		    (error = priv_check_cred(kauth_cred_get(),
203 		    PRIV_NETINET_RESERVEDPORT, 0))) {
204 			return EPERM;
205 		}
206 	}
207 #endif /* (DEBUG | DEVELOPMENT) */
208 
209 	error = sysctl_handle_int(oidp, &new_value, 0, req);
210 	if (!error) {
211 		if (oidp->oid_arg1 == &ipport_lowfirstauto || oidp->oid_arg1 == &ipport_lowlastauto) {
212 			RANGECHK(new_value, 1, IPPORT_RESERVED - 1);
213 		} else {
214 			RANGECHK(new_value, IPPORT_RESERVED, USHRT_MAX);
215 		}
216 		*(int *)oidp->oid_arg1 = new_value;
217 	}
218 
219 #if (DEBUG | DEVELOPMENT)
220 	os_log(OS_LOG_DEFAULT,
221 	    "%s:%u sysctl net.restricted_port.verbose: %d -> %d)",
222 	    proc_best_name(current_proc()), proc_selfpid(),
223 	    old_value, *(int *)oidp->oid_arg1);
224 #endif /* (DEBUG | DEVELOPMENT) */
225 
226 	return error;
227 }
228 
229 #undef RANGECHK
230 
231 SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange,
232     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports");
233 
234 #if (DEBUG | DEVELOPMENT)
235 #define CTLFAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY)
236 #else
237 #define CTLFAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED)
238 #endif /* (DEBUG | DEVELOPMENT) */
239 
240 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
241     CTLFAGS_IP_PORTRANGE,
242     &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
243 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
244     CTLFAGS_IP_PORTRANGE,
245     &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
246 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first,
247     CTLFAGS_IP_PORTRANGE,
248     &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
249 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last,
250     CTLFAGS_IP_PORTRANGE,
251     &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
252 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
253     CTLFAGS_IP_PORTRANGE,
254     &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
255 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
256     CTLFAGS_IP_PORTRANGE,
257     &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");
258 SYSCTL_INT(_net_inet_ip_portrange, OID_AUTO, ipport_allow_udp_port_exhaustion,
259     CTLFLAG_LOCKED | CTLFLAG_RW, &allow_udp_port_exhaustion, 0, "");
260 
261 static uint32_t apn_fallbk_debug = 0;
262 #define apn_fallbk_log(x)       do { if (apn_fallbk_debug >= 1) log x; } while (0)
263 
264 #if !XNU_TARGET_OS_OSX
265 static boolean_t apn_fallbk_enabled = TRUE;
266 
267 SYSCTL_DECL(_net_inet);
268 SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback");
269 SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
270     &apn_fallbk_enabled, 0, "APN fallback enable");
271 SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
272     &apn_fallbk_debug, 0, "APN fallback debug enable");
273 #else /* XNU_TARGET_OS_OSX */
274 static boolean_t apn_fallbk_enabled = FALSE;
275 #endif /* XNU_TARGET_OS_OSX */
276 
277 extern int      udp_use_randomport;
278 extern int      tcp_use_randomport;
279 
280 /* Structs used for flowhash computation */
281 struct inp_flowhash_key_addr {
282 	union {
283 		struct in_addr  v4;
284 		struct in6_addr v6;
285 		u_int8_t        addr8[16];
286 		u_int16_t       addr16[8];
287 		u_int32_t       addr32[4];
288 	} infha;
289 };
290 
291 struct inp_flowhash_key {
292 	struct inp_flowhash_key_addr    infh_laddr;
293 	struct inp_flowhash_key_addr    infh_faddr;
294 	u_int32_t                       infh_lport;
295 	u_int32_t                       infh_fport;
296 	u_int32_t                       infh_af;
297 	u_int32_t                       infh_proto;
298 	u_int32_t                       infh_rand1;
299 	u_int32_t                       infh_rand2;
300 };
301 
302 #if !SKYWALK
303 static u_int32_t inp_hash_seed = 0;
304 #endif /* !SKYWALK */
305 
306 static int infc_cmp(const struct inpcb *, const struct inpcb *);
307 
308 /* Flags used by inp_fc_getinp */
309 #define INPFC_SOLOCKED  0x1
310 #define INPFC_REMOVE    0x2
311 static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t);
312 
313 static void inp_fc_feedback(struct inpcb *);
314 extern void tcp_remove_from_time_wait(struct inpcb *inp);
315 
316 static LCK_MTX_DECLARE_ATTR(inp_fc_lck, &inpcb_lock_grp, &inpcb_lock_attr);
317 
318 RB_HEAD(inp_fc_tree, inpcb) inp_fc_tree;
319 RB_PROTOTYPE(inp_fc_tree, inpcb, infc_link, infc_cmp);
320 RB_GENERATE(inp_fc_tree, inpcb, infc_link, infc_cmp);
321 
322 /*
323  * Use this inp as a key to find an inp in the flowhash tree.
324  * Accesses to it are protected by inp_fc_lck.
325  */
326 struct inpcb key_inp;
327 
328 /*
329  * in_pcb.c: manage the Protocol Control Blocks.
330  */
331 
332 void
in_pcbinit(void)333 in_pcbinit(void)
334 {
335 	static int inpcb_initialized = 0;
336 	uint32_t logging_config;
337 
338 	VERIFY(!inpcb_initialized);
339 	inpcb_initialized = 1;
340 
341 	logging_config = atm_get_diagnostic_config();
342 	if (logging_config & 0x80000000) {
343 		inp_log_privacy = 1;
344 	}
345 
346 	inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout,
347 	    NULL, THREAD_CALL_PRIORITY_KERNEL);
348 	/* Give it an arg so that we know that this is the fast timer */
349 	inpcb_fast_thread_call = thread_call_allocate_with_priority(
350 		inpcb_timeout, &inpcb_timeout, THREAD_CALL_PRIORITY_KERNEL);
351 	if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) {
352 		panic("unable to alloc the inpcb thread call");
353 	}
354 
355 	/*
356 	 * Initialize data structures required to deliver
357 	 * flow advisories.
358 	 */
359 	lck_mtx_lock(&inp_fc_lck);
360 	RB_INIT(&inp_fc_tree);
361 	bzero(&key_inp, sizeof(key_inp));
362 	lck_mtx_unlock(&inp_fc_lck);
363 }
364 
365 #define INPCB_HAVE_TIMER_REQ(req)       (((req).intimer_lazy > 0) || \
366 	((req).intimer_fast > 0) || ((req).intimer_nodelay > 0))
367 static void
inpcb_timeout(void * arg0,void * arg1)368 inpcb_timeout(void *arg0, void *arg1)
369 {
370 #pragma unused(arg1)
371 	struct inpcbinfo *ipi;
372 	boolean_t t, gc;
373 	struct intimercount gccnt, tmcnt;
374 
375 	/*
376 	 * Update coarse-grained networking timestamp (in sec.); the idea
377 	 * is to piggy-back on the timeout callout to update the counter
378 	 * returnable via net_uptime().
379 	 */
380 	net_update_uptime();
381 
382 	bzero(&gccnt, sizeof(gccnt));
383 	bzero(&tmcnt, sizeof(tmcnt));
384 
385 	lck_mtx_lock_spin(&inpcb_timeout_lock);
386 	gc = inpcb_garbage_collecting;
387 	inpcb_garbage_collecting = FALSE;
388 
389 	t = inpcb_ticking;
390 	inpcb_ticking = FALSE;
391 
392 	if (gc || t) {
393 		lck_mtx_unlock(&inpcb_timeout_lock);
394 
395 		lck_mtx_lock(&inpcb_lock);
396 		TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) {
397 			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) {
398 				bzero(&ipi->ipi_gc_req,
399 				    sizeof(ipi->ipi_gc_req));
400 				if (gc && ipi->ipi_gc != NULL) {
401 					ipi->ipi_gc(ipi);
402 					gccnt.intimer_lazy +=
403 					    ipi->ipi_gc_req.intimer_lazy;
404 					gccnt.intimer_fast +=
405 					    ipi->ipi_gc_req.intimer_fast;
406 					gccnt.intimer_nodelay +=
407 					    ipi->ipi_gc_req.intimer_nodelay;
408 				}
409 			}
410 			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) {
411 				bzero(&ipi->ipi_timer_req,
412 				    sizeof(ipi->ipi_timer_req));
413 				if (t && ipi->ipi_timer != NULL) {
414 					ipi->ipi_timer(ipi);
415 					tmcnt.intimer_lazy +=
416 					    ipi->ipi_timer_req.intimer_lazy;
417 					tmcnt.intimer_fast +=
418 					    ipi->ipi_timer_req.intimer_fast;
419 					tmcnt.intimer_nodelay +=
420 					    ipi->ipi_timer_req.intimer_nodelay;
421 				}
422 			}
423 		}
424 		lck_mtx_unlock(&inpcb_lock);
425 		lck_mtx_lock_spin(&inpcb_timeout_lock);
426 	}
427 
428 	/* lock was dropped above, so check first before overriding */
429 	if (!inpcb_garbage_collecting) {
430 		inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt);
431 	}
432 	if (!inpcb_ticking) {
433 		inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt);
434 	}
435 
436 	/* arg0 will be set if we are the fast timer */
437 	if (arg0 != NULL) {
438 		inpcb_fast_timer_on = FALSE;
439 	}
440 	inpcb_timeout_run--;
441 	VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);
442 
443 	/* re-arm the timer if there's work to do */
444 	if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) {
445 		inpcb_sched_timeout();
446 	} else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) {
447 		/* be lazy when idle with little activity */
448 		inpcb_sched_lazy_timeout();
449 	} else {
450 		inpcb_sched_timeout();
451 	}
452 
453 	lck_mtx_unlock(&inpcb_timeout_lock);
454 }
455 
456 static void
inpcb_sched_timeout(void)457 inpcb_sched_timeout(void)
458 {
459 	_inpcb_sched_timeout(0);
460 }
461 
462 static void
inpcb_sched_lazy_timeout(void)463 inpcb_sched_lazy_timeout(void)
464 {
465 	_inpcb_sched_timeout(inpcb_timeout_lazy);
466 }
467 
468 static void
_inpcb_sched_timeout(unsigned int offset)469 _inpcb_sched_timeout(unsigned int offset)
470 {
471 	uint64_t deadline, leeway;
472 
473 	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
474 	LCK_MTX_ASSERT(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
475 	if (inpcb_timeout_run == 0 &&
476 	    (inpcb_garbage_collecting || inpcb_ticking)) {
477 		lck_mtx_convert_spin(&inpcb_timeout_lock);
478 		inpcb_timeout_run++;
479 		if (offset == 0) {
480 			inpcb_fast_timer_on = TRUE;
481 			thread_call_enter_delayed(inpcb_fast_thread_call,
482 			    deadline);
483 		} else {
484 			inpcb_fast_timer_on = FALSE;
485 			clock_interval_to_absolutetime_interval(offset,
486 			    NSEC_PER_SEC, &leeway);
487 			thread_call_enter_delayed_with_leeway(
488 				inpcb_thread_call, NULL, deadline, leeway,
489 				THREAD_CALL_DELAY_LEEWAY);
490 		}
491 	} else if (inpcb_timeout_run == 1 &&
492 	    offset == 0 && !inpcb_fast_timer_on) {
493 		/*
494 		 * Since the request was for a fast timer but the
495 		 * scheduled timer is a lazy timer, try to schedule
496 		 * another instance of fast timer also.
497 		 */
498 		lck_mtx_convert_spin(&inpcb_timeout_lock);
499 		inpcb_timeout_run++;
500 		inpcb_fast_timer_on = TRUE;
501 		thread_call_enter_delayed(inpcb_fast_thread_call, deadline);
502 	}
503 }
504 
505 void
inpcb_gc_sched(struct inpcbinfo * ipi,u_int32_t type)506 inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type)
507 {
508 	u_int32_t gccnt;
509 
510 	lck_mtx_lock_spin(&inpcb_timeout_lock);
511 	inpcb_garbage_collecting = TRUE;
512 	gccnt = ipi->ipi_gc_req.intimer_nodelay +
513 	    ipi->ipi_gc_req.intimer_fast;
514 
515 	if (gccnt > INPCB_GCREQ_THRESHOLD) {
516 		type = INPCB_TIMER_FAST;
517 	}
518 
519 	switch (type) {
520 	case INPCB_TIMER_NODELAY:
521 		os_atomic_inc(&ipi->ipi_gc_req.intimer_nodelay, relaxed);
522 		inpcb_sched_timeout();
523 		break;
524 	case INPCB_TIMER_FAST:
525 		os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
526 		inpcb_sched_timeout();
527 		break;
528 	default:
529 		os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
530 		inpcb_sched_lazy_timeout();
531 		break;
532 	}
533 	lck_mtx_unlock(&inpcb_timeout_lock);
534 }
535 
536 void
inpcb_timer_sched(struct inpcbinfo * ipi,u_int32_t type)537 inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type)
538 {
539 	lck_mtx_lock_spin(&inpcb_timeout_lock);
540 	inpcb_ticking = TRUE;
541 	switch (type) {
542 	case INPCB_TIMER_NODELAY:
543 		os_atomic_inc(&ipi->ipi_timer_req.intimer_nodelay, relaxed);
544 		inpcb_sched_timeout();
545 		break;
546 	case INPCB_TIMER_FAST:
547 		os_atomic_inc(&ipi->ipi_timer_req.intimer_fast, relaxed);
548 		inpcb_sched_timeout();
549 		break;
550 	default:
551 		os_atomic_inc(&ipi->ipi_timer_req.intimer_lazy, relaxed);
552 		inpcb_sched_lazy_timeout();
553 		break;
554 	}
555 	lck_mtx_unlock(&inpcb_timeout_lock);
556 }
557 
558 void
in_pcbinfo_attach(struct inpcbinfo * ipi)559 in_pcbinfo_attach(struct inpcbinfo *ipi)
560 {
561 	struct inpcbinfo *ipi0;
562 
563 	lck_mtx_lock(&inpcb_lock);
564 	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
565 		if (ipi0 == ipi) {
566 			panic("%s: ipi %p already in the list",
567 			    __func__, ipi);
568 			/* NOTREACHED */
569 		}
570 	}
571 	TAILQ_INSERT_TAIL(&inpcb_head, ipi, ipi_entry);
572 	lck_mtx_unlock(&inpcb_lock);
573 }
574 
575 int
in_pcbinfo_detach(struct inpcbinfo * ipi)576 in_pcbinfo_detach(struct inpcbinfo *ipi)
577 {
578 	struct inpcbinfo *ipi0;
579 	int error = 0;
580 
581 	lck_mtx_lock(&inpcb_lock);
582 	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
583 		if (ipi0 == ipi) {
584 			break;
585 		}
586 	}
587 	if (ipi0 != NULL) {
588 		TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry);
589 	} else {
590 		error = ENXIO;
591 	}
592 	lck_mtx_unlock(&inpcb_lock);
593 
594 	return error;
595 }
596 
597 __attribute__((noinline))
598 char *
inp_snprintf_tuple(struct inpcb * inp,char * buf,size_t buflen)599 inp_snprintf_tuple(struct inpcb *inp, char *buf, size_t buflen)
600 {
601 	char laddrstr[MAX_IPv6_STR_LEN];
602 	char faddrstr[MAX_IPv6_STR_LEN];
603 	uint16_t lport = 0;
604 	uint16_t fport = 0;
605 	uint16_t proto = IPPROTO_IP;
606 
607 	if (inp->inp_socket != NULL) {
608 		proto = SOCK_PROTO(inp->inp_socket);
609 
610 		if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) {
611 			lport  = inp->inp_lport;
612 			fport = inp->inp_fport;
613 		}
614 	}
615 	if (inp->inp_vflag & INP_IPV4) {
616 		inet_ntop(AF_INET, (void *)&inp->inp_laddr.s_addr, laddrstr, sizeof(laddrstr));
617 		inet_ntop(AF_INET, (void *)&inp->inp_faddr.s_addr, faddrstr, sizeof(faddrstr));
618 	} else if (inp->inp_vflag & INP_IPV6) {
619 		inet_ntop(AF_INET6, (void *)&inp->in6p_faddr, laddrstr, sizeof(laddrstr));
620 		inet_ntop(AF_INET6, (void *)&inp->in6p_faddr, faddrstr, sizeof(faddrstr));
621 	}
622 	snprintf(buf, buflen, "[%u %s:%u %s:%u]",
623 	    proto, laddrstr, ntohs(lport), faddrstr, ntohs(fport));
624 
625 	return buf;
626 }
627 
628 __attribute__((noinline))
629 void
in_pcb_check_management_entitled(struct inpcb * inp)630 in_pcb_check_management_entitled(struct inpcb *inp)
631 {
632 	if (inp->inp_flags2 & INP2_MANAGEMENT_CHECKED) {
633 		return;
634 	}
635 
636 	if (management_data_unrestricted) {
637 		inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
638 		inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
639 	} else if (if_management_interface_check_needed == true) {
640 		inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
641 		/*
642 		 * Note that soopt_cred_check check both intcoproc entitlements
643 		 * We check MANAGEMENT_DATA_ENTITLEMENT as there is no corresponding PRIV value
644 		 */
645 		if (soopt_cred_check(inp->inp_socket, PRIV_NET_RESTRICTED_INTCOPROC, false, false) == 0
646 		    || IOCurrentTaskHasEntitlement(MANAGEMENT_DATA_ENTITLEMENT) == true
647 #if DEBUG || DEVELOPMENT
648 		    || IOCurrentTaskHasEntitlement(MANAGEMENT_DATA_ENTITLEMENT_DEVELOPMENT) == true
649 #endif /* DEBUG || DEVELOPMENT */
650 		    ) {
651 			inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
652 		} else {
653 			if (__improbable(if_management_verbose > 1)) {
654 				char buf[128];
655 
656 				os_log(OS_LOG_DEFAULT, "in_pcb_check_management_entitled %s:%d not management entitled %s",
657 				    proc_best_name(current_proc()),
658 				    proc_selfpid(),
659 				    inp_snprintf_tuple(inp, buf, sizeof(buf)));
660 			}
661 		}
662 	}
663 }
664 
665 /*
666  * Allocate a PCB and associate it with the socket.
667  *
668  * Returns:	0			Success
669  *		ENOBUFS
670  *		ENOMEM
671  */
672 int
in_pcballoc(struct socket * so,struct inpcbinfo * pcbinfo,struct proc * p)673 in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p)
674 {
675 #pragma unused(p)
676 	struct inpcb *inp;
677 	caddr_t temp;
678 
679 	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
680 		inp = zalloc_flags(pcbinfo->ipi_zone,
681 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
682 	} else {
683 		inp = (struct inpcb *)(void *)so->so_saved_pcb;
684 		temp = inp->inp_saved_ppcb;
685 		bzero((caddr_t)inp, sizeof(*inp));
686 		inp->inp_saved_ppcb = temp;
687 	}
688 
689 	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
690 	inp->inp_pcbinfo = pcbinfo;
691 	inp->inp_socket = so;
692 	/* make sure inp_stat is always 64-bit aligned */
693 	inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store,
694 	    sizeof(u_int64_t));
695 	if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) +
696 	    sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) {
697 		panic("%s: insufficient space to align inp_stat", __func__);
698 		/* NOTREACHED */
699 	}
700 
701 	/* make sure inp_cstat is always 64-bit aligned */
702 	inp->inp_cstat = (struct inp_stat *)P2ROUNDUP(inp->inp_cstat_store,
703 	    sizeof(u_int64_t));
704 	if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) +
705 	    sizeof(*inp->inp_cstat) > sizeof(inp->inp_cstat_store)) {
706 		panic("%s: insufficient space to align inp_cstat", __func__);
707 		/* NOTREACHED */
708 	}
709 
710 	/* make sure inp_wstat is always 64-bit aligned */
711 	inp->inp_wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_wstat_store,
712 	    sizeof(u_int64_t));
713 	if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) +
714 	    sizeof(*inp->inp_wstat) > sizeof(inp->inp_wstat_store)) {
715 		panic("%s: insufficient space to align inp_wstat", __func__);
716 		/* NOTREACHED */
717 	}
718 
719 	/* make sure inp_Wstat is always 64-bit aligned */
720 	inp->inp_Wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_Wstat_store,
721 	    sizeof(u_int64_t));
722 	if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) +
723 	    sizeof(*inp->inp_Wstat) > sizeof(inp->inp_Wstat_store)) {
724 		panic("%s: insufficient space to align inp_Wstat", __func__);
725 		/* NOTREACHED */
726 	}
727 
728 	so->so_pcb = (caddr_t)inp;
729 
730 	if (so->so_proto->pr_flags & PR_PCBLOCK) {
731 		lck_mtx_init(&inp->inpcb_mtx, pcbinfo->ipi_lock_grp,
732 		    &pcbinfo->ipi_lock_attr);
733 	}
734 
735 	if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) {
736 		inp->inp_flags |= IN6P_IPV6_V6ONLY;
737 	}
738 
739 	if (ip6_auto_flowlabel) {
740 		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
741 	}
742 	if (intcoproc_unrestricted) {
743 		inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
744 	}
745 
746 	(void) inp_update_policy(inp);
747 
748 	lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
749 	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
750 	LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
751 	pcbinfo->ipi_count++;
752 	lck_rw_done(&pcbinfo->ipi_lock);
753 	return 0;
754 }
755 
756 /*
757  * in_pcblookup_local_and_cleanup does everything
758  * in_pcblookup_local does but it checks for a socket
759  * that's going away. Since we know that the lock is
760  * held read+write when this function is called, we
761  * can safely dispose of this socket like the slow
762  * timer would usually do and return NULL. This is
763  * great for bind.
764  */
765 struct inpcb *
in_pcblookup_local_and_cleanup(struct inpcbinfo * pcbinfo,struct in_addr laddr,u_int lport_arg,int wild_okay)766 in_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, struct in_addr laddr,
767     u_int lport_arg, int wild_okay)
768 {
769 	struct inpcb *inp;
770 
771 	/* Perform normal lookup */
772 	inp = in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay);
773 
774 	/* Check if we found a match but it's waiting to be disposed */
775 	if (inp != NULL && inp->inp_wantcnt == WNT_STOPUSING) {
776 		struct socket *so = inp->inp_socket;
777 
778 		socket_lock(so, 0);
779 
780 		if (so->so_usecount == 0) {
781 			if (inp->inp_state != INPCB_STATE_DEAD) {
782 				in_pcbdetach(inp);
783 			}
784 			in_pcbdispose(inp);     /* will unlock & destroy */
785 			inp = NULL;
786 		} else {
787 			socket_unlock(so, 0);
788 		}
789 	}
790 
791 	return inp;
792 }
793 
794 static void
in_pcb_conflict_post_msg(u_int16_t port)795 in_pcb_conflict_post_msg(u_int16_t port)
796 {
797 	/*
798 	 * Radar 5523020 send a kernel event notification if a
799 	 * non-participating socket tries to bind the port a socket
800 	 * who has set SOF_NOTIFYCONFLICT owns.
801 	 */
802 	struct kev_msg ev_msg;
803 	struct kev_in_portinuse in_portinuse;
804 
805 	bzero(&in_portinuse, sizeof(struct kev_in_portinuse));
806 	bzero(&ev_msg, sizeof(struct kev_msg));
807 	in_portinuse.port = ntohs(port);        /* port in host order */
808 	in_portinuse.req_pid = proc_selfpid();
809 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
810 	ev_msg.kev_class = KEV_NETWORK_CLASS;
811 	ev_msg.kev_subclass = KEV_INET_SUBCLASS;
812 	ev_msg.event_code = KEV_INET_PORTINUSE;
813 	ev_msg.dv[0].data_ptr = &in_portinuse;
814 	ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse);
815 	ev_msg.dv[1].data_length = 0;
816 	dlil_post_complete_msg(NULL, &ev_msg);
817 }
818 
819 /*
820  * Bind an INPCB to an address and/or port.  This routine should not alter
821  * the caller-supplied local address "nam".
822  *
823  * Returns:	0			Success
824  *		EADDRNOTAVAIL		Address not available.
825  *		EINVAL			Invalid argument
826  *		EAFNOSUPPORT		Address family not supported [notdef]
827  *		EACCES			Permission denied
828  *		EADDRINUSE		Address in use
829  *		EAGAIN			Resource unavailable, try again
830  *		priv_check_cred:EPERM	Operation not permitted
831  */
832 int
in_pcbbind(struct inpcb * inp,struct sockaddr * nam,struct proc * p)833 in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p)
834 {
835 	struct socket *so = inp->inp_socket;
836 	unsigned short *lastport;
837 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
838 	u_short lport = 0, rand_port = 0;
839 	int wild = 0;
840 	int reuseport = (so->so_options & SO_REUSEPORT);
841 	int error = 0;
842 	int randomport;
843 	int conflict = 0;
844 	boolean_t anonport = FALSE;
845 	kauth_cred_t cred;
846 	struct in_addr laddr;
847 	struct ifnet *outif = NULL;
848 
849 	if (inp->inp_flags2 & INP2_BIND_IN_PROGRESS) {
850 		return EINVAL;
851 	}
852 	inp->inp_flags2 |= INP2_BIND_IN_PROGRESS;
853 
854 	if (TAILQ_EMPTY(&in_ifaddrhead)) { /* XXX broken! */
855 		error = EADDRNOTAVAIL;
856 		goto done;
857 	}
858 	if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) {
859 		wild = 1;
860 	}
861 
862 	bzero(&laddr, sizeof(laddr));
863 
864 	socket_unlock(so, 0); /* keep reference on socket */
865 	lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
866 	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
867 		/* another thread completed the bind */
868 		lck_rw_done(&pcbinfo->ipi_lock);
869 		socket_lock(so, 0);
870 		error = EINVAL;
871 		goto done;
872 	}
873 
874 	if (nam != NULL) {
875 		if (nam->sa_len != sizeof(struct sockaddr_in)) {
876 			lck_rw_done(&pcbinfo->ipi_lock);
877 			socket_lock(so, 0);
878 			error = EINVAL;
879 			goto done;
880 		}
881 #if 0
882 		/*
883 		 * We should check the family, but old programs
884 		 * incorrectly fail to initialize it.
885 		 */
886 		if (nam->sa_family != AF_INET) {
887 			lck_rw_done(&pcbinfo->ipi_lock);
888 			socket_lock(so, 0);
889 			error = EAFNOSUPPORT;
890 			goto done;
891 		}
892 #endif /* 0 */
893 		lport = SIN(nam)->sin_port;
894 
895 		if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr))) {
896 			/*
897 			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
898 			 * allow complete duplication of binding if
899 			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
900 			 * and a multicast address is bound on both
901 			 * new and duplicated sockets.
902 			 */
903 			if (so->so_options & SO_REUSEADDR) {
904 				reuseport = SO_REUSEADDR | SO_REUSEPORT;
905 			}
906 		} else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) {
907 			struct sockaddr_in sin;
908 			struct ifaddr *ifa;
909 
910 			/* Sanitized for interface address searches */
911 			SOCKADDR_ZERO(&sin, sizeof(sin));
912 			sin.sin_family = AF_INET;
913 			sin.sin_len = sizeof(struct sockaddr_in);
914 			sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
915 
916 			ifa = ifa_ifwithaddr(SA(&sin));
917 			if (ifa == NULL) {
918 				lck_rw_done(&pcbinfo->ipi_lock);
919 				socket_lock(so, 0);
920 				error = EADDRNOTAVAIL;
921 				goto done;
922 			} else {
923 				/*
924 				 * Opportunistically determine the outbound
925 				 * interface that may be used; this may not
926 				 * hold true if we end up using a route
927 				 * going over a different interface, e.g.
928 				 * when sending to a local address.  This
929 				 * will get updated again after sending.
930 				 */
931 				IFA_LOCK(ifa);
932 				outif = ifa->ifa_ifp;
933 				IFA_UNLOCK(ifa);
934 				ifa_remref(ifa);
935 			}
936 		}
937 
938 #if SKYWALK
939 		if (inp->inp_flags2 & INP2_EXTERNAL_PORT) {
940 			// Extract the external flow info
941 			struct ns_flow_info nfi = {};
942 			error = necp_client_get_netns_flow_info(inp->necp_client_uuid,
943 			    &nfi);
944 			if (error != 0) {
945 				lck_rw_done(&pcbinfo->ipi_lock);
946 				socket_lock(so, 0);
947 				goto done;
948 			}
949 
950 			// Extract the reserved port
951 			u_int16_t reserved_lport = 0;
952 			if (nfi.nfi_laddr.sa.sa_family == AF_INET) {
953 				reserved_lport = nfi.nfi_laddr.sin.sin_port;
954 			} else if (nfi.nfi_laddr.sa.sa_family == AF_INET6) {
955 				reserved_lport = nfi.nfi_laddr.sin6.sin6_port;
956 			} else {
957 				lck_rw_done(&pcbinfo->ipi_lock);
958 				socket_lock(so, 0);
959 				error = EINVAL;
960 				goto done;
961 			}
962 
963 			// Validate or use the reserved port
964 			if (lport == 0) {
965 				lport = reserved_lport;
966 			} else if (lport != reserved_lport) {
967 				lck_rw_done(&pcbinfo->ipi_lock);
968 				socket_lock(so, 0);
969 				error = EINVAL;
970 				goto done;
971 			}
972 		}
973 
974 		/* Do not allow reserving a UDP port if remaining UDP port count is below 4096 */
975 		if (SOCK_PROTO(so) == IPPROTO_UDP && !allow_udp_port_exhaustion) {
976 			uint32_t current_reservations = 0;
977 			if (inp->inp_vflag & INP_IPV6) {
978 				current_reservations = netns_lookup_reservations_count_in6(inp->in6p_laddr, IPPROTO_UDP);
979 			} else {
980 				current_reservations = netns_lookup_reservations_count_in(inp->inp_laddr, IPPROTO_UDP);
981 			}
982 			if (USHRT_MAX - UDP_RANDOM_PORT_RESERVE < current_reservations) {
983 				log(LOG_ERR, "UDP port not available, less than 4096 UDP ports left");
984 				lck_rw_done(&pcbinfo->ipi_lock);
985 				socket_lock(so, 0);
986 				error = EADDRNOTAVAIL;
987 				goto done;
988 			}
989 		}
990 
991 #endif /* SKYWALK */
992 
993 		if (lport != 0) {
994 			struct inpcb *t;
995 			uid_t u;
996 
997 #if XNU_TARGET_OS_OSX
998 			if (ntohs(lport) < IPPORT_RESERVED &&
999 			    SIN(nam)->sin_addr.s_addr != 0 &&
1000 			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
1001 				cred = kauth_cred_proc_ref(p);
1002 				error = priv_check_cred(cred,
1003 				    PRIV_NETINET_RESERVEDPORT, 0);
1004 				kauth_cred_unref(&cred);
1005 				if (error != 0) {
1006 					lck_rw_done(&pcbinfo->ipi_lock);
1007 					socket_lock(so, 0);
1008 					error = EACCES;
1009 					goto done;
1010 				}
1011 			}
1012 #endif /* XNU_TARGET_OS_OSX */
1013 			/*
1014 			 * Check wether the process is allowed to bind to a restricted port
1015 			 */
1016 			if (!current_task_can_use_restricted_in_port(lport,
1017 			    (uint8_t)SOCK_PROTO(so), PORT_FLAGS_BSD)) {
1018 				lck_rw_done(&pcbinfo->ipi_lock);
1019 				socket_lock(so, 0);
1020 				error = EADDRINUSE;
1021 				goto done;
1022 			}
1023 
1024 			if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
1025 			    (u = kauth_cred_getuid(so->so_cred)) != 0 &&
1026 			    (t = in_pcblookup_local_and_cleanup(
1027 				    inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
1028 				    INPLOOKUP_WILDCARD)) != NULL &&
1029 			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
1030 			    t->inp_laddr.s_addr != INADDR_ANY ||
1031 			    !(t->inp_socket->so_options & SO_REUSEPORT)) &&
1032 			    (u != kauth_cred_getuid(t->inp_socket->so_cred)) &&
1033 			    !(t->inp_socket->so_flags & SOF_REUSESHAREUID) &&
1034 			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
1035 			    t->inp_laddr.s_addr != INADDR_ANY) &&
1036 			    (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
1037 			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
1038 			    uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
1039 				if ((t->inp_socket->so_flags &
1040 				    SOF_NOTIFYCONFLICT) &&
1041 				    !(so->so_flags & SOF_NOTIFYCONFLICT)) {
1042 					conflict = 1;
1043 				}
1044 
1045 				lck_rw_done(&pcbinfo->ipi_lock);
1046 
1047 				if (conflict) {
1048 					in_pcb_conflict_post_msg(lport);
1049 				}
1050 
1051 				socket_lock(so, 0);
1052 				error = EADDRINUSE;
1053 				goto done;
1054 			}
1055 			t = in_pcblookup_local_and_cleanup(pcbinfo,
1056 			    SIN(nam)->sin_addr, lport, wild);
1057 			if (t != NULL &&
1058 			    (reuseport & t->inp_socket->so_options) == 0 &&
1059 			    (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
1060 			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
1061 			    uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
1062 				if (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
1063 				    t->inp_laddr.s_addr != INADDR_ANY ||
1064 				    SOCK_DOM(so) != PF_INET6 ||
1065 				    SOCK_DOM(t->inp_socket) != PF_INET6) {
1066 					if ((t->inp_socket->so_flags &
1067 					    SOF_NOTIFYCONFLICT) &&
1068 					    !(so->so_flags & SOF_NOTIFYCONFLICT)) {
1069 						conflict = 1;
1070 					}
1071 
1072 					lck_rw_done(&pcbinfo->ipi_lock);
1073 
1074 					if (conflict) {
1075 						in_pcb_conflict_post_msg(lport);
1076 					}
1077 					socket_lock(so, 0);
1078 					error = EADDRINUSE;
1079 					goto done;
1080 				}
1081 			}
1082 #if SKYWALK
1083 			if ((SOCK_PROTO(so) == IPPROTO_TCP ||
1084 			    SOCK_PROTO(so) == IPPROTO_UDP) &&
1085 			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
1086 				int res_err = 0;
1087 				if (inp->inp_vflag & INP_IPV6) {
1088 					res_err = netns_reserve_in6(
1089 						&inp->inp_netns_token,
1090 						SIN6(nam)->sin6_addr,
1091 						(uint8_t)SOCK_PROTO(so), lport, NETNS_BSD,
1092 						NULL);
1093 				} else {
1094 					res_err = netns_reserve_in(
1095 						&inp->inp_netns_token,
1096 						SIN(nam)->sin_addr, (uint8_t)SOCK_PROTO(so),
1097 						lport, NETNS_BSD, NULL);
1098 				}
1099 				if (res_err != 0) {
1100 					lck_rw_done(&pcbinfo->ipi_lock);
1101 					socket_lock(so, 0);
1102 					error = EADDRINUSE;
1103 					goto done;
1104 				}
1105 			}
1106 #endif /* SKYWALK */
1107 		}
1108 		laddr = SIN(nam)->sin_addr;
1109 	}
1110 	if (lport == 0) {
1111 		u_short first, last;
1112 		int count;
1113 		bool found;
1114 
1115 		/*
1116 		 * Override wild = 1 for implicit bind (mainly used by connect)
1117 		 * For implicit bind (lport == 0), we always use an unused port,
1118 		 * so REUSEADDR|REUSEPORT don't apply
1119 		 */
1120 		wild = 1;
1121 
1122 		randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
1123 		    (so->so_type == SOCK_STREAM ? tcp_use_randomport :
1124 		    udp_use_randomport);
1125 
1126 		/*
1127 		 * Even though this looks similar to the code in
1128 		 * in6_pcbsetport, the v6 vs v4 checks are different.
1129 		 */
1130 		anonport = TRUE;
1131 		if (inp->inp_flags & INP_HIGHPORT) {
1132 			first = (u_short)ipport_hifirstauto;     /* sysctl */
1133 			last  = (u_short)ipport_hilastauto;
1134 			lastport = &pcbinfo->ipi_lasthi;
1135 		} else if (inp->inp_flags & INP_LOWPORT) {
1136 			cred = kauth_cred_proc_ref(p);
1137 			error = priv_check_cred(cred,
1138 			    PRIV_NETINET_RESERVEDPORT, 0);
1139 			kauth_cred_unref(&cred);
1140 			if (error != 0) {
1141 				lck_rw_done(&pcbinfo->ipi_lock);
1142 				socket_lock(so, 0);
1143 				goto done;
1144 			}
1145 			first = (u_short)ipport_lowfirstauto;    /* 1023 */
1146 			last  = (u_short)ipport_lowlastauto;     /* 600 */
1147 			lastport = &pcbinfo->ipi_lastlow;
1148 		} else {
1149 			first = (u_short)ipport_firstauto;       /* sysctl */
1150 			last  = (u_short)ipport_lastauto;
1151 			lastport = &pcbinfo->ipi_lastport;
1152 		}
1153 		/* No point in randomizing if only one port is available */
1154 
1155 		if (first == last) {
1156 			randomport = 0;
1157 		}
1158 		/*
1159 		 * Simple check to ensure all ports are not used up causing
1160 		 * a deadlock here.
1161 		 *
1162 		 * We split the two cases (up and down) so that the direction
1163 		 * is not being tested on each round of the loop.
1164 		 */
1165 		if (first > last) {
1166 			struct in_addr lookup_addr;
1167 
1168 			/*
1169 			 * counting down
1170 			 */
1171 			if (randomport) {
1172 				read_frandom(&rand_port, sizeof(rand_port));
1173 				*lastport =
1174 				    first - (rand_port % (first - last));
1175 			}
1176 			count = first - last;
1177 
1178 			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
1179 			    inp->inp_laddr;
1180 
1181 			found = false;
1182 			do {
1183 				if (count-- < 0) {      /* completely used? */
1184 					lck_rw_done(&pcbinfo->ipi_lock);
1185 					socket_lock(so, 0);
1186 					error = EADDRNOTAVAIL;
1187 					goto done;
1188 				}
1189 				--*lastport;
1190 				if (*lastport > first || *lastport < last) {
1191 					*lastport = first;
1192 				}
1193 				lport = htons(*lastport);
1194 
1195 				/*
1196 				 * Skip if this is a restricted port as we do not want to
1197 				 * restricted ports as ephemeral
1198 				 */
1199 				if (IS_RESTRICTED_IN_PORT(lport)) {
1200 					continue;
1201 				}
1202 
1203 				found = in_pcblookup_local_and_cleanup(pcbinfo,
1204 				    lookup_addr, lport, wild) == NULL;
1205 #if SKYWALK
1206 				if (found &&
1207 				    (SOCK_PROTO(so) == IPPROTO_TCP ||
1208 				    SOCK_PROTO(so) == IPPROTO_UDP) &&
1209 				    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
1210 					int res_err;
1211 					if (inp->inp_vflag & INP_IPV6) {
1212 						res_err = netns_reserve_in6(
1213 							&inp->inp_netns_token,
1214 							inp->in6p_laddr,
1215 							(uint8_t)SOCK_PROTO(so), lport,
1216 							NETNS_BSD, NULL);
1217 					} else {
1218 						res_err = netns_reserve_in(
1219 							&inp->inp_netns_token,
1220 							lookup_addr, (uint8_t)SOCK_PROTO(so),
1221 							lport, NETNS_BSD, NULL);
1222 					}
1223 					found = res_err == 0;
1224 				}
1225 #endif /* SKYWALK */
1226 			} while (!found);
1227 		} else {
1228 			struct in_addr lookup_addr;
1229 
1230 			/*
1231 			 * counting up
1232 			 */
1233 			if (randomport) {
1234 				read_frandom(&rand_port, sizeof(rand_port));
1235 				*lastport =
1236 				    first + (rand_port % (first - last));
1237 			}
1238 			count = last - first;
1239 
1240 			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
1241 			    inp->inp_laddr;
1242 
1243 			found = false;
1244 			do {
1245 				if (count-- < 0) {      /* completely used? */
1246 					lck_rw_done(&pcbinfo->ipi_lock);
1247 					socket_lock(so, 0);
1248 					error = EADDRNOTAVAIL;
1249 					goto done;
1250 				}
1251 				++*lastport;
1252 				if (*lastport < first || *lastport > last) {
1253 					*lastport = first;
1254 				}
1255 				lport = htons(*lastport);
1256 
1257 				/*
1258 				 * Skip if this is a restricted port as we do not want to
1259 				 * restricted ports as ephemeral
1260 				 */
1261 				if (IS_RESTRICTED_IN_PORT(lport)) {
1262 					continue;
1263 				}
1264 
1265 				found = in_pcblookup_local_and_cleanup(pcbinfo,
1266 				    lookup_addr, lport, wild) == NULL;
1267 #if SKYWALK
1268 				if (found &&
1269 				    (SOCK_PROTO(so) == IPPROTO_TCP ||
1270 				    SOCK_PROTO(so) == IPPROTO_UDP) &&
1271 				    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
1272 					int res_err;
1273 					if (inp->inp_vflag & INP_IPV6) {
1274 						res_err = netns_reserve_in6(
1275 							&inp->inp_netns_token,
1276 							inp->in6p_laddr,
1277 							(uint8_t)SOCK_PROTO(so), lport,
1278 							NETNS_BSD, NULL);
1279 					} else {
1280 						res_err = netns_reserve_in(
1281 							&inp->inp_netns_token,
1282 							lookup_addr, (uint8_t)SOCK_PROTO(so),
1283 							lport, NETNS_BSD, NULL);
1284 					}
1285 					found = res_err == 0;
1286 				}
1287 #endif /* SKYWALK */
1288 			} while (!found);
1289 		}
1290 	}
1291 	socket_lock(so, 0);
1292 
1293 	/*
1294 	 * We unlocked socket's protocol lock for a long time.
1295 	 * The socket might have been dropped/defuncted.
1296 	 * Checking if world has changed since.
1297 	 */
1298 	if (inp->inp_state == INPCB_STATE_DEAD) {
1299 #if SKYWALK
1300 		netns_release(&inp->inp_netns_token);
1301 #endif /* SKYWALK */
1302 		lck_rw_done(&pcbinfo->ipi_lock);
1303 		error = ECONNABORTED;
1304 		goto done;
1305 	}
1306 
1307 	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
1308 #if SKYWALK
1309 		netns_release(&inp->inp_netns_token);
1310 #endif /* SKYWALK */
1311 		lck_rw_done(&pcbinfo->ipi_lock);
1312 		error = EINVAL;
1313 		goto done;
1314 	}
1315 
1316 	if (laddr.s_addr != INADDR_ANY) {
1317 		inp->inp_laddr = laddr;
1318 		inp->inp_last_outifp = outif;
1319 #if SKYWALK
1320 		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
1321 			netns_set_ifnet(&inp->inp_netns_token, outif);
1322 		}
1323 #endif /* SKYWALK */
1324 	}
1325 	inp->inp_lport = lport;
1326 	if (anonport) {
1327 		inp->inp_flags |= INP_ANONPORT;
1328 	}
1329 
1330 	if (in_pcbinshash(inp, 1) != 0) {
1331 		inp->inp_laddr.s_addr = INADDR_ANY;
1332 		inp->inp_last_outifp = NULL;
1333 
1334 #if SKYWALK
1335 		netns_release(&inp->inp_netns_token);
1336 #endif /* SKYWALK */
1337 		inp->inp_lport = 0;
1338 		if (anonport) {
1339 			inp->inp_flags &= ~INP_ANONPORT;
1340 		}
1341 		lck_rw_done(&pcbinfo->ipi_lock);
1342 		error = EAGAIN;
1343 		goto done;
1344 	}
1345 	lck_rw_done(&pcbinfo->ipi_lock);
1346 	sflt_notify(so, sock_evt_bound, NULL);
1347 
1348 	in_pcb_check_management_entitled(inp);
1349 done:
1350 	inp->inp_flags2 &= ~INP2_BIND_IN_PROGRESS;
1351 	return error;
1352 }
1353 
1354 #define APN_FALLBACK_IP_FILTER(a)       \
1355 	(IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \
1356 	 IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \
1357 	 IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \
1358 	 IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \
1359 	 IN_PRIVATE(ntohl((a)->sin_addr.s_addr)))
1360 
1361 #define APN_FALLBACK_NOTIF_INTERVAL     2 /* Magic Number */
1362 static uint64_t last_apn_fallback = 0;
1363 
1364 static boolean_t
apn_fallback_required(proc_t proc,struct socket * so,struct sockaddr_in * p_dstv4)1365 apn_fallback_required(proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4)
1366 {
1367 	uint64_t timenow;
1368 	struct sockaddr_storage lookup_default_addr;
1369 	struct rtentry *rt = NULL;
1370 
1371 	VERIFY(proc != NULL);
1372 
1373 	if (apn_fallbk_enabled == FALSE) {
1374 		return FALSE;
1375 	}
1376 
1377 	if (proc == kernproc) {
1378 		return FALSE;
1379 	}
1380 
1381 	if (so && (so->so_options & SO_NOAPNFALLBK)) {
1382 		return FALSE;
1383 	}
1384 
1385 	timenow = net_uptime();
1386 	if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) {
1387 		apn_fallbk_log((LOG_INFO, "APN fallback notification throttled.\n"));
1388 		return FALSE;
1389 	}
1390 
1391 	if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) {
1392 		return FALSE;
1393 	}
1394 
1395 	/* Check if we have unscoped IPv6 default route through cellular */
1396 	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
1397 	lookup_default_addr.ss_family = AF_INET6;
1398 	lookup_default_addr.ss_len = sizeof(struct sockaddr_in6);
1399 
1400 	rt = rtalloc1(SA(&lookup_default_addr), 0, 0);
1401 	if (NULL == rt) {
1402 		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
1403 		    "unscoped default IPv6 route.\n"));
1404 		return FALSE;
1405 	}
1406 
1407 	if (!IFNET_IS_CELLULAR(rt->rt_ifp)) {
1408 		rtfree(rt);
1409 		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
1410 		    "unscoped default IPv6 route through cellular interface.\n"));
1411 		return FALSE;
1412 	}
1413 
1414 	/*
1415 	 * We have a default IPv6 route, ensure that
1416 	 * we do not have IPv4 default route before triggering
1417 	 * the event
1418 	 */
1419 	rtfree(rt);
1420 	rt = NULL;
1421 
1422 	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
1423 	lookup_default_addr.ss_family = AF_INET;
1424 	lookup_default_addr.ss_len = sizeof(struct sockaddr_in);
1425 
1426 	rt = rtalloc1(SA(&lookup_default_addr), 0, 0);
1427 
1428 	if (rt) {
1429 		rtfree(rt);
1430 		rt = NULL;
1431 		apn_fallbk_log((LOG_INFO, "APN fallback notification found unscoped "
1432 		    "IPv4 default route!\n"));
1433 		return FALSE;
1434 	}
1435 
1436 	{
1437 		/*
1438 		 * We disable APN fallback if the binary is not a third-party app.
1439 		 * Note that platform daemons use their process name as a
1440 		 * bundle ID so we filter out bundle IDs without dots.
1441 		 */
1442 		const char *bundle_id = cs_identity_get(proc);
1443 		if (bundle_id == NULL ||
1444 		    bundle_id[0] == '\0' ||
1445 		    strchr(bundle_id, '.') == NULL ||
1446 		    strncmp(bundle_id, "com.apple.", sizeof("com.apple.") - 1) == 0) {
1447 			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found first-"
1448 			    "party bundle ID \"%s\"!\n", (bundle_id ? bundle_id : "NULL")));
1449 			return FALSE;
1450 		}
1451 	}
1452 
1453 	{
1454 		/*
1455 		 * The Apple App Store IPv6 requirement started on
1456 		 * June 1st, 2016 at 12:00:00 AM PDT.
1457 		 * We disable APN fallback if the binary is more recent than that.
1458 		 * We check both atime and birthtime since birthtime is not always supported.
1459 		 */
1460 		static const long ipv6_start_date = 1464764400L;
1461 		vfs_context_t context;
1462 		struct stat64 sb;
1463 		int vn_stat_error;
1464 
1465 		bzero(&sb, sizeof(struct stat64));
1466 		context = vfs_context_create(NULL);
1467 		vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, 0, context);
1468 		(void)vfs_context_rele(context);
1469 
1470 		if (vn_stat_error != 0 ||
1471 		    sb.st_atimespec.tv_sec >= ipv6_start_date ||
1472 		    sb.st_birthtimespec.tv_sec >= ipv6_start_date) {
1473 			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found binary "
1474 			    "too recent! (err %d atime %ld mtime %ld ctime %ld birthtime %ld)\n",
1475 			    vn_stat_error, sb.st_atimespec.tv_sec, sb.st_mtimespec.tv_sec,
1476 			    sb.st_ctimespec.tv_sec, sb.st_birthtimespec.tv_sec));
1477 			return FALSE;
1478 		}
1479 	}
1480 	return TRUE;
1481 }
1482 
1483 static void
apn_fallback_trigger(proc_t proc,struct socket * so)1484 apn_fallback_trigger(proc_t proc, struct socket *so)
1485 {
1486 	pid_t pid = 0;
1487 	struct kev_msg ev_msg;
1488 	struct kev_netevent_apnfallbk_data apnfallbk_data;
1489 
1490 	last_apn_fallback = net_uptime();
1491 	pid = proc_pid(proc);
1492 	uuid_t application_uuid;
1493 	uuid_clear(application_uuid);
1494 	proc_getexecutableuuid(proc, application_uuid,
1495 	    sizeof(application_uuid));
1496 
1497 	bzero(&ev_msg, sizeof(struct kev_msg));
1498 	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
1499 	ev_msg.kev_class        = KEV_NETWORK_CLASS;
1500 	ev_msg.kev_subclass     = KEV_NETEVENT_SUBCLASS;
1501 	ev_msg.event_code       = KEV_NETEVENT_APNFALLBACK;
1502 
1503 	bzero(&apnfallbk_data, sizeof(apnfallbk_data));
1504 
1505 	if (so->so_flags & SOF_DELEGATED) {
1506 		apnfallbk_data.epid = so->e_pid;
1507 		uuid_copy(apnfallbk_data.euuid, so->e_uuid);
1508 	} else {
1509 		apnfallbk_data.epid = so->last_pid;
1510 		uuid_copy(apnfallbk_data.euuid, so->last_uuid);
1511 	}
1512 
1513 	ev_msg.dv[0].data_ptr   = &apnfallbk_data;
1514 	ev_msg.dv[0].data_length = sizeof(apnfallbk_data);
1515 	kev_post_msg(&ev_msg);
1516 	apn_fallbk_log((LOG_INFO, "APN fallback notification issued.\n"));
1517 }
1518 
1519 /*
1520  * Transform old in_pcbconnect() into an inner subroutine for new
1521  * in_pcbconnect(); do some validity-checking on the remote address
1522  * (in "nam") and then determine local host address (i.e., which
1523  * interface) to use to access that remote host.
1524  *
1525  * This routine may alter the caller-supplied remote address "nam".
1526  *
1527  * The caller may override the bound-to-interface setting of the socket
1528  * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
1529  *
1530  * This routine might return an ifp with a reference held if the caller
1531  * provides a non-NULL outif, even in the error case.  The caller is
1532  * responsible for releasing its reference.
1533  *
1534  * Returns:	0			Success
1535  *		EINVAL			Invalid argument
1536  *		EAFNOSUPPORT		Address family not supported
1537  *		EADDRNOTAVAIL		Address not available
1538  */
1539 int
in_pcbladdr(struct inpcb * inp,struct sockaddr * nam,struct in_addr * laddr,unsigned int ifscope,struct ifnet ** outif,int raw)1540 in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr,
1541     unsigned int ifscope, struct ifnet **outif, int raw)
1542 {
1543 	struct route *ro = &inp->inp_route;
1544 	struct in_ifaddr *ia = NULL;
1545 	struct sockaddr_in sin;
1546 	int error = 0;
1547 	boolean_t restricted = FALSE;
1548 
1549 	if (outif != NULL) {
1550 		*outif = NULL;
1551 	}
1552 	if (nam->sa_len != sizeof(struct sockaddr_in)) {
1553 		return EINVAL;
1554 	}
1555 	if (SIN(nam)->sin_family != AF_INET) {
1556 		return EAFNOSUPPORT;
1557 	}
1558 	if (raw == 0 && SIN(nam)->sin_port == 0) {
1559 		return EADDRNOTAVAIL;
1560 	}
1561 
1562 	in_pcb_check_management_entitled(inp);
1563 
1564 	/*
1565 	 * If the destination address is INADDR_ANY,
1566 	 * use the primary local address.
1567 	 * If the supplied address is INADDR_BROADCAST,
1568 	 * and the primary interface supports broadcast,
1569 	 * choose the broadcast address for that interface.
1570 	 */
1571 	if (raw == 0 && (SIN(nam)->sin_addr.s_addr == INADDR_ANY ||
1572 	    SIN(nam)->sin_addr.s_addr == (u_int32_t)INADDR_BROADCAST)) {
1573 		lck_rw_lock_shared(&in_ifaddr_rwlock);
1574 		if (!TAILQ_EMPTY(&in_ifaddrhead)) {
1575 			ia = TAILQ_FIRST(&in_ifaddrhead);
1576 			IFA_LOCK_SPIN(&ia->ia_ifa);
1577 			if (SIN(nam)->sin_addr.s_addr == INADDR_ANY) {
1578 				SIN(nam)->sin_addr = IA_SIN(ia)->sin_addr;
1579 			} else if (ia->ia_ifp->if_flags & IFF_BROADCAST) {
1580 				SIN(nam)->sin_addr =
1581 				    SIN(&ia->ia_broadaddr)->sin_addr;
1582 			}
1583 			IFA_UNLOCK(&ia->ia_ifa);
1584 			ia = NULL;
1585 		}
1586 		lck_rw_done(&in_ifaddr_rwlock);
1587 	}
1588 	/*
1589 	 * Otherwise, if the socket has already bound the source, just use it.
1590 	 */
1591 	if (inp->inp_laddr.s_addr != INADDR_ANY) {
1592 		VERIFY(ia == NULL);
1593 		*laddr = inp->inp_laddr;
1594 		return 0;
1595 	}
1596 
1597 	/*
1598 	 * If the ifscope is specified by the caller (e.g. IP_PKTINFO)
1599 	 * then it overrides the sticky ifscope set for the socket.
1600 	 */
1601 	if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) {
1602 		ifscope = inp->inp_boundifp->if_index;
1603 	}
1604 
1605 	/*
1606 	 * If route is known or can be allocated now,
1607 	 * our src addr is taken from the i/f, else punt.
1608 	 * Note that we should check the address family of the cached
1609 	 * destination, in case of sharing the cache with IPv6.
1610 	 */
1611 	if (ro->ro_rt != NULL) {
1612 		RT_LOCK_SPIN(ro->ro_rt);
1613 	}
1614 	if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET ||
1615 	    SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr ||
1616 	    (inp->inp_socket->so_options & SO_DONTROUTE)) {
1617 		if (ro->ro_rt != NULL) {
1618 			RT_UNLOCK(ro->ro_rt);
1619 		}
1620 		ROUTE_RELEASE(ro);
1621 	}
1622 	if (!(inp->inp_socket->so_options & SO_DONTROUTE) &&
1623 	    (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
1624 		if (ro->ro_rt != NULL) {
1625 			RT_UNLOCK(ro->ro_rt);
1626 		}
1627 		ROUTE_RELEASE(ro);
1628 		/* No route yet, so try to acquire one */
1629 		SOCKADDR_ZERO(&ro->ro_dst, sizeof(struct sockaddr_in));
1630 		ro->ro_dst.sa_family = AF_INET;
1631 		ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
1632 		SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr;
1633 		rtalloc_scoped(ro, ifscope);
1634 		if (ro->ro_rt != NULL) {
1635 			RT_LOCK_SPIN(ro->ro_rt);
1636 		}
1637 	}
1638 	/* Sanitized local copy for interface address searches */
1639 	SOCKADDR_ZERO(&sin, sizeof(sin));
1640 	sin.sin_family = AF_INET;
1641 	sin.sin_len = sizeof(struct sockaddr_in);
1642 	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
1643 	/*
1644 	 * If we did not find (or use) a route, assume dest is reachable
1645 	 * on a directly connected network and try to find a corresponding
1646 	 * interface to take the source address from.
1647 	 */
1648 	if (ro->ro_rt == NULL) {
1649 		proc_t proc = current_proc();
1650 
1651 		VERIFY(ia == NULL);
1652 		ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
1653 		if (ia == NULL) {
1654 			ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
1655 		}
1656 		error = ((ia == NULL) ? ENETUNREACH : 0);
1657 
1658 		if (apn_fallback_required(proc, inp->inp_socket,
1659 		    (void *)nam)) {
1660 			apn_fallback_trigger(proc, inp->inp_socket);
1661 		}
1662 
1663 		goto done;
1664 	}
1665 	RT_LOCK_ASSERT_HELD(ro->ro_rt);
1666 	/*
1667 	 * If the outgoing interface on the route found is not
1668 	 * a loopback interface, use the address from that interface.
1669 	 */
1670 	if (!(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
1671 		VERIFY(ia == NULL);
1672 		/*
1673 		 * If the route points to a cellular interface and the
1674 		 * caller forbids our using interfaces of such type,
1675 		 * pretend that there is no route.
1676 		 * Apply the same logic for expensive interfaces.
1677 		 */
1678 		if (inp_restricted_send(inp, ro->ro_rt->rt_ifp)) {
1679 			RT_UNLOCK(ro->ro_rt);
1680 			ROUTE_RELEASE(ro);
1681 			error = EHOSTUNREACH;
1682 			restricted = TRUE;
1683 		} else {
1684 			/* Become a regular mutex */
1685 			RT_CONVERT_LOCK(ro->ro_rt);
1686 			ia = ifatoia(ro->ro_rt->rt_ifa);
1687 			ifa_addref(&ia->ia_ifa);
1688 
1689 			/*
1690 			 * Mark the control block for notification of
1691 			 * a possible flow that might undergo clat46
1692 			 * translation.
1693 			 *
1694 			 * We defer the decision to a later point when
1695 			 * inpcb is being disposed off.
1696 			 * The reason is that we only want to send notification
1697 			 * if the flow was ever used to send data.
1698 			 */
1699 			if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) {
1700 				inp->inp_flags2 |= INP2_CLAT46_FLOW;
1701 			}
1702 
1703 			RT_UNLOCK(ro->ro_rt);
1704 			error = 0;
1705 		}
1706 		goto done;
1707 	}
1708 	VERIFY(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK);
1709 	RT_UNLOCK(ro->ro_rt);
1710 	/*
1711 	 * The outgoing interface is marked with 'loopback net', so a route
1712 	 * to ourselves is here.
1713 	 * Try to find the interface of the destination address and then
1714 	 * take the address from there. That interface is not necessarily
1715 	 * a loopback interface.
1716 	 */
1717 	VERIFY(ia == NULL);
1718 	ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
1719 	if (ia == NULL) {
1720 		ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope));
1721 	}
1722 	if (ia == NULL) {
1723 		ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
1724 	}
1725 	if (ia == NULL) {
1726 		RT_LOCK(ro->ro_rt);
1727 		ia = ifatoia(ro->ro_rt->rt_ifa);
1728 		if (ia != NULL) {
1729 			ifa_addref(&ia->ia_ifa);
1730 		}
1731 		RT_UNLOCK(ro->ro_rt);
1732 	}
1733 	error = ((ia == NULL) ? ENETUNREACH : 0);
1734 
1735 done:
1736 	/*
1737 	 * If the destination address is multicast and an outgoing
1738 	 * interface has been set as a multicast option, use the
1739 	 * address of that interface as our source address.
1740 	 */
1741 	if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
1742 	    inp->inp_moptions != NULL) {
1743 		struct ip_moptions *imo;
1744 		struct ifnet *ifp;
1745 
1746 		imo = inp->inp_moptions;
1747 		IMO_LOCK(imo);
1748 		if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
1749 		    ia->ia_ifp != imo->imo_multicast_ifp)) {
1750 			ifp = imo->imo_multicast_ifp;
1751 			if (ia != NULL) {
1752 				ifa_remref(&ia->ia_ifa);
1753 			}
1754 			lck_rw_lock_shared(&in_ifaddr_rwlock);
1755 			TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
1756 				if (ia->ia_ifp == ifp) {
1757 					break;
1758 				}
1759 			}
1760 			if (ia != NULL) {
1761 				ifa_addref(&ia->ia_ifa);
1762 			}
1763 			lck_rw_done(&in_ifaddr_rwlock);
1764 			if (ia == NULL) {
1765 				error = EADDRNOTAVAIL;
1766 			} else {
1767 				error = 0;
1768 			}
1769 		}
1770 		IMO_UNLOCK(imo);
1771 	}
1772 	/*
1773 	 * Don't do pcblookup call here; return interface in laddr
1774 	 * and exit to caller, that will do the lookup.
1775 	 */
1776 	if (ia != NULL) {
1777 		/*
1778 		 * If the source address belongs to a cellular interface
1779 		 * and the socket forbids our using interfaces of such
1780 		 * type, pretend that there is no source address.
1781 		 * Apply the same logic for expensive interfaces.
1782 		 */
1783 		IFA_LOCK_SPIN(&ia->ia_ifa);
1784 		if (inp_restricted_send(inp, ia->ia_ifa.ifa_ifp)) {
1785 			IFA_UNLOCK(&ia->ia_ifa);
1786 			error = EHOSTUNREACH;
1787 			restricted = TRUE;
1788 		} else if (error == 0) {
1789 			*laddr = ia->ia_addr.sin_addr;
1790 			if (outif != NULL) {
1791 				struct ifnet *ifp;
1792 
1793 				if (ro->ro_rt != NULL) {
1794 					ifp = ro->ro_rt->rt_ifp;
1795 				} else {
1796 					ifp = ia->ia_ifp;
1797 				}
1798 
1799 				VERIFY(ifp != NULL);
1800 				IFA_CONVERT_LOCK(&ia->ia_ifa);
1801 				ifnet_reference(ifp);   /* for caller */
1802 				if (*outif != NULL) {
1803 					ifnet_release(*outif);
1804 				}
1805 				*outif = ifp;
1806 			}
1807 			IFA_UNLOCK(&ia->ia_ifa);
1808 		} else {
1809 			IFA_UNLOCK(&ia->ia_ifa);
1810 		}
1811 		ifa_remref(&ia->ia_ifa);
1812 		ia = NULL;
1813 	}
1814 
1815 	if (restricted && error == EHOSTUNREACH) {
1816 		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED |
1817 		    SO_FILT_HINT_IFDENIED));
1818 	}
1819 
1820 	return error;
1821 }
1822 
1823 /*
1824  * Outer subroutine:
1825  * Connect from a socket to a specified address.
1826  * Both address and port must be specified in argument sin.
1827  * If don't have a local address for this socket yet,
1828  * then pick one.
1829  *
1830  * The caller may override the bound-to-interface setting of the socket
1831  * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
1832  */
1833 int
in_pcbconnect(struct inpcb * inp,struct sockaddr * nam,struct proc * p,unsigned int ifscope,struct ifnet ** outif)1834 in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p,
1835     unsigned int ifscope, struct ifnet **outif)
1836 {
1837 	struct in_addr laddr;
1838 	struct sockaddr_in *sin = SIN(nam);
1839 	struct inpcb *pcb;
1840 	int error;
1841 	struct socket *so = inp->inp_socket;
1842 
1843 #if CONTENT_FILTER
1844 	if (so) {
1845 		so->so_state_change_cnt++;
1846 	}
1847 #endif
1848 
1849 	/*
1850 	 *   Call inner routine, to assign local interface address.
1851 	 */
1852 	if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) {
1853 		return error;
1854 	}
1855 
1856 	socket_unlock(so, 0);
1857 	pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
1858 	    inp->inp_laddr.s_addr ? inp->inp_laddr : laddr,
1859 	    inp->inp_lport, 0, NULL);
1860 	socket_lock(so, 0);
1861 
1862 	/*
1863 	 * Check if the socket is still in a valid state. When we unlock this
1864 	 * embryonic socket, it can get aborted if another thread is closing
1865 	 * the listener (radar 7947600).
1866 	 */
1867 	if ((so->so_flags & SOF_ABORTED) != 0) {
1868 		return ECONNREFUSED;
1869 	}
1870 
1871 	if (pcb != NULL) {
1872 		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
1873 		return EADDRINUSE;
1874 	}
1875 	if (inp->inp_laddr.s_addr == INADDR_ANY) {
1876 		if (inp->inp_lport == 0) {
1877 			error = in_pcbbind(inp, NULL, p);
1878 			if (error) {
1879 				return error;
1880 			}
1881 		}
1882 		if (!lck_rw_try_lock_exclusive(&inp->inp_pcbinfo->ipi_lock)) {
1883 			/*
1884 			 * Lock inversion issue, mostly with udp
1885 			 * multicast packets.
1886 			 */
1887 			socket_unlock(so, 0);
1888 			lck_rw_lock_exclusive(&inp->inp_pcbinfo->ipi_lock);
1889 			socket_lock(so, 0);
1890 		}
1891 		inp->inp_laddr = laddr;
1892 		/* no reference needed */
1893 		inp->inp_last_outifp = (outif != NULL) ? *outif : NULL;
1894 #if SKYWALK
1895 		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
1896 			netns_set_ifnet(&inp->inp_netns_token,
1897 			    inp->inp_last_outifp);
1898 		}
1899 #endif /* SKYWALK */
1900 		inp->inp_flags |= INP_INADDR_ANY;
1901 	} else {
1902 		/*
1903 		 * Usage of IP_PKTINFO, without local port already
1904 		 * speficified will cause kernel to panic,
1905 		 * see rdar://problem/18508185.
1906 		 * For now returning error to avoid a kernel panic
1907 		 * This routines can be refactored and handle this better
1908 		 * in future.
1909 		 */
1910 		if (inp->inp_lport == 0) {
1911 			return EINVAL;
1912 		}
1913 		if (!lck_rw_try_lock_exclusive(&inp->inp_pcbinfo->ipi_lock)) {
1914 			/*
1915 			 * Lock inversion issue, mostly with udp
1916 			 * multicast packets.
1917 			 */
1918 			socket_unlock(so, 0);
1919 			lck_rw_lock_exclusive(&inp->inp_pcbinfo->ipi_lock);
1920 			socket_lock(so, 0);
1921 		}
1922 	}
1923 	inp->inp_faddr = sin->sin_addr;
1924 	inp->inp_fport = sin->sin_port;
1925 	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
1926 		nstat_pcb_invalidate_cache(inp);
1927 	}
1928 	in_pcbrehash(inp);
1929 	lck_rw_done(&inp->inp_pcbinfo->ipi_lock);
1930 	return 0;
1931 }
1932 
1933 void
in_pcbdisconnect(struct inpcb * inp)1934 in_pcbdisconnect(struct inpcb *inp)
1935 {
1936 	struct socket *so = inp->inp_socket;
1937 
1938 	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
1939 		nstat_pcb_cache(inp);
1940 	}
1941 
1942 	inp->inp_faddr.s_addr = INADDR_ANY;
1943 	inp->inp_fport = 0;
1944 
1945 #if CONTENT_FILTER
1946 	if (so) {
1947 		so->so_state_change_cnt++;
1948 	}
1949 #endif
1950 
1951 	if (!lck_rw_try_lock_exclusive(&inp->inp_pcbinfo->ipi_lock)) {
1952 		/* lock inversion issue, mostly with udp multicast packets */
1953 		socket_unlock(so, 0);
1954 		lck_rw_lock_exclusive(&inp->inp_pcbinfo->ipi_lock);
1955 		socket_lock(so, 0);
1956 	}
1957 
1958 	in_pcbrehash(inp);
1959 	lck_rw_done(&inp->inp_pcbinfo->ipi_lock);
1960 	/*
1961 	 * A multipath subflow socket would have its SS_NOFDREF set by default,
1962 	 * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB;
1963 	 * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared.
1964 	 */
1965 	if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) {
1966 		in_pcbdetach(inp);
1967 	}
1968 }
1969 
1970 void
in_pcbdetach(struct inpcb * inp)1971 in_pcbdetach(struct inpcb *inp)
1972 {
1973 	struct socket *so = inp->inp_socket;
1974 
1975 	if (so->so_pcb == NULL) {
1976 		/* PCB has been disposed */
1977 		panic("%s: inp=%p so=%p proto=%d so_pcb is null!", __func__,
1978 		    inp, so, SOCK_PROTO(so));
1979 		/* NOTREACHED */
1980 	}
1981 
1982 #if IPSEC
1983 	if (inp->inp_sp != NULL) {
1984 		(void) ipsec4_delete_pcbpolicy(inp);
1985 	}
1986 #endif /* IPSEC */
1987 
1988 	if (inp->inp_stat != NULL && SOCK_PROTO(so) == IPPROTO_UDP) {
1989 		if (inp->inp_stat->rxpackets == 0 && inp->inp_stat->txpackets == 0) {
1990 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_no_data);
1991 		}
1992 	}
1993 
1994 	/*
1995 	 * Let NetworkStatistics know this PCB is going away
1996 	 * before we detach it.
1997 	 */
1998 	if (nstat_collect &&
1999 	    (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) {
2000 		nstat_pcb_detach(inp);
2001 	}
2002 
2003 	/* Free memory buffer held for generating keep alives */
2004 	if (inp->inp_keepalive_data != NULL) {
2005 		kfree_data(inp->inp_keepalive_data, inp->inp_keepalive_datalen);
2006 		inp->inp_keepalive_data = NULL;
2007 	}
2008 
2009 	/* mark socket state as dead */
2010 	if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
2011 		panic("%s: so=%p proto=%d couldn't set to STOPUSING",
2012 		    __func__, so, SOCK_PROTO(so));
2013 		/* NOTREACHED */
2014 	}
2015 
2016 #if SKYWALK
2017 	/* Free up the port in the namespace registrar if not in TIME_WAIT */
2018 	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
2019 		netns_release(&inp->inp_netns_token);
2020 		netns_release(&inp->inp_wildcard_netns_token);
2021 	}
2022 #endif /* SKYWALK */
2023 
2024 	if (!(so->so_flags & SOF_PCBCLEARING)) {
2025 		struct ip_moptions *imo;
2026 
2027 		inp->inp_vflag = 0;
2028 		if (inp->inp_options != NULL) {
2029 			(void) m_free(inp->inp_options);
2030 			inp->inp_options = NULL;
2031 		}
2032 		ROUTE_RELEASE(&inp->inp_route);
2033 		imo = inp->inp_moptions;
2034 		if (imo != NULL) {
2035 			IMO_REMREF(imo);
2036 		}
2037 		inp->inp_moptions = NULL;
2038 		sofreelastref(so, 0);
2039 		inp->inp_state = INPCB_STATE_DEAD;
2040 
2041 		/*
2042 		 * Enqueue an event to send kernel event notification
2043 		 * if the flow has to CLAT46 for data packets
2044 		 */
2045 		if (inp->inp_flags2 & INP2_CLAT46_FLOW) {
2046 			/*
2047 			 * If there has been any exchange of data bytes
2048 			 * over this flow.
2049 			 * Schedule a notification to report that flow is
2050 			 * using client side translation.
2051 			 */
2052 			if (inp->inp_stat != NULL &&
2053 			    (inp->inp_stat->txbytes != 0 ||
2054 			    inp->inp_stat->rxbytes != 0)) {
2055 				if (so->so_flags & SOF_DELEGATED) {
2056 					in6_clat46_event_enqueue_nwk_wq_entry(
2057 						IN6_CLAT46_EVENT_V4_FLOW,
2058 						so->e_pid,
2059 						so->e_uuid);
2060 				} else {
2061 					in6_clat46_event_enqueue_nwk_wq_entry(
2062 						IN6_CLAT46_EVENT_V4_FLOW,
2063 						so->last_pid,
2064 						so->last_uuid);
2065 				}
2066 			}
2067 		}
2068 
2069 		/* makes sure we're not called twice from so_close */
2070 		so->so_flags |= SOF_PCBCLEARING;
2071 
2072 		inpcb_gc_sched(inp->inp_pcbinfo, INPCB_TIMER_FAST);
2073 	}
2074 }
2075 
2076 
2077 void
in_pcbdispose(struct inpcb * inp)2078 in_pcbdispose(struct inpcb *inp)
2079 {
2080 	struct socket *so = inp->inp_socket;
2081 	struct inpcbinfo *ipi = inp->inp_pcbinfo;
2082 
2083 	if (so != NULL && so->so_usecount != 0) {
2084 		panic("%s: so %p [%d,%d] usecount %d lockhistory %s",
2085 		    __func__, so, SOCK_DOM(so), SOCK_TYPE(so), so->so_usecount,
2086 		    solockhistory_nr(so));
2087 		/* NOTREACHED */
2088 	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
2089 		if (so != NULL) {
2090 			panic_plain("%s: inp %p invalid wantcnt %d, so %p "
2091 			    "[%d,%d] usecount %d retaincnt %d state 0x%x "
2092 			    "flags 0x%x lockhistory %s\n", __func__, inp,
2093 			    inp->inp_wantcnt, so, SOCK_DOM(so), SOCK_TYPE(so),
2094 			    so->so_usecount, so->so_retaincnt, so->so_state,
2095 			    so->so_flags, solockhistory_nr(so));
2096 			/* NOTREACHED */
2097 		} else {
2098 			panic("%s: inp %p invalid wantcnt %d no socket",
2099 			    __func__, inp, inp->inp_wantcnt);
2100 			/* NOTREACHED */
2101 		}
2102 	}
2103 
2104 	LCK_RW_ASSERT(&ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
2105 
2106 	inp->inp_gencnt = ++ipi->ipi_gencnt;
2107 	/* access ipi in in_pcbremlists */
2108 	in_pcbremlists(inp);
2109 
2110 	if (so != NULL) {
2111 		if (so->so_proto->pr_flags & PR_PCBLOCK) {
2112 			sofreelastref(so, 0);
2113 			if (so->so_rcv.sb_cc > 0 || so->so_snd.sb_cc > 0) {
2114 				/*
2115 				 * selthreadclear() already called
2116 				 * during sofreelastref() above.
2117 				 */
2118 				sbrelease(&so->so_rcv);
2119 				sbrelease(&so->so_snd);
2120 			}
2121 			if (so->so_head != NULL) {
2122 				panic("%s: so=%p head still exist",
2123 				    __func__, so);
2124 				/* NOTREACHED */
2125 			}
2126 			lck_mtx_unlock(&inp->inpcb_mtx);
2127 
2128 #if NECP
2129 			necp_inpcb_remove_cb(inp);
2130 #endif /* NECP */
2131 
2132 			lck_mtx_destroy(&inp->inpcb_mtx, ipi->ipi_lock_grp);
2133 		}
2134 		/* makes sure we're not called twice from so_close */
2135 		so->so_flags |= SOF_PCBCLEARING;
2136 		so->so_saved_pcb = (caddr_t)inp;
2137 		so->so_pcb = NULL;
2138 		inp->inp_socket = NULL;
2139 #if NECP
2140 		necp_inpcb_dispose(inp);
2141 #endif /* NECP */
2142 		/*
2143 		 * In case there a route cached after a detach (possible
2144 		 * in the tcp case), make sure that it is freed before
2145 		 * we deallocate the structure.
2146 		 */
2147 		ROUTE_RELEASE(&inp->inp_route);
2148 		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
2149 			zfree(ipi->ipi_zone, inp);
2150 		}
2151 		sodealloc(so);
2152 	}
2153 }
2154 
2155 /*
2156  * The calling convention of in_getsockaddr() and in_getpeeraddr() was
2157  * modified to match the pru_sockaddr() and pru_peeraddr() entry points
2158  * in struct pr_usrreqs, so that protocols can just reference then directly
2159  * without the need for a wrapper function.
2160  */
2161 int
in_getsockaddr(struct socket * so,struct sockaddr ** nam)2162 in_getsockaddr(struct socket *so, struct sockaddr **nam)
2163 {
2164 	struct inpcb *inp;
2165 	struct sockaddr_in *sin;
2166 
2167 	/*
2168 	 * Do the malloc first in case it blocks.
2169 	 */
2170 	sin = SIN(alloc_sockaddr(sizeof(*sin),
2171 	    Z_WAITOK | Z_NOFAIL));
2172 
2173 	sin->sin_family = AF_INET;
2174 
2175 	if ((inp = sotoinpcb(so)) == NULL) {
2176 		free_sockaddr(sin);
2177 		return EINVAL;
2178 	}
2179 	sin->sin_port = inp->inp_lport;
2180 	sin->sin_addr = inp->inp_laddr;
2181 
2182 	*nam = SA(sin);
2183 	return 0;
2184 }
2185 
2186 int
in_getsockaddr_s(struct socket * so,struct sockaddr_in * ss)2187 in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss)
2188 {
2189 	struct sockaddr_in *sin = ss;
2190 	struct inpcb *inp;
2191 
2192 	VERIFY(ss != NULL);
2193 	SOCKADDR_ZERO(ss, sizeof(*ss));
2194 
2195 	sin->sin_family = AF_INET;
2196 	sin->sin_len = sizeof(*sin);
2197 
2198 	if ((inp = sotoinpcb(so)) == NULL) {
2199 		return EINVAL;
2200 	}
2201 
2202 	sin->sin_port = inp->inp_lport;
2203 	sin->sin_addr = inp->inp_laddr;
2204 	return 0;
2205 }
2206 
2207 int
in_getpeeraddr(struct socket * so,struct sockaddr ** nam)2208 in_getpeeraddr(struct socket *so, struct sockaddr **nam)
2209 {
2210 	struct inpcb *inp;
2211 	struct sockaddr_in *sin;
2212 
2213 	/*
2214 	 * Do the malloc first in case it blocks.
2215 	 */
2216 	sin = SIN(alloc_sockaddr(sizeof(*sin),
2217 	    Z_WAITOK | Z_NOFAIL));
2218 
2219 	sin->sin_family = AF_INET;
2220 
2221 	if ((inp = sotoinpcb(so)) == NULL) {
2222 		free_sockaddr(sin);
2223 		return EINVAL;
2224 	}
2225 	sin->sin_port = inp->inp_fport;
2226 	sin->sin_addr = inp->inp_faddr;
2227 
2228 	*nam = SA(sin);
2229 	return 0;
2230 }
2231 
2232 void
in_pcbnotifyall(struct inpcbinfo * pcbinfo,struct in_addr faddr,int errno,void (* notify)(struct inpcb *,int))2233 in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2234     int errno, void (*notify)(struct inpcb *, int))
2235 {
2236 	struct inpcb *inp;
2237 
2238 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2239 
2240 	LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
2241 		if (!(inp->inp_vflag & INP_IPV4)) {
2242 			continue;
2243 		}
2244 		if (inp->inp_faddr.s_addr != faddr.s_addr ||
2245 		    inp->inp_socket == NULL) {
2246 			continue;
2247 		}
2248 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2249 			continue;
2250 		}
2251 		socket_lock(inp->inp_socket, 1);
2252 		(*notify)(inp, errno);
2253 		(void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
2254 		socket_unlock(inp->inp_socket, 1);
2255 	}
2256 	lck_rw_done(&pcbinfo->ipi_lock);
2257 }
2258 
2259 /*
2260  * Check for alternatives when higher level complains
2261  * about service problems.  For now, invalidate cached
2262  * routing information.  If the route was created dynamically
2263  * (by a redirect), time to try a default gateway again.
2264  */
2265 void
in_losing(struct inpcb * inp)2266 in_losing(struct inpcb *inp)
2267 {
2268 	boolean_t release = FALSE;
2269 	struct rtentry *rt;
2270 
2271 	if ((rt = inp->inp_route.ro_rt) != NULL) {
2272 		struct in_ifaddr *ia = NULL;
2273 
2274 		RT_LOCK(rt);
2275 		if (rt->rt_flags & RTF_DYNAMIC) {
2276 			/*
2277 			 * Prevent another thread from modifying rt_key,
2278 			 * rt_gateway via rt_setgate() after rt_lock is
2279 			 * dropped by marking the route as defunct.
2280 			 */
2281 			rt->rt_flags |= RTF_CONDEMNED;
2282 			RT_UNLOCK(rt);
2283 			(void) rtrequest(RTM_DELETE, rt_key(rt),
2284 			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
2285 		} else {
2286 			RT_UNLOCK(rt);
2287 		}
2288 		/* if the address is gone keep the old route in the pcb */
2289 		if (inp->inp_laddr.s_addr != INADDR_ANY &&
2290 		    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
2291 			/*
2292 			 * Address is around; ditch the route.  A new route
2293 			 * can be allocated the next time output is attempted.
2294 			 */
2295 			release = TRUE;
2296 		}
2297 		if (ia != NULL) {
2298 			ifa_remref(&ia->ia_ifa);
2299 		}
2300 	}
2301 	if (rt == NULL || release) {
2302 		ROUTE_RELEASE(&inp->inp_route);
2303 	}
2304 }
2305 
2306 /*
2307  * After a routing change, flush old routing
2308  * and allocate a (hopefully) better one.
2309  */
2310 void
in_rtchange(struct inpcb * inp,int errno)2311 in_rtchange(struct inpcb *inp, int errno)
2312 {
2313 #pragma unused(errno)
2314 	boolean_t release = FALSE;
2315 	struct rtentry *rt;
2316 
2317 	if ((rt = inp->inp_route.ro_rt) != NULL) {
2318 		struct in_ifaddr *ia = NULL;
2319 
2320 		/* if address is gone, keep the old route */
2321 		if (inp->inp_laddr.s_addr != INADDR_ANY &&
2322 		    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
2323 			/*
2324 			 * Address is around; ditch the route.  A new route
2325 			 * can be allocated the next time output is attempted.
2326 			 */
2327 			release = TRUE;
2328 		}
2329 		if (ia != NULL) {
2330 			ifa_remref(&ia->ia_ifa);
2331 		}
2332 	}
2333 	if (rt == NULL || release) {
2334 		ROUTE_RELEASE(&inp->inp_route);
2335 	}
2336 }
2337 
2338 /*
2339  * Lookup a PCB based on the local address and port.
2340  */
2341 struct inpcb *
in_pcblookup_local(struct inpcbinfo * pcbinfo,struct in_addr laddr,unsigned int lport_arg,int wild_okay)2342 in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
2343     unsigned int lport_arg, int wild_okay)
2344 {
2345 	struct inpcb *inp;
2346 	int matchwild = 3, wildcard;
2347 	u_short lport = (u_short)lport_arg;
2348 
2349 	KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0, 0, 0, 0, 0);
2350 
2351 	if (!wild_okay) {
2352 		struct inpcbhead *head;
2353 		/*
2354 		 * Look for an unconnected (wildcard foreign addr) PCB that
2355 		 * matches the local address and port we're looking for.
2356 		 */
2357 		head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2358 		    pcbinfo->ipi_hashmask)];
2359 		LIST_FOREACH(inp, head, inp_hash) {
2360 			if (!(inp->inp_vflag & INP_IPV4)) {
2361 				continue;
2362 			}
2363 			if (inp->inp_faddr.s_addr == INADDR_ANY &&
2364 			    inp->inp_laddr.s_addr == laddr.s_addr &&
2365 			    inp->inp_lport == lport) {
2366 				/*
2367 				 * Found.
2368 				 */
2369 				return inp;
2370 			}
2371 		}
2372 		/*
2373 		 * Not found.
2374 		 */
2375 		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0);
2376 		return NULL;
2377 	} else {
2378 		struct inpcbporthead *porthash;
2379 		struct inpcbport *phd;
2380 		struct inpcb *match = NULL;
2381 		/*
2382 		 * Best fit PCB lookup.
2383 		 *
2384 		 * First see if this local port is in use by looking on the
2385 		 * port hash list.
2386 		 */
2387 		porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
2388 		    pcbinfo->ipi_porthashmask)];
2389 		LIST_FOREACH(phd, porthash, phd_hash) {
2390 			if (phd->phd_port == lport) {
2391 				break;
2392 			}
2393 		}
2394 		if (phd != NULL) {
2395 			/*
2396 			 * Port is in use by one or more PCBs. Look for best
2397 			 * fit.
2398 			 */
2399 			LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
2400 				wildcard = 0;
2401 				if (!(inp->inp_vflag & INP_IPV4)) {
2402 					continue;
2403 				}
2404 				if (inp->inp_faddr.s_addr != INADDR_ANY) {
2405 					wildcard++;
2406 				}
2407 				if (inp->inp_laddr.s_addr != INADDR_ANY) {
2408 					if (laddr.s_addr == INADDR_ANY) {
2409 						wildcard++;
2410 					} else if (inp->inp_laddr.s_addr !=
2411 					    laddr.s_addr) {
2412 						continue;
2413 					}
2414 				} else {
2415 					if (laddr.s_addr != INADDR_ANY) {
2416 						wildcard++;
2417 					}
2418 				}
2419 				if (wildcard < matchwild) {
2420 					match = inp;
2421 					matchwild = wildcard;
2422 					if (matchwild == 0) {
2423 						break;
2424 					}
2425 				}
2426 			}
2427 		}
2428 		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,
2429 		    0, 0, 0, 0);
2430 		return match;
2431 	}
2432 }
2433 
2434 /*
2435  * Check if PCB exists in hash list.
2436  */
2437 int
in_pcblookup_hash_exists(struct inpcbinfo * pcbinfo,struct in_addr faddr,u_int fport_arg,struct in_addr laddr,u_int lport_arg,int wildcard,uid_t * uid,gid_t * gid,struct ifnet * ifp)2438 in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2439     u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
2440     uid_t *uid, gid_t *gid, struct ifnet *ifp)
2441 {
2442 	struct inpcbhead *head;
2443 	struct inpcb *inp;
2444 	u_short fport = (u_short)fport_arg, lport = (u_short)lport_arg;
2445 	int found = 0;
2446 	struct inpcb *local_wild = NULL;
2447 	struct inpcb *local_wild_mapped = NULL;
2448 
2449 	*uid = UID_MAX;
2450 	*gid = GID_MAX;
2451 
2452 	/*
2453 	 * We may have found the pcb in the last lookup - check this first.
2454 	 */
2455 
2456 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2457 
2458 	/*
2459 	 * First look for an exact match.
2460 	 */
2461 	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
2462 	    pcbinfo->ipi_hashmask)];
2463 	LIST_FOREACH(inp, head, inp_hash) {
2464 		if (!(inp->inp_vflag & INP_IPV4)) {
2465 			continue;
2466 		}
2467 		if (inp_restricted_recv(inp, ifp)) {
2468 			continue;
2469 		}
2470 
2471 #if NECP
2472 		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2473 			continue;
2474 		}
2475 #endif /* NECP */
2476 
2477 		if (inp->inp_faddr.s_addr == faddr.s_addr &&
2478 		    inp->inp_laddr.s_addr == laddr.s_addr &&
2479 		    inp->inp_fport == fport &&
2480 		    inp->inp_lport == lport) {
2481 			if ((found = (inp->inp_socket != NULL))) {
2482 				/*
2483 				 * Found.
2484 				 */
2485 				*uid = kauth_cred_getuid(
2486 					inp->inp_socket->so_cred);
2487 				*gid = kauth_cred_getgid(
2488 					inp->inp_socket->so_cred);
2489 			}
2490 			lck_rw_done(&pcbinfo->ipi_lock);
2491 			return found;
2492 		}
2493 	}
2494 
2495 	if (!wildcard) {
2496 		/*
2497 		 * Not found.
2498 		 */
2499 		lck_rw_done(&pcbinfo->ipi_lock);
2500 		return 0;
2501 	}
2502 
2503 	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2504 	    pcbinfo->ipi_hashmask)];
2505 	LIST_FOREACH(inp, head, inp_hash) {
2506 		if (!(inp->inp_vflag & INP_IPV4)) {
2507 			continue;
2508 		}
2509 		if (inp_restricted_recv(inp, ifp)) {
2510 			continue;
2511 		}
2512 
2513 #if NECP
2514 		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2515 			continue;
2516 		}
2517 #endif /* NECP */
2518 
2519 		if (inp->inp_faddr.s_addr == INADDR_ANY &&
2520 		    inp->inp_lport == lport) {
2521 			if (inp->inp_laddr.s_addr == laddr.s_addr) {
2522 				if ((found = (inp->inp_socket != NULL))) {
2523 					*uid = kauth_cred_getuid(
2524 						inp->inp_socket->so_cred);
2525 					*gid = kauth_cred_getgid(
2526 						inp->inp_socket->so_cred);
2527 				}
2528 				lck_rw_done(&pcbinfo->ipi_lock);
2529 				return found;
2530 			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
2531 				if (inp->inp_socket &&
2532 				    SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
2533 					local_wild_mapped = inp;
2534 				} else {
2535 					local_wild = inp;
2536 				}
2537 			}
2538 		}
2539 	}
2540 	if (local_wild == NULL) {
2541 		if (local_wild_mapped != NULL) {
2542 			if ((found = (local_wild_mapped->inp_socket != NULL))) {
2543 				*uid = kauth_cred_getuid(
2544 					local_wild_mapped->inp_socket->so_cred);
2545 				*gid = kauth_cred_getgid(
2546 					local_wild_mapped->inp_socket->so_cred);
2547 			}
2548 			lck_rw_done(&pcbinfo->ipi_lock);
2549 			return found;
2550 		}
2551 		lck_rw_done(&pcbinfo->ipi_lock);
2552 		return 0;
2553 	}
2554 	if ((found = (local_wild->inp_socket != NULL))) {
2555 		*uid = kauth_cred_getuid(
2556 			local_wild->inp_socket->so_cred);
2557 		*gid = kauth_cred_getgid(
2558 			local_wild->inp_socket->so_cred);
2559 	}
2560 	lck_rw_done(&pcbinfo->ipi_lock);
2561 	return found;
2562 }
2563 
2564 /*
2565  * Lookup PCB in hash list.
2566  */
2567 struct inpcb *
in_pcblookup_hash(struct inpcbinfo * pcbinfo,struct in_addr faddr,u_int fport_arg,struct in_addr laddr,u_int lport_arg,int wildcard,struct ifnet * ifp)2568 in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2569     u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
2570     struct ifnet *ifp)
2571 {
2572 	struct inpcbhead *head;
2573 	struct inpcb *inp;
2574 	u_short fport = (u_short)fport_arg, lport = (u_short)lport_arg;
2575 	struct inpcb *local_wild = NULL;
2576 	struct inpcb *local_wild_mapped = NULL;
2577 
2578 	/*
2579 	 * We may have found the pcb in the last lookup - check this first.
2580 	 */
2581 
2582 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2583 
2584 	/*
2585 	 * First look for an exact match.
2586 	 */
2587 	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
2588 	    pcbinfo->ipi_hashmask)];
2589 	LIST_FOREACH(inp, head, inp_hash) {
2590 		if (!(inp->inp_vflag & INP_IPV4)) {
2591 			continue;
2592 		}
2593 		if (inp_restricted_recv(inp, ifp)) {
2594 			continue;
2595 		}
2596 
2597 #if NECP
2598 		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2599 			continue;
2600 		}
2601 #endif /* NECP */
2602 
2603 		if (inp->inp_faddr.s_addr == faddr.s_addr &&
2604 		    inp->inp_laddr.s_addr == laddr.s_addr &&
2605 		    inp->inp_fport == fport &&
2606 		    inp->inp_lport == lport) {
2607 			/*
2608 			 * Found.
2609 			 */
2610 			if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2611 			    WNT_STOPUSING) {
2612 				lck_rw_done(&pcbinfo->ipi_lock);
2613 				return inp;
2614 			} else {
2615 				/* it's there but dead, say it isn't found */
2616 				lck_rw_done(&pcbinfo->ipi_lock);
2617 				return NULL;
2618 			}
2619 		}
2620 	}
2621 
2622 	if (!wildcard) {
2623 		/*
2624 		 * Not found.
2625 		 */
2626 		lck_rw_done(&pcbinfo->ipi_lock);
2627 		return NULL;
2628 	}
2629 
2630 	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2631 	    pcbinfo->ipi_hashmask)];
2632 	LIST_FOREACH(inp, head, inp_hash) {
2633 		if (!(inp->inp_vflag & INP_IPV4)) {
2634 			continue;
2635 		}
2636 		if (inp_restricted_recv(inp, ifp)) {
2637 			continue;
2638 		}
2639 
2640 #if NECP
2641 		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2642 			continue;
2643 		}
2644 #endif /* NECP */
2645 
2646 		if (inp->inp_faddr.s_addr == INADDR_ANY &&
2647 		    inp->inp_lport == lport) {
2648 			if (inp->inp_laddr.s_addr == laddr.s_addr) {
2649 				if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2650 				    WNT_STOPUSING) {
2651 					lck_rw_done(&pcbinfo->ipi_lock);
2652 					return inp;
2653 				} else {
2654 					/* it's dead; say it isn't found */
2655 					lck_rw_done(&pcbinfo->ipi_lock);
2656 					return NULL;
2657 				}
2658 			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
2659 				if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
2660 					local_wild_mapped = inp;
2661 				} else {
2662 					local_wild = inp;
2663 				}
2664 			}
2665 		}
2666 	}
2667 	if (local_wild == NULL) {
2668 		if (local_wild_mapped != NULL) {
2669 			if (in_pcb_checkstate(local_wild_mapped,
2670 			    WNT_ACQUIRE, 0) != WNT_STOPUSING) {
2671 				lck_rw_done(&pcbinfo->ipi_lock);
2672 				return local_wild_mapped;
2673 			} else {
2674 				/* it's dead; say it isn't found */
2675 				lck_rw_done(&pcbinfo->ipi_lock);
2676 				return NULL;
2677 			}
2678 		}
2679 		lck_rw_done(&pcbinfo->ipi_lock);
2680 		return NULL;
2681 	}
2682 	if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
2683 		lck_rw_done(&pcbinfo->ipi_lock);
2684 		return local_wild;
2685 	}
2686 	/*
2687 	 * It's either not found or is already dead.
2688 	 */
2689 	lck_rw_done(&pcbinfo->ipi_lock);
2690 	return NULL;
2691 }
2692 
2693 /*
2694  * @brief	Insert PCB onto various hash lists.
2695  *
2696  * @param	inp Pointer to internet protocol control block
2697  * @param	locked	Implies if ipi_lock (protecting pcb list)
2698  *              is already locked or not.
2699  *
2700  * @return	int error on failure and 0 on success
2701  */
2702 int
in_pcbinshash(struct inpcb * inp,int locked)2703 in_pcbinshash(struct inpcb *inp, int locked)
2704 {
2705 	struct inpcbhead *pcbhash;
2706 	struct inpcbporthead *pcbporthash;
2707 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2708 	struct inpcbport *phd;
2709 	u_int32_t hashkey_faddr;
2710 
2711 	if (!locked) {
2712 		if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
2713 			/*
2714 			 * Lock inversion issue, mostly with udp
2715 			 * multicast packets
2716 			 */
2717 			socket_unlock(inp->inp_socket, 0);
2718 			lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
2719 			socket_lock(inp->inp_socket, 0);
2720 		}
2721 	}
2722 
2723 	/*
2724 	 * This routine or its caller may have given up
2725 	 * socket's protocol lock briefly.
2726 	 * During that time the socket may have been dropped.
2727 	 * Safe-guarding against that.
2728 	 */
2729 	if (inp->inp_state == INPCB_STATE_DEAD) {
2730 		if (!locked) {
2731 			lck_rw_done(&pcbinfo->ipi_lock);
2732 		}
2733 		return ECONNABORTED;
2734 	}
2735 
2736 
2737 	if (inp->inp_vflag & INP_IPV6) {
2738 		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2739 	} else {
2740 		hashkey_faddr = inp->inp_faddr.s_addr;
2741 	}
2742 
2743 	inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
2744 	    inp->inp_fport, pcbinfo->ipi_hashmask);
2745 
2746 	pcbhash = &pcbinfo->ipi_hashbase[inp->inp_hash_element];
2747 
2748 	pcbporthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(inp->inp_lport,
2749 	    pcbinfo->ipi_porthashmask)];
2750 
2751 	/*
2752 	 * Go through port list and look for a head for this lport.
2753 	 */
2754 	LIST_FOREACH(phd, pcbporthash, phd_hash) {
2755 		if (phd->phd_port == inp->inp_lport) {
2756 			break;
2757 		}
2758 	}
2759 
2760 	/*
2761 	 * If none exists, malloc one and tack it on.
2762 	 */
2763 	if (phd == NULL) {
2764 		phd = kalloc_type(struct inpcbport, Z_WAITOK | Z_NOFAIL);
2765 		phd->phd_port = inp->inp_lport;
2766 		LIST_INIT(&phd->phd_pcblist);
2767 		LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
2768 	}
2769 
2770 	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2771 
2772 #if SKYWALK
2773 	int err;
2774 	struct socket *so = inp->inp_socket;
2775 	if ((SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP) &&
2776 	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
2777 		if (inp->inp_vflag & INP_IPV6) {
2778 			err = netns_reserve_in6(&inp->inp_netns_token,
2779 			    inp->in6p_laddr, (uint8_t)SOCK_PROTO(so), inp->inp_lport,
2780 			    NETNS_BSD | NETNS_PRERESERVED, NULL);
2781 		} else {
2782 			err = netns_reserve_in(&inp->inp_netns_token,
2783 			    inp->inp_laddr, (uint8_t)SOCK_PROTO(so), inp->inp_lport,
2784 			    NETNS_BSD | NETNS_PRERESERVED, NULL);
2785 		}
2786 		if (err) {
2787 			if (!locked) {
2788 				lck_rw_done(&pcbinfo->ipi_lock);
2789 			}
2790 			return err;
2791 		}
2792 		netns_set_ifnet(&inp->inp_netns_token, inp->inp_last_outifp);
2793 		inp_update_netns_flags(so);
2794 	}
2795 #endif /* SKYWALK */
2796 
2797 	inp->inp_phd = phd;
2798 	LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
2799 	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
2800 	inp->inp_flags2 |= INP2_INHASHLIST;
2801 
2802 	if (!locked) {
2803 		lck_rw_done(&pcbinfo->ipi_lock);
2804 	}
2805 
2806 #if NECP
2807 	// This call catches the original setting of the local address
2808 	inp_update_necp_policy(inp, NULL, NULL, 0);
2809 #endif /* NECP */
2810 
2811 	return 0;
2812 }
2813 
2814 /*
2815  * Move PCB to the proper hash bucket when { faddr, fport } have  been
2816  * changed. NOTE: This does not handle the case of the lport changing (the
2817  * hashed port list would have to be updated as well), so the lport must
2818  * not change after in_pcbinshash() has been called.
2819  */
2820 void
in_pcbrehash(struct inpcb * inp)2821 in_pcbrehash(struct inpcb *inp)
2822 {
2823 	struct inpcbhead *head;
2824 	u_int32_t hashkey_faddr;
2825 
2826 #if SKYWALK
2827 	struct socket *so = inp->inp_socket;
2828 	if ((SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP) &&
2829 	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
2830 		int err;
2831 		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
2832 			if (inp->inp_vflag & INP_IPV6) {
2833 				err = netns_change_addr_in6(
2834 					&inp->inp_netns_token, inp->in6p_laddr);
2835 			} else {
2836 				err = netns_change_addr_in(
2837 					&inp->inp_netns_token, inp->inp_laddr);
2838 			}
2839 		} else {
2840 			if (inp->inp_vflag & INP_IPV6) {
2841 				err = netns_reserve_in6(&inp->inp_netns_token,
2842 				    inp->in6p_laddr, (uint8_t)SOCK_PROTO(so),
2843 				    inp->inp_lport, NETNS_BSD, NULL);
2844 			} else {
2845 				err = netns_reserve_in(&inp->inp_netns_token,
2846 				    inp->inp_laddr, (uint8_t)SOCK_PROTO(so),
2847 				    inp->inp_lport, NETNS_BSD, NULL);
2848 			}
2849 		}
2850 		/* We are assuming that whatever code paths result in a rehash
2851 		 * did their due diligence and ensured that the given
2852 		 * <proto, laddr, lport> tuple was free ahead of time. Just
2853 		 * reserving the lport on INADDR_ANY should be enough, since
2854 		 * that will block Skywalk from trying to reserve that same
2855 		 * port. Given this assumption, the above netns calls should
2856 		 * never fail*/
2857 		VERIFY(err == 0);
2858 
2859 		netns_set_ifnet(&inp->inp_netns_token, inp->inp_last_outifp);
2860 		inp_update_netns_flags(so);
2861 	}
2862 #endif /* SKYWALK */
2863 	if (inp->inp_vflag & INP_IPV6) {
2864 		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2865 	} else {
2866 		hashkey_faddr = inp->inp_faddr.s_addr;
2867 	}
2868 
2869 	inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
2870 	    inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask);
2871 	head = &inp->inp_pcbinfo->ipi_hashbase[inp->inp_hash_element];
2872 
2873 	if (inp->inp_flags2 & INP2_INHASHLIST) {
2874 		LIST_REMOVE(inp, inp_hash);
2875 		inp->inp_flags2 &= ~INP2_INHASHLIST;
2876 	}
2877 
2878 	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2879 	LIST_INSERT_HEAD(head, inp, inp_hash);
2880 	inp->inp_flags2 |= INP2_INHASHLIST;
2881 
2882 #if NECP
2883 	// This call catches updates to the remote addresses
2884 	inp_update_necp_policy(inp, NULL, NULL, 0);
2885 #endif /* NECP */
2886 }
2887 
2888 /*
2889  * Remove PCB from various lists.
2890  * Must be called pcbinfo lock is held in exclusive mode.
2891  */
2892 void
in_pcbremlists(struct inpcb * inp)2893 in_pcbremlists(struct inpcb *inp)
2894 {
2895 	inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt;
2896 
2897 	/*
2898 	 * Check if it's in hashlist -- an inp is placed in hashlist when
2899 	 * it's local port gets assigned. So it should also be present
2900 	 * in the port list.
2901 	 */
2902 	if (inp->inp_flags2 & INP2_INHASHLIST) {
2903 		struct inpcbport *phd = inp->inp_phd;
2904 
2905 		VERIFY(phd != NULL && inp->inp_lport > 0);
2906 
2907 		LIST_REMOVE(inp, inp_hash);
2908 		inp->inp_hash.le_next = NULL;
2909 		inp->inp_hash.le_prev = NULL;
2910 
2911 		LIST_REMOVE(inp, inp_portlist);
2912 		inp->inp_portlist.le_next = NULL;
2913 		inp->inp_portlist.le_prev = NULL;
2914 		if (LIST_EMPTY(&phd->phd_pcblist)) {
2915 			LIST_REMOVE(phd, phd_hash);
2916 			kfree_type(struct inpcbport, phd);
2917 		}
2918 		inp->inp_phd = NULL;
2919 		inp->inp_flags2 &= ~INP2_INHASHLIST;
2920 #if SKYWALK
2921 		/* Free up the port in the namespace registrar */
2922 		netns_release(&inp->inp_netns_token);
2923 		netns_release(&inp->inp_wildcard_netns_token);
2924 #endif /* SKYWALK */
2925 	}
2926 	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2927 
2928 	if (inp->inp_flags2 & INP2_TIMEWAIT) {
2929 		/* Remove from time-wait queue */
2930 		tcp_remove_from_time_wait(inp);
2931 		inp->inp_flags2 &= ~INP2_TIMEWAIT;
2932 		VERIFY(inp->inp_pcbinfo->ipi_twcount != 0);
2933 		inp->inp_pcbinfo->ipi_twcount--;
2934 	} else {
2935 		/* Remove from global inp list if it is not time-wait */
2936 		LIST_REMOVE(inp, inp_list);
2937 	}
2938 
2939 	if (inp->inp_flags2 & INP2_IN_FCTREE) {
2940 		inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED | INPFC_REMOVE));
2941 		VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE));
2942 	}
2943 
2944 	inp->inp_pcbinfo->ipi_count--;
2945 }
2946 
2947 /*
2948  * Mechanism used to defer the memory release of PCBs
2949  * The pcb list will contain the pcb until the reaper can clean it up if
2950  * the following conditions are met:
2951  *	1) state "DEAD",
2952  *	2) wantcnt is STOPUSING
2953  *	3) usecount is 0
2954  * This function will be called to either mark the pcb as
2955  */
2956 int
in_pcb_checkstate(struct inpcb * pcb,int mode,int locked)2957 in_pcb_checkstate(struct inpcb *pcb, int mode, int locked)
2958 {
2959 	volatile UInt32 *wantcnt = (volatile UInt32 *)&pcb->inp_wantcnt;
2960 	UInt32 origwant;
2961 	UInt32 newwant;
2962 
2963 	switch (mode) {
2964 	case WNT_STOPUSING:
2965 		/*
2966 		 * Try to mark the pcb as ready for recycling.  CAS with
2967 		 * STOPUSING, if success we're good, if it's in use, will
2968 		 * be marked later
2969 		 */
2970 		if (locked == 0) {
2971 			socket_lock(pcb->inp_socket, 1);
2972 		}
2973 		pcb->inp_state = INPCB_STATE_DEAD;
2974 
2975 stopusing:
2976 		if (pcb->inp_socket->so_usecount < 0) {
2977 			panic("%s: pcb=%p so=%p usecount is negative",
2978 			    __func__, pcb, pcb->inp_socket);
2979 			/* NOTREACHED */
2980 		}
2981 		if (locked == 0) {
2982 			socket_unlock(pcb->inp_socket, 1);
2983 		}
2984 
2985 		inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST);
2986 
2987 		origwant = *wantcnt;
2988 		if ((UInt16) origwant == 0xffff) { /* should stop using */
2989 			return WNT_STOPUSING;
2990 		}
2991 		newwant = 0xffff;
2992 		if ((UInt16) origwant == 0) {
2993 			/* try to mark it as unsuable now */
2994 			OSCompareAndSwap(origwant, newwant, wantcnt);
2995 		}
2996 		return WNT_STOPUSING;
2997 
2998 	case WNT_ACQUIRE:
2999 		/*
3000 		 * Try to increase reference to pcb.  If WNT_STOPUSING
3001 		 * should bail out.  If socket state DEAD, try to set count
3002 		 * to STOPUSING, return failed otherwise increase cnt.
3003 		 */
3004 		do {
3005 			origwant = *wantcnt;
3006 			if ((UInt16) origwant == 0xffff) {
3007 				/* should stop using */
3008 				return WNT_STOPUSING;
3009 			}
3010 			newwant = origwant + 1;
3011 		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
3012 		return WNT_ACQUIRE;
3013 
3014 	case WNT_RELEASE:
3015 		/*
3016 		 * Release reference.  If result is null and pcb state
3017 		 * is DEAD, set wanted bit to STOPUSING
3018 		 */
3019 		if (locked == 0) {
3020 			socket_lock(pcb->inp_socket, 1);
3021 		}
3022 
3023 		do {
3024 			origwant = *wantcnt;
3025 			if ((UInt16) origwant == 0x0) {
3026 				panic("%s: pcb=%p release with zero count",
3027 				    __func__, pcb);
3028 				/* NOTREACHED */
3029 			}
3030 			if ((UInt16) origwant == 0xffff) {
3031 				/* should stop using */
3032 				if (locked == 0) {
3033 					socket_unlock(pcb->inp_socket, 1);
3034 				}
3035 				return WNT_STOPUSING;
3036 			}
3037 			newwant = origwant - 1;
3038 		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
3039 
3040 		if (pcb->inp_state == INPCB_STATE_DEAD) {
3041 			goto stopusing;
3042 		}
3043 		if (pcb->inp_socket->so_usecount < 0) {
3044 			panic("%s: RELEASE pcb=%p so=%p usecount is negative",
3045 			    __func__, pcb, pcb->inp_socket);
3046 			/* NOTREACHED */
3047 		}
3048 
3049 		if (locked == 0) {
3050 			socket_unlock(pcb->inp_socket, 1);
3051 		}
3052 		return WNT_RELEASE;
3053 
3054 	default:
3055 		panic("%s: so=%p not a valid state =%x", __func__,
3056 		    pcb->inp_socket, mode);
3057 		/* NOTREACHED */
3058 	}
3059 
3060 	/* NOTREACHED */
3061 	return mode;
3062 }
3063 
3064 /*
3065  * inpcb_to_compat copies specific bits of an inpcb to a inpcb_compat.
3066  * The inpcb_compat data structure is passed to user space and must
3067  * not change. We intentionally avoid copying pointers.
3068  */
3069 void
inpcb_to_compat(struct inpcb * inp,struct inpcb_compat * inp_compat)3070 inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat)
3071 {
3072 	bzero(inp_compat, sizeof(*inp_compat));
3073 	inp_compat->inp_fport = inp->inp_fport;
3074 	inp_compat->inp_lport = inp->inp_lport;
3075 	inp_compat->nat_owner = 0;
3076 	inp_compat->nat_cookie = 0;
3077 	inp_compat->inp_gencnt = inp->inp_gencnt;
3078 	inp_compat->inp_flags = inp->inp_flags;
3079 	inp_compat->inp_flow = inp->inp_flow;
3080 	inp_compat->inp_vflag = inp->inp_vflag;
3081 	inp_compat->inp_ip_ttl = inp->inp_ip_ttl;
3082 	inp_compat->inp_ip_p = inp->inp_ip_p;
3083 	inp_compat->inp_dependfaddr.inp6_foreign =
3084 	    inp->inp_dependfaddr.inp6_foreign;
3085 	inp_compat->inp_dependladdr.inp6_local =
3086 	    inp->inp_dependladdr.inp6_local;
3087 	inp_compat->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
3088 	inp_compat->inp_depend6.inp6_hlim = 0;
3089 	inp_compat->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
3090 	inp_compat->inp_depend6.inp6_ifindex = 0;
3091 	inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
3092 }
3093 
3094 #if XNU_TARGET_OS_OSX
3095 void
inpcb_to_xinpcb64(struct inpcb * inp,struct xinpcb64 * xinp)3096 inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp)
3097 {
3098 	xinp->inp_fport = inp->inp_fport;
3099 	xinp->inp_lport = inp->inp_lport;
3100 	xinp->inp_gencnt = inp->inp_gencnt;
3101 	xinp->inp_flags = inp->inp_flags;
3102 	xinp->inp_flow = inp->inp_flow;
3103 	xinp->inp_vflag = inp->inp_vflag;
3104 	xinp->inp_ip_ttl = inp->inp_ip_ttl;
3105 	xinp->inp_ip_p = inp->inp_ip_p;
3106 	xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
3107 	xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;
3108 	xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
3109 	xinp->inp_depend6.inp6_hlim = 0;
3110 	xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
3111 	xinp->inp_depend6.inp6_ifindex = 0;
3112 	xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
3113 }
3114 #endif /* XNU_TARGET_OS_OSX */
3115 
3116 /*
3117  * The following routines implement this scheme:
3118  *
3119  * Callers of ip_output() that intend to cache the route in the inpcb pass
3120  * a local copy of the struct route to ip_output().  Using a local copy of
3121  * the cached route significantly simplifies things as IP no longer has to
3122  * worry about having exclusive access to the passed in struct route, since
3123  * it's defined in the caller's stack; in essence, this allows for a lock-
3124  * less operation when updating the struct route at the IP level and below,
3125  * whenever necessary. The scheme works as follows:
3126  *
3127  * Prior to dropping the socket's lock and calling ip_output(), the caller
3128  * copies the struct route from the inpcb into its stack, and adds a reference
3129  * to the cached route entry, if there was any.  The socket's lock is then
3130  * dropped and ip_output() is called with a pointer to the copy of struct
3131  * route defined on the stack (not to the one in the inpcb.)
3132  *
3133  * Upon returning from ip_output(), the caller then acquires the socket's
3134  * lock and synchronizes the cache; if there is no route cached in the inpcb,
3135  * it copies the local copy of struct route (which may or may not contain any
3136  * route) back into the cache; otherwise, if the inpcb has a route cached in
3137  * it, the one in the local copy will be freed, if there's any.  Trashing the
3138  * cached route in the inpcb can be avoided because ip_output() is single-
3139  * threaded per-PCB (i.e. multiple transmits on a PCB are always serialized
3140  * by the socket/transport layer.)
3141  */
3142 void
inp_route_copyout(struct inpcb * inp,struct route * dst)3143 inp_route_copyout(struct inpcb *inp, struct route *dst)
3144 {
3145 	struct route *src = &inp->inp_route;
3146 
3147 	socket_lock_assert_owned(inp->inp_socket);
3148 
3149 	/*
3150 	 * If the route in the PCB is stale or not for IPv4, blow it away;
3151 	 * this is possible in the case of IPv4-mapped address case.
3152 	 */
3153 	if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) {
3154 		ROUTE_RELEASE(src);
3155 	}
3156 
3157 	route_copyout(dst, src, sizeof(*dst));
3158 }
3159 
3160 void
inp_route_copyin(struct inpcb * inp,struct route * src)3161 inp_route_copyin(struct inpcb *inp, struct route *src)
3162 {
3163 	struct route *dst = &inp->inp_route;
3164 
3165 	socket_lock_assert_owned(inp->inp_socket);
3166 
3167 	/* Minor sanity check */
3168 	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3169 		panic("%s: wrong or corrupted route: %p", __func__, src);
3170 	}
3171 
3172 	route_copyin(src, dst, sizeof(*src));
3173 }
3174 
3175 /*
3176  * Handler for setting IP_BOUND_IF/IPV6_BOUND_IF socket option.
3177  */
3178 int
inp_bindif(struct inpcb * inp,unsigned int ifscope,struct ifnet ** pifp)3179 inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp)
3180 {
3181 	struct ifnet *ifp = NULL;
3182 
3183 	ifnet_head_lock_shared();
3184 	if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE &&
3185 	    (ifp = ifindex2ifnet[ifscope]) == NULL)) {
3186 		ifnet_head_done();
3187 		return ENXIO;
3188 	}
3189 	ifnet_head_done();
3190 
3191 	VERIFY(ifp != NULL || ifscope == IFSCOPE_NONE);
3192 
3193 	/*
3194 	 * A zero interface scope value indicates an "unbind".
3195 	 * Otherwise, take in whatever value the app desires;
3196 	 * the app may already know the scope (or force itself
3197 	 * to such a scope) ahead of time before the interface
3198 	 * gets attached.  It doesn't matter either way; any
3199 	 * route lookup from this point on will require an
3200 	 * exact match for the embedded interface scope.
3201 	 */
3202 	inp->inp_boundifp = ifp;
3203 	if (inp->inp_boundifp == NULL) {
3204 		inp->inp_flags &= ~INP_BOUND_IF;
3205 	} else {
3206 		inp->inp_flags |= INP_BOUND_IF;
3207 	}
3208 
3209 	/* Blow away any cached route in the PCB */
3210 	ROUTE_RELEASE(&inp->inp_route);
3211 
3212 	if (pifp != NULL) {
3213 		*pifp = ifp;
3214 	}
3215 
3216 	return 0;
3217 }
3218 
3219 /*
3220  * Handler for setting IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
3221  * as well as for setting PROC_UUID_NO_CELLULAR policy.
3222  */
3223 void
inp_set_nocellular(struct inpcb * inp)3224 inp_set_nocellular(struct inpcb *inp)
3225 {
3226 	inp->inp_flags |= INP_NO_IFT_CELLULAR;
3227 
3228 	/* Blow away any cached route in the PCB */
3229 	ROUTE_RELEASE(&inp->inp_route);
3230 }
3231 
3232 /*
3233  * Handler for clearing IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
3234  * as well as for clearing PROC_UUID_NO_CELLULAR policy.
3235  */
3236 void
inp_clear_nocellular(struct inpcb * inp)3237 inp_clear_nocellular(struct inpcb *inp)
3238 {
3239 	struct socket *so = inp->inp_socket;
3240 
3241 	/*
3242 	 * SO_RESTRICT_DENY_CELLULAR socket restriction issued on the socket
3243 	 * has a higher precendence than INP_NO_IFT_CELLULAR.  Clear the flag
3244 	 * if and only if the socket is unrestricted.
3245 	 */
3246 	if (so != NULL && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
3247 		inp->inp_flags &= ~INP_NO_IFT_CELLULAR;
3248 
3249 		/* Blow away any cached route in the PCB */
3250 		ROUTE_RELEASE(&inp->inp_route);
3251 	}
3252 }
3253 
3254 void
inp_set_noexpensive(struct inpcb * inp)3255 inp_set_noexpensive(struct inpcb *inp)
3256 {
3257 	inp->inp_flags2 |= INP2_NO_IFF_EXPENSIVE;
3258 
3259 	/* Blow away any cached route in the PCB */
3260 	ROUTE_RELEASE(&inp->inp_route);
3261 }
3262 
3263 void
inp_set_noconstrained(struct inpcb * inp)3264 inp_set_noconstrained(struct inpcb *inp)
3265 {
3266 	inp->inp_flags2 |= INP2_NO_IFF_CONSTRAINED;
3267 
3268 	/* Blow away any cached route in the PCB */
3269 	ROUTE_RELEASE(&inp->inp_route);
3270 }
3271 
3272 void
inp_set_awdl_unrestricted(struct inpcb * inp)3273 inp_set_awdl_unrestricted(struct inpcb *inp)
3274 {
3275 	inp->inp_flags2 |= INP2_AWDL_UNRESTRICTED;
3276 
3277 	/* Blow away any cached route in the PCB */
3278 	ROUTE_RELEASE(&inp->inp_route);
3279 }
3280 
3281 boolean_t
inp_get_awdl_unrestricted(struct inpcb * inp)3282 inp_get_awdl_unrestricted(struct inpcb *inp)
3283 {
3284 	return (inp->inp_flags2 & INP2_AWDL_UNRESTRICTED) ? TRUE : FALSE;
3285 }
3286 
3287 void
inp_clear_awdl_unrestricted(struct inpcb * inp)3288 inp_clear_awdl_unrestricted(struct inpcb *inp)
3289 {
3290 	inp->inp_flags2 &= ~INP2_AWDL_UNRESTRICTED;
3291 
3292 	/* Blow away any cached route in the PCB */
3293 	ROUTE_RELEASE(&inp->inp_route);
3294 }
3295 
3296 void
inp_set_intcoproc_allowed(struct inpcb * inp)3297 inp_set_intcoproc_allowed(struct inpcb *inp)
3298 {
3299 	inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
3300 
3301 	/* Blow away any cached route in the PCB */
3302 	ROUTE_RELEASE(&inp->inp_route);
3303 }
3304 
3305 boolean_t
inp_get_intcoproc_allowed(struct inpcb * inp)3306 inp_get_intcoproc_allowed(struct inpcb *inp)
3307 {
3308 	return (inp->inp_flags2 & INP2_INTCOPROC_ALLOWED) ? TRUE : FALSE;
3309 }
3310 
3311 void
inp_clear_intcoproc_allowed(struct inpcb * inp)3312 inp_clear_intcoproc_allowed(struct inpcb *inp)
3313 {
3314 	inp->inp_flags2 &= ~INP2_INTCOPROC_ALLOWED;
3315 
3316 	/* Blow away any cached route in the PCB */
3317 	ROUTE_RELEASE(&inp->inp_route);
3318 }
3319 
3320 void
inp_set_management_allowed(struct inpcb * inp)3321 inp_set_management_allowed(struct inpcb *inp)
3322 {
3323 	inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
3324 	inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
3325 
3326 	/* Blow away any cached route in the PCB */
3327 	ROUTE_RELEASE(&inp->inp_route);
3328 }
3329 
3330 boolean_t
inp_get_management_allowed(struct inpcb * inp)3331 inp_get_management_allowed(struct inpcb *inp)
3332 {
3333 	return (inp->inp_flags2 & INP2_MANAGEMENT_ALLOWED) ? TRUE : FALSE;
3334 }
3335 
3336 void
inp_clear_management_allowed(struct inpcb * inp)3337 inp_clear_management_allowed(struct inpcb *inp)
3338 {
3339 	inp->inp_flags2 &= ~INP2_MANAGEMENT_ALLOWED;
3340 
3341 	/* Blow away any cached route in the PCB */
3342 	ROUTE_RELEASE(&inp->inp_route);
3343 }
3344 
3345 #if NECP
3346 /*
3347  * Called when PROC_UUID_NECP_APP_POLICY is set.
3348  */
3349 void
inp_set_want_app_policy(struct inpcb * inp)3350 inp_set_want_app_policy(struct inpcb *inp)
3351 {
3352 	inp->inp_flags2 |= INP2_WANT_APP_POLICY;
3353 }
3354 
3355 /*
3356  * Called when PROC_UUID_NECP_APP_POLICY is cleared.
3357  */
3358 void
inp_clear_want_app_policy(struct inpcb * inp)3359 inp_clear_want_app_policy(struct inpcb *inp)
3360 {
3361 	inp->inp_flags2 &= ~INP2_WANT_APP_POLICY;
3362 }
3363 #endif /* NECP */
3364 
3365 /*
3366  * Calculate flow hash for an inp, used by an interface to identify a
3367  * flow. When an interface provides flow control advisory, this flow
3368  * hash is used as an identifier.
3369  */
3370 u_int32_t
inp_calc_flowhash(struct inpcb * inp)3371 inp_calc_flowhash(struct inpcb *inp)
3372 {
3373 #if SKYWALK
3374 
3375 	uint32_t flowid;
3376 	struct flowidns_flow_key fk;
3377 
3378 	bzero(&fk, sizeof(fk));
3379 
3380 	if (inp->inp_vflag & INP_IPV4) {
3381 		fk.ffk_af = AF_INET;
3382 		fk.ffk_laddr_v4 = inp->inp_laddr;
3383 		fk.ffk_raddr_v4 = inp->inp_faddr;
3384 	} else {
3385 		fk.ffk_af = AF_INET6;
3386 		fk.ffk_laddr_v6 = inp->in6p_laddr;
3387 		fk.ffk_raddr_v6 = inp->in6p_faddr;
3388 		/* clear embedded scope ID */
3389 		if (IN6_IS_SCOPE_EMBED(&fk.ffk_laddr_v6)) {
3390 			fk.ffk_laddr_v6.s6_addr16[1] = 0;
3391 		}
3392 		if (IN6_IS_SCOPE_EMBED(&fk.ffk_raddr_v6)) {
3393 			fk.ffk_raddr_v6.s6_addr16[1] = 0;
3394 		}
3395 	}
3396 
3397 	fk.ffk_lport = inp->inp_lport;
3398 	fk.ffk_rport = inp->inp_fport;
3399 	fk.ffk_proto = (inp->inp_ip_p != 0) ? inp->inp_ip_p :
3400 	    (uint8_t)SOCK_PROTO(inp->inp_socket);
3401 	flowidns_allocate_flowid(FLOWIDNS_DOMAIN_INPCB, &fk, &flowid);
3402 	/* Insert the inp into inp_fc_tree */
3403 	lck_mtx_lock_spin(&inp_fc_lck);
3404 	ASSERT(inp->inp_flowhash == 0);
3405 	ASSERT((inp->inp_flags2 & INP2_IN_FCTREE) == 0);
3406 	inp->inp_flowhash = flowid;
3407 	VERIFY(RB_INSERT(inp_fc_tree, &inp_fc_tree, inp) == NULL);
3408 	inp->inp_flags2 |= INP2_IN_FCTREE;
3409 	lck_mtx_unlock(&inp_fc_lck);
3410 
3411 	return flowid;
3412 
3413 #else /* !SKYWALK */
3414 
3415 	struct inp_flowhash_key fh __attribute__((aligned(8)));
3416 	u_int32_t flowhash = 0;
3417 	struct inpcb *tmp_inp = NULL;
3418 
3419 	if (inp_hash_seed == 0) {
3420 		inp_hash_seed = RandomULong();
3421 	}
3422 
3423 	bzero(&fh, sizeof(fh));
3424 
3425 	bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr));
3426 	bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr));
3427 
3428 	fh.infh_lport = inp->inp_lport;
3429 	fh.infh_fport = inp->inp_fport;
3430 	fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
3431 	fh.infh_proto = inp->inp_ip_p;
3432 	fh.infh_rand1 = RandomULong();
3433 	fh.infh_rand2 = RandomULong();
3434 
3435 try_again:
3436 	flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed);
3437 	if (flowhash == 0) {
3438 		/* try to get a non-zero flowhash */
3439 		inp_hash_seed = RandomULong();
3440 		goto try_again;
3441 	}
3442 
3443 	inp->inp_flowhash = flowhash;
3444 
3445 	/* Insert the inp into inp_fc_tree */
3446 	lck_mtx_lock_spin(&inp_fc_lck);
3447 	tmp_inp = RB_FIND(inp_fc_tree, &inp_fc_tree, inp);
3448 	if (tmp_inp != NULL) {
3449 		/*
3450 		 * There is a different inp with the same flowhash.
3451 		 * There can be a collision on flow hash but the
3452 		 * probability is low.  Let's recompute the
3453 		 * flowhash.
3454 		 */
3455 		lck_mtx_unlock(&inp_fc_lck);
3456 		/* recompute hash seed */
3457 		inp_hash_seed = RandomULong();
3458 		goto try_again;
3459 	}
3460 
3461 	RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
3462 	inp->inp_flags2 |= INP2_IN_FCTREE;
3463 	lck_mtx_unlock(&inp_fc_lck);
3464 
3465 	return flowhash;
3466 
3467 #endif /* !SKYWALK */
3468 }
3469 
3470 void
inp_flowadv(uint32_t flowhash)3471 inp_flowadv(uint32_t flowhash)
3472 {
3473 	struct inpcb *inp;
3474 
3475 	inp = inp_fc_getinp(flowhash, 0);
3476 
3477 	if (inp == NULL) {
3478 		return;
3479 	}
3480 	inp_fc_feedback(inp);
3481 }
3482 
3483 /*
3484  * Function to compare inp_fc_entries in inp flow control tree
3485  */
3486 static inline int
infc_cmp(const struct inpcb * inp1,const struct inpcb * inp2)3487 infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
3488 {
3489 	return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
3490 	           sizeof(inp1->inp_flowhash));
3491 }
3492 
3493 static struct inpcb *
inp_fc_getinp(u_int32_t flowhash,u_int32_t flags)3494 inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
3495 {
3496 	struct inpcb *inp = NULL;
3497 	int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;
3498 
3499 	lck_mtx_lock_spin(&inp_fc_lck);
3500 	key_inp.inp_flowhash = flowhash;
3501 	inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
3502 	if (inp == NULL) {
3503 		/* inp is not present, return */
3504 		lck_mtx_unlock(&inp_fc_lck);
3505 		return NULL;
3506 	}
3507 
3508 	if (flags & INPFC_REMOVE) {
3509 		ASSERT((inp->inp_flags2 & INP2_IN_FCTREE) != 0);
3510 		lck_mtx_convert_spin(&inp_fc_lck);
3511 		RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
3512 		bzero(&(inp->infc_link), sizeof(inp->infc_link));
3513 #if SKYWALK
3514 		VERIFY(inp->inp_flowhash != 0);
3515 		flowidns_release_flowid(inp->inp_flowhash);
3516 		inp->inp_flowhash = 0;
3517 #endif /* !SKYWALK */
3518 		inp->inp_flags2 &= ~INP2_IN_FCTREE;
3519 		lck_mtx_unlock(&inp_fc_lck);
3520 		return NULL;
3521 	}
3522 
3523 	if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
3524 		inp = NULL;
3525 	}
3526 	lck_mtx_unlock(&inp_fc_lck);
3527 
3528 	return inp;
3529 }
3530 
3531 static void
inp_fc_feedback(struct inpcb * inp)3532 inp_fc_feedback(struct inpcb *inp)
3533 {
3534 	struct socket *so = inp->inp_socket;
3535 
3536 	/* we already hold a want_cnt on this inp, socket can't be null */
3537 	VERIFY(so != NULL);
3538 	socket_lock(so, 1);
3539 
3540 	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3541 		socket_unlock(so, 1);
3542 		return;
3543 	}
3544 
3545 	if (inp->inp_sndinprog_cnt > 0) {
3546 		inp->inp_flags |= INP_FC_FEEDBACK;
3547 	}
3548 
3549 	/*
3550 	 * Return if the connection is not in flow-controlled state.
3551 	 * This can happen if the connection experienced
3552 	 * loss while it was in flow controlled state
3553 	 */
3554 	if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
3555 		socket_unlock(so, 1);
3556 		return;
3557 	}
3558 	inp_reset_fc_state(inp);
3559 
3560 	if (SOCK_TYPE(so) == SOCK_STREAM) {
3561 		inp_fc_unthrottle_tcp(inp);
3562 	}
3563 
3564 	socket_unlock(so, 1);
3565 }
3566 
3567 static void
inp_reset_fc_timerstat(struct inpcb * inp)3568 inp_reset_fc_timerstat(struct inpcb *inp)
3569 {
3570 	uint64_t now;
3571 
3572 	if (inp->inp_fadv_start_time == 0) {
3573 		return;
3574 	}
3575 
3576 	now = net_uptime_us();
3577 	ASSERT(now >= inp->inp_fadv_start_time);
3578 
3579 	inp->inp_fadv_total_time += (now - inp->inp_fadv_start_time);
3580 	inp->inp_fadv_cnt++;
3581 
3582 	inp->inp_fadv_start_time = 0;
3583 }
3584 
3585 static void
inp_set_fc_timerstat(struct inpcb * inp)3586 inp_set_fc_timerstat(struct inpcb *inp)
3587 {
3588 	if (inp->inp_fadv_start_time != 0) {
3589 		return;
3590 	}
3591 
3592 	inp->inp_fadv_start_time = net_uptime_us();
3593 }
3594 
3595 void
inp_reset_fc_state(struct inpcb * inp)3596 inp_reset_fc_state(struct inpcb *inp)
3597 {
3598 	struct socket *so = inp->inp_socket;
3599 	int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
3600 	int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
3601 
3602 	inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
3603 
3604 	inp_reset_fc_timerstat(inp);
3605 
3606 	if (suspended) {
3607 		so->so_flags &= ~(SOF_SUSPENDED);
3608 		soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
3609 	}
3610 
3611 	/* Give a write wakeup to unblock the socket */
3612 	if (needwakeup) {
3613 		sowwakeup(so);
3614 	}
3615 }
3616 
3617 int
inp_set_fc_state(struct inpcb * inp,int advcode)3618 inp_set_fc_state(struct inpcb *inp, int advcode)
3619 {
3620 	boolean_t is_flow_controlled = INP_WAIT_FOR_IF_FEEDBACK(inp);
3621 	struct inpcb *tmp_inp = NULL;
3622 	/*
3623 	 * If there was a feedback from the interface when
3624 	 * send operation was in progress, we should ignore
3625 	 * this flow advisory to avoid a race between setting
3626 	 * flow controlled state and receiving feedback from
3627 	 * the interface
3628 	 */
3629 	if (inp->inp_flags & INP_FC_FEEDBACK) {
3630 		return 0;
3631 	}
3632 
3633 	inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
3634 	if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
3635 	    INPFC_SOLOCKED)) != NULL) {
3636 		if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3637 			goto exit_reset;
3638 		}
3639 		VERIFY(tmp_inp == inp);
3640 		switch (advcode) {
3641 		case FADV_FLOW_CONTROLLED:
3642 			inp->inp_flags |= INP_FLOW_CONTROLLED;
3643 			inp_set_fc_timerstat(inp);
3644 			break;
3645 		case FADV_SUSPENDED:
3646 			inp->inp_flags |= INP_FLOW_SUSPENDED;
3647 			inp_set_fc_timerstat(inp);
3648 
3649 			soevent(inp->inp_socket,
3650 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
3651 
3652 			/* Record the fact that suspend event was sent */
3653 			inp->inp_socket->so_flags |= SOF_SUSPENDED;
3654 			break;
3655 		}
3656 
3657 		if (!is_flow_controlled && SOCK_TYPE(inp->inp_socket) == SOCK_STREAM) {
3658 			inp_fc_throttle_tcp(inp);
3659 		}
3660 		return 1;
3661 	}
3662 
3663 exit_reset:
3664 	inp_reset_fc_timerstat(inp);
3665 
3666 	return 0;
3667 }
3668 
3669 /*
3670  * Handler for SO_FLUSH socket option.
3671  */
3672 int
inp_flush(struct inpcb * inp,int optval)3673 inp_flush(struct inpcb *inp, int optval)
3674 {
3675 	u_int32_t flowhash = inp->inp_flowhash;
3676 	struct ifnet *rtifp, *oifp;
3677 
3678 	/* Either all classes or one of the valid ones */
3679 	if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) {
3680 		return EINVAL;
3681 	}
3682 
3683 	/* We need a flow hash for identification */
3684 	if (flowhash == 0) {
3685 		return 0;
3686 	}
3687 
3688 	/* Grab the interfaces from the route and pcb */
3689 	rtifp = ((inp->inp_route.ro_rt != NULL) ?
3690 	    inp->inp_route.ro_rt->rt_ifp : NULL);
3691 	oifp = inp->inp_last_outifp;
3692 
3693 	if (rtifp != NULL) {
3694 		if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3695 	}
3696 	if (oifp != NULL && oifp != rtifp) {
3697 		if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3698 	}
3699 
3700 	return 0;
3701 }
3702 
3703 /*
3704  * Clear the INP_INADDR_ANY flag (special case for PPP only)
3705  */
3706 void
inp_clear_INP_INADDR_ANY(struct socket * so)3707 inp_clear_INP_INADDR_ANY(struct socket *so)
3708 {
3709 	struct inpcb *inp = NULL;
3710 
3711 	socket_lock(so, 1);
3712 	inp = sotoinpcb(so);
3713 	if (inp) {
3714 		inp->inp_flags &= ~INP_INADDR_ANY;
3715 	}
3716 	socket_unlock(so, 1);
3717 }
3718 
3719 void
inp_get_soprocinfo(struct inpcb * inp,struct so_procinfo * soprocinfo)3720 inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
3721 {
3722 	struct socket *so = inp->inp_socket;
3723 
3724 	soprocinfo->spi_pid = so->last_pid;
3725 	strlcpy(&soprocinfo->spi_proc_name[0], &inp->inp_last_proc_name[0],
3726 	    sizeof(soprocinfo->spi_proc_name));
3727 	if (so->last_pid != 0) {
3728 		uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
3729 	}
3730 	/*
3731 	 * When not delegated, the effective pid is the same as the real pid
3732 	 */
3733 	if (so->so_flags & SOF_DELEGATED) {
3734 		soprocinfo->spi_delegated = 1;
3735 		soprocinfo->spi_epid = so->e_pid;
3736 		uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
3737 	} else {
3738 		soprocinfo->spi_delegated = 0;
3739 		soprocinfo->spi_epid = so->last_pid;
3740 	}
3741 	strlcpy(&soprocinfo->spi_e_proc_name[0], &inp->inp_e_proc_name[0],
3742 	    sizeof(soprocinfo->spi_e_proc_name));
3743 }
3744 
3745 int
inp_findinpcb_procinfo(struct inpcbinfo * pcbinfo,uint32_t flowhash,struct so_procinfo * soprocinfo)3746 inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
3747     struct so_procinfo *soprocinfo)
3748 {
3749 	struct inpcb *inp = NULL;
3750 	int found = 0;
3751 
3752 	bzero(soprocinfo, sizeof(struct so_procinfo));
3753 
3754 	if (!flowhash) {
3755 		return -1;
3756 	}
3757 
3758 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
3759 	LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
3760 		if (inp->inp_state != INPCB_STATE_DEAD &&
3761 		    inp->inp_socket != NULL &&
3762 		    inp->inp_flowhash == flowhash) {
3763 			found = 1;
3764 			inp_get_soprocinfo(inp, soprocinfo);
3765 			break;
3766 		}
3767 	}
3768 	lck_rw_done(&pcbinfo->ipi_lock);
3769 
3770 	return found;
3771 }
3772 
3773 #if CONFIG_PROC_UUID_POLICY
3774 static void
inp_update_cellular_policy(struct inpcb * inp,boolean_t set)3775 inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
3776 {
3777 	struct socket *so = inp->inp_socket;
3778 	int before, after;
3779 
3780 	VERIFY(so != NULL);
3781 	VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3782 
3783 	before = INP_NO_CELLULAR(inp);
3784 	if (set) {
3785 		inp_set_nocellular(inp);
3786 	} else {
3787 		inp_clear_nocellular(inp);
3788 	}
3789 	after = INP_NO_CELLULAR(inp);
3790 	if (net_io_policy_log && (before != after)) {
3791 		static const char *ok = "OK";
3792 		static const char *nok = "NOACCESS";
3793 		uuid_string_t euuid_buf;
3794 		pid_t epid;
3795 
3796 		if (so->so_flags & SOF_DELEGATED) {
3797 			uuid_unparse(so->e_uuid, euuid_buf);
3798 			epid = so->e_pid;
3799 		} else {
3800 			uuid_unparse(so->last_uuid, euuid_buf);
3801 			epid = so->last_pid;
3802 		}
3803 
3804 		/* allow this socket to generate another notification event */
3805 		so->so_ifdenied_notifies = 0;
3806 
3807 		log(LOG_DEBUG, "%s: so %llu [%d,%d] epid %d "
3808 		    "euuid %s%s %s->%s\n", __func__,
3809 		    so->so_gencnt, SOCK_DOM(so),
3810 		    SOCK_TYPE(so), epid, euuid_buf,
3811 		    (so->so_flags & SOF_DELEGATED) ?
3812 		    " [delegated]" : "",
3813 		    ((before < after) ? ok : nok),
3814 		    ((before < after) ? nok : ok));
3815 	}
3816 }
3817 
3818 #if NECP
3819 static void
inp_update_necp_want_app_policy(struct inpcb * inp,boolean_t set)3820 inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
3821 {
3822 	struct socket *so = inp->inp_socket;
3823 	int before, after;
3824 
3825 	VERIFY(so != NULL);
3826 	VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3827 
3828 	before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
3829 	if (set) {
3830 		inp_set_want_app_policy(inp);
3831 	} else {
3832 		inp_clear_want_app_policy(inp);
3833 	}
3834 	after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
3835 	if (net_io_policy_log && (before != after)) {
3836 		static const char *wanted = "WANTED";
3837 		static const char *unwanted = "UNWANTED";
3838 		uuid_string_t euuid_buf;
3839 		pid_t epid;
3840 
3841 		if (so->so_flags & SOF_DELEGATED) {
3842 			uuid_unparse(so->e_uuid, euuid_buf);
3843 			epid = so->e_pid;
3844 		} else {
3845 			uuid_unparse(so->last_uuid, euuid_buf);
3846 			epid = so->last_pid;
3847 		}
3848 
3849 		log(LOG_DEBUG, "%s: so %llu [%d,%d] epid %d "
3850 		    "euuid %s%s %s->%s\n", __func__,
3851 		    so->so_gencnt, SOCK_DOM(so),
3852 		    SOCK_TYPE(so), epid, euuid_buf,
3853 		    (so->so_flags & SOF_DELEGATED) ?
3854 		    " [delegated]" : "",
3855 		    ((before < after) ? unwanted : wanted),
3856 		    ((before < after) ? wanted : unwanted));
3857 	}
3858 }
3859 #endif /* NECP */
3860 #endif /* !CONFIG_PROC_UUID_POLICY */
3861 
3862 #if NECP
3863 void
inp_update_necp_policy(struct inpcb * inp,struct sockaddr * override_local_addr,struct sockaddr * override_remote_addr,u_int override_bound_interface)3864 inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int override_bound_interface)
3865 {
3866 	necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
3867 	if (necp_socket_should_rescope(inp) &&
3868 	    inp->inp_lport == 0 &&
3869 	    inp->inp_laddr.s_addr == INADDR_ANY &&
3870 	    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
3871 		// If we should rescope, and the socket is not yet bound
3872 		inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
3873 		inp->inp_flags2 |= INP2_SCOPED_BY_NECP;
3874 	}
3875 }
3876 #endif /* NECP */
3877 
3878 int
inp_update_policy(struct inpcb * inp)3879 inp_update_policy(struct inpcb *inp)
3880 {
3881 #if CONFIG_PROC_UUID_POLICY
3882 	struct socket *so = inp->inp_socket;
3883 	uint32_t pflags = 0;
3884 	int32_t ogencnt;
3885 	int err = 0;
3886 	uint8_t *lookup_uuid = NULL;
3887 
3888 	if (!net_io_policy_uuid ||
3889 	    so == NULL || inp->inp_state == INPCB_STATE_DEAD) {
3890 		return 0;
3891 	}
3892 
3893 	/*
3894 	 * Kernel-created sockets that aren't delegating other sockets
3895 	 * are currently exempted from UUID policy checks.
3896 	 */
3897 	if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) {
3898 		return 0;
3899 	}
3900 
3901 #if defined(XNU_TARGET_OS_OSX)
3902 	if (so->so_rpid > 0) {
3903 		lookup_uuid = so->so_ruuid;
3904 		ogencnt = so->so_policy_gencnt;
3905 		err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
3906 	}
3907 #endif
3908 	if (lookup_uuid == NULL || err == ENOENT) {
3909 		lookup_uuid = ((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid);
3910 		ogencnt = so->so_policy_gencnt;
3911 		err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
3912 	}
3913 
3914 	/*
3915 	 * Discard cached generation count if the entry is gone (ENOENT),
3916 	 * so that we go thru the checks below.
3917 	 */
3918 	if (err == ENOENT && ogencnt != 0) {
3919 		so->so_policy_gencnt = 0;
3920 	}
3921 
3922 	/*
3923 	 * If the generation count has changed, inspect the policy flags
3924 	 * and act accordingly.  If a policy flag was previously set and
3925 	 * the UUID is no longer present in the table (ENOENT), treat it
3926 	 * as if the flag has been cleared.
3927 	 */
3928 	if ((err == 0 || err == ENOENT) && ogencnt != so->so_policy_gencnt) {
3929 		/* update cellular policy for this socket */
3930 		if (err == 0 && (pflags & PROC_UUID_NO_CELLULAR)) {
3931 			inp_update_cellular_policy(inp, TRUE);
3932 		} else if (!(pflags & PROC_UUID_NO_CELLULAR)) {
3933 			inp_update_cellular_policy(inp, FALSE);
3934 		}
3935 #if NECP
3936 		/* update necp want app policy for this socket */
3937 		if (err == 0 && (pflags & PROC_UUID_NECP_APP_POLICY)) {
3938 			inp_update_necp_want_app_policy(inp, TRUE);
3939 		} else if (!(pflags & PROC_UUID_NECP_APP_POLICY)) {
3940 			inp_update_necp_want_app_policy(inp, FALSE);
3941 		}
3942 #endif /* NECP */
3943 	}
3944 
3945 	return (err == ENOENT) ? 0 : err;
3946 #else /* !CONFIG_PROC_UUID_POLICY */
3947 #pragma unused(inp)
3948 	return 0;
3949 #endif /* !CONFIG_PROC_UUID_POLICY */
3950 }
3951 
3952 unsigned int log_restricted;
3953 SYSCTL_DECL(_net_inet);
3954 SYSCTL_INT(_net_inet, OID_AUTO, log_restricted,
3955     CTLFLAG_RW | CTLFLAG_LOCKED, &log_restricted, 0,
3956     "Log network restrictions");
3957 
3958 
3959 /*
3960  * Called when we need to enforce policy restrictions in the input path.
3961  *
3962  * Returns TRUE if we're not allowed to receive data, otherwise FALSE.
3963  */
3964 static boolean_t
_inp_restricted_recv(struct inpcb * inp,struct ifnet * ifp)3965 _inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
3966 {
3967 	VERIFY(inp != NULL);
3968 
3969 	/*
3970 	 * Inbound restrictions.
3971 	 */
3972 	if (!sorestrictrecv) {
3973 		return FALSE;
3974 	}
3975 
3976 	if (ifp == NULL) {
3977 		return FALSE;
3978 	}
3979 
3980 	if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
3981 		return TRUE;
3982 	}
3983 
3984 	if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
3985 		return TRUE;
3986 	}
3987 
3988 	if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
3989 		return TRUE;
3990 	}
3991 
3992 	if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
3993 		return TRUE;
3994 	}
3995 
3996 	if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) {
3997 		return FALSE;
3998 	}
3999 
4000 	if (inp->inp_flags & INP_RECV_ANYIF) {
4001 		return FALSE;
4002 	}
4003 
4004 	/*
4005 	 * An entitled process can use the management interface without being bound
4006 	 * to the interface
4007 	 */
4008 	if (IFNET_IS_MANAGEMENT(ifp)) {
4009 		if (INP_MANAGEMENT_ALLOWED(inp)) {
4010 			return FALSE;
4011 		}
4012 		if (if_management_verbose > 1) {
4013 			os_log(OS_LOG_DEFAULT, "_inp_restricted_recv %s:%d not allowed on management interface %s",
4014 			    proc_best_name(current_proc()), proc_getpid(current_proc()),
4015 			    ifp->if_xname);
4016 		}
4017 		return TRUE;
4018 	}
4019 
4020 	if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) {
4021 		return FALSE;
4022 	}
4023 
4024 	if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
4025 		return TRUE;
4026 	}
4027 
4028 
4029 	return TRUE;
4030 }
4031 
4032 boolean_t
inp_restricted_recv(struct inpcb * inp,struct ifnet * ifp)4033 inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
4034 {
4035 	boolean_t ret;
4036 
4037 	ret = _inp_restricted_recv(inp, ifp);
4038 	if (ret == TRUE && log_restricted) {
4039 		printf("pid %d (%s) is unable to receive packets on %s\n",
4040 		    proc_getpid(current_proc()), proc_best_name(current_proc()),
4041 		    ifp->if_xname);
4042 	}
4043 	return ret;
4044 }
4045 
4046 /*
4047  * Called when we need to enforce policy restrictions in the output path.
4048  *
4049  * Returns TRUE if we're not allowed to send data out, otherwise FALSE.
4050  */
4051 static boolean_t
_inp_restricted_send(struct inpcb * inp,struct ifnet * ifp)4052 _inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
4053 {
4054 	VERIFY(inp != NULL);
4055 
4056 	/*
4057 	 * Outbound restrictions.
4058 	 */
4059 	if (!sorestrictsend) {
4060 		return FALSE;
4061 	}
4062 
4063 	if (ifp == NULL) {
4064 		return FALSE;
4065 	}
4066 
4067 	if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
4068 		return TRUE;
4069 	}
4070 
4071 	if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
4072 		return TRUE;
4073 	}
4074 
4075 	if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
4076 		return TRUE;
4077 	}
4078 
4079 	if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
4080 		return TRUE;
4081 	}
4082 
4083 	if (IFNET_IS_MANAGEMENT(ifp)) {
4084 		if (!INP_MANAGEMENT_ALLOWED(inp)) {
4085 			if (if_management_verbose > 1) {
4086 				os_log(OS_LOG_DEFAULT, "_inp_restricted_send %s:%d not allowed on management interface %s",
4087 				    proc_best_name(current_proc()), proc_getpid(current_proc()),
4088 				    ifp->if_xname);
4089 			}
4090 			return TRUE;
4091 		}
4092 	}
4093 
4094 	if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
4095 		return TRUE;
4096 	}
4097 
4098 	return FALSE;
4099 }
4100 
4101 boolean_t
inp_restricted_send(struct inpcb * inp,struct ifnet * ifp)4102 inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
4103 {
4104 	boolean_t ret;
4105 
4106 	ret = _inp_restricted_send(inp, ifp);
4107 	if (ret == TRUE && log_restricted) {
4108 		printf("pid %d (%s) is unable to transmit packets on %s\n",
4109 		    proc_getpid(current_proc()), proc_best_name(current_proc()),
4110 		    ifp->if_xname);
4111 	}
4112 	return ret;
4113 }
4114 
4115 inline void
inp_count_sndbytes(struct inpcb * inp,u_int32_t th_ack)4116 inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
4117 {
4118 	struct ifnet *ifp = inp->inp_last_outifp;
4119 	struct socket *so = inp->inp_socket;
4120 	if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
4121 	    (ifp->if_type == IFT_CELLULAR || IFNET_IS_WIFI(ifp))) {
4122 		int32_t unsent;
4123 
4124 		so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
4125 
4126 		/*
4127 		 * There can be data outstanding before the connection
4128 		 * becomes established -- TFO case
4129 		 */
4130 		if (so->so_snd.sb_cc > 0) {
4131 			inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
4132 		}
4133 
4134 		unsent = inp_get_sndbytes_allunsent(so, th_ack);
4135 		if (unsent > 0) {
4136 			inp_incr_sndbytes_unsent(so, unsent);
4137 		}
4138 	}
4139 }
4140 
4141 inline void
inp_incr_sndbytes_total(struct socket * so,int32_t len)4142 inp_incr_sndbytes_total(struct socket *so, int32_t len)
4143 {
4144 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4145 	struct ifnet *ifp = inp->inp_last_outifp;
4146 
4147 	if (ifp != NULL) {
4148 		VERIFY(ifp->if_sndbyte_total >= 0);
4149 		OSAddAtomic64(len, &ifp->if_sndbyte_total);
4150 	}
4151 }
4152 
4153 inline void
inp_decr_sndbytes_total(struct socket * so,int32_t len)4154 inp_decr_sndbytes_total(struct socket *so, int32_t len)
4155 {
4156 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4157 	struct ifnet *ifp = inp->inp_last_outifp;
4158 
4159 	if (ifp != NULL) {
4160 		if (ifp->if_sndbyte_total >= len) {
4161 			OSAddAtomic64(-len, &ifp->if_sndbyte_total);
4162 		} else {
4163 			ifp->if_sndbyte_total = 0;
4164 		}
4165 	}
4166 }
4167 
4168 inline void
inp_incr_sndbytes_unsent(struct socket * so,int32_t len)4169 inp_incr_sndbytes_unsent(struct socket *so, int32_t len)
4170 {
4171 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4172 	struct ifnet *ifp = inp->inp_last_outifp;
4173 
4174 	if (ifp != NULL) {
4175 		VERIFY(ifp->if_sndbyte_unsent >= 0);
4176 		OSAddAtomic64(len, &ifp->if_sndbyte_unsent);
4177 	}
4178 }
4179 
4180 inline void
inp_decr_sndbytes_unsent(struct socket * so,int32_t len)4181 inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
4182 {
4183 	if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
4184 		return;
4185 	}
4186 
4187 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4188 	struct ifnet *ifp = inp->inp_last_outifp;
4189 
4190 	if (ifp != NULL) {
4191 		if (ifp->if_sndbyte_unsent >= len) {
4192 			OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
4193 		} else {
4194 			ifp->if_sndbyte_unsent = 0;
4195 		}
4196 	}
4197 }
4198 
4199 inline void
inp_decr_sndbytes_allunsent(struct socket * so,u_int32_t th_ack)4200 inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
4201 {
4202 	int32_t len;
4203 
4204 	if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
4205 		return;
4206 	}
4207 
4208 	len = inp_get_sndbytes_allunsent(so, th_ack);
4209 	inp_decr_sndbytes_unsent(so, len);
4210 }
4211 
4212 #if SKYWALK
4213 inline void
inp_update_netns_flags(struct socket * so)4214 inp_update_netns_flags(struct socket *so)
4215 {
4216 	struct inpcb *inp;
4217 	uint32_t set_flags = 0;
4218 	uint32_t clear_flags = 0;
4219 
4220 	if (!(SOCK_CHECK_DOM(so, AF_INET) || SOCK_CHECK_DOM(so, AF_INET6))) {
4221 		return;
4222 	}
4223 
4224 	inp = sotoinpcb(so);
4225 
4226 	if (inp == NULL) {
4227 		return;
4228 	}
4229 
4230 	if (!NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
4231 		return;
4232 	}
4233 
4234 	if (so->so_options & SO_NOWAKEFROMSLEEP) {
4235 		set_flags |= NETNS_NOWAKEFROMSLEEP;
4236 	} else {
4237 		clear_flags |= NETNS_NOWAKEFROMSLEEP;
4238 	}
4239 
4240 	if (inp->inp_flags & INP_RECV_ANYIF) {
4241 		set_flags |= NETNS_RECVANYIF;
4242 	} else {
4243 		clear_flags |= NETNS_RECVANYIF;
4244 	}
4245 
4246 	if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) {
4247 		set_flags |= NETNS_EXTBGIDLE;
4248 	} else {
4249 		clear_flags |= NETNS_EXTBGIDLE;
4250 	}
4251 
4252 	netns_change_flags(&inp->inp_netns_token, set_flags, clear_flags);
4253 }
4254 #endif /* SKYWALK */
4255 
4256 inline void
inp_set_activity_bitmap(struct inpcb * inp)4257 inp_set_activity_bitmap(struct inpcb *inp)
4258 {
4259 	in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
4260 }
4261 
4262 inline void
inp_get_activity_bitmap(struct inpcb * inp,activity_bitmap_t * ab)4263 inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
4264 {
4265 	bcopy(&inp->inp_nw_activity, ab, sizeof(*ab));
4266 }
4267 
4268 inline void
inp_clear_activity_bitmap(struct inpcb * inp)4269 inp_clear_activity_bitmap(struct inpcb *inp)
4270 {
4271 	in_stat_clear_activity_bitmap(&inp->inp_nw_activity);
4272 }
4273 
4274 void
inp_update_last_owner(struct socket * so,struct proc * p,struct proc * ep)4275 inp_update_last_owner(struct socket *so, struct proc *p, struct proc *ep)
4276 {
4277 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4278 
4279 	if (inp == NULL) {
4280 		return;
4281 	}
4282 
4283 	if (p != NULL) {
4284 		strlcpy(&inp->inp_last_proc_name[0], proc_name_address(p), sizeof(inp->inp_last_proc_name));
4285 	}
4286 	if (so->so_flags & SOF_DELEGATED) {
4287 		if (ep != NULL) {
4288 			strlcpy(&inp->inp_e_proc_name[0], proc_name_address(ep), sizeof(inp->inp_e_proc_name));
4289 		} else {
4290 			inp->inp_e_proc_name[0] = 0;
4291 		}
4292 	} else {
4293 		inp->inp_e_proc_name[0] = 0;
4294 	}
4295 }
4296 
4297 void
inp_copy_last_owner(struct socket * so,struct socket * head)4298 inp_copy_last_owner(struct socket *so, struct socket *head)
4299 {
4300 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4301 	struct inpcb *head_inp = (struct inpcb *)head->so_pcb;
4302 
4303 	if (inp == NULL || head_inp == NULL) {
4304 		return;
4305 	}
4306 
4307 	strlcpy(&inp->inp_last_proc_name[0], &head_inp->inp_last_proc_name[0], sizeof(inp->inp_last_proc_name));
4308 	strlcpy(&inp->inp_e_proc_name[0], &head_inp->inp_e_proc_name[0], sizeof(inp->inp_e_proc_name));
4309 }
4310 
4311 static int
in_check_management_interface_proc_callout(proc_t proc,void * arg __unused)4312 in_check_management_interface_proc_callout(proc_t proc, void *arg __unused)
4313 {
4314 	struct fileproc *fp = NULL;
4315 	task_t task = proc_task(proc);
4316 	bool allowed = false;
4317 
4318 	if (IOTaskHasEntitlement(task, INTCOPROC_RESTRICTED_ENTITLEMENT) == true
4319 	    || IOTaskHasEntitlement(task, MANAGEMENT_DATA_ENTITLEMENT) == true
4320 #if DEBUG || DEVELOPMENT
4321 	    || IOTaskHasEntitlement(task, INTCOPROC_RESTRICTED_ENTITLEMENT_DEVELOPMENT) == true
4322 	    || IOTaskHasEntitlement(task, MANAGEMENT_DATA_ENTITLEMENT_DEVELOPMENT) == true
4323 #endif /* DEBUG || DEVELOPMENT */
4324 	    ) {
4325 		allowed = true;
4326 	}
4327 	if (allowed == false && management_data_unrestricted == false) {
4328 		return PROC_RETURNED;
4329 	}
4330 
4331 	proc_fdlock(proc);
4332 	fdt_foreach(fp, proc) {
4333 		struct fileglob *fg = fp->fp_glob;
4334 		struct socket *so;
4335 		struct inpcb *inp;
4336 
4337 		if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
4338 			continue;
4339 		}
4340 
4341 		so = (struct socket *)fp_get_data(fp);
4342 		if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
4343 			continue;
4344 		}
4345 
4346 		inp = (struct inpcb *)so->so_pcb;
4347 
4348 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
4349 			continue;
4350 		}
4351 
4352 		socket_lock(so, 1);
4353 
4354 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
4355 			socket_unlock(so, 1);
4356 			continue;
4357 		}
4358 		inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
4359 		inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
4360 
4361 		socket_unlock(so, 1);
4362 	}
4363 	proc_fdunlock(proc);
4364 
4365 	return PROC_RETURNED;
4366 }
4367 
4368 static bool in_management_interface_checked = false;
4369 
4370 static void
in_management_interface_event_callback(struct nwk_wq_entry * nwk_item)4371 in_management_interface_event_callback(struct nwk_wq_entry *nwk_item)
4372 {
4373 	kfree_type(struct nwk_wq_entry, nwk_item);
4374 
4375 	if (in_management_interface_checked == true) {
4376 		return;
4377 	}
4378 	in_management_interface_checked = true;
4379 
4380 	proc_iterate(PROC_ALLPROCLIST,
4381 	    in_check_management_interface_proc_callout,
4382 	    NULL, NULL, NULL);
4383 }
4384 
4385 void
in_management_interface_check(void)4386 in_management_interface_check(void)
4387 {
4388 	struct nwk_wq_entry *nwk_item;
4389 
4390 	if (if_management_interface_check_needed == false ||
4391 	    in_management_interface_checked == true) {
4392 		return;
4393 	}
4394 
4395 	nwk_item  = kalloc_type(struct nwk_wq_entry,
4396 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
4397 
4398 	nwk_item->func = in_management_interface_event_callback;
4399 
4400 	nwk_wq_enqueue(nwk_item);
4401 }
4402