xref: /xnu-11215.81.4/bsd/netinet/in_pcb.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1991, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)in_pcb.c	8.4 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/in_pcb.c,v 1.59.2.17 2001/08/13 16:26:17 ume Exp $
62  */
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/domain.h>
69 #include <sys/protosw.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/proc.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/mcache.h>
76 #include <sys/kauth.h>
77 #include <sys/priv.h>
78 #include <sys/proc_uuid_policy.h>
79 #include <sys/syslog.h>
80 #include <sys/priv.h>
81 #include <sys/file_internal.h>
82 #include <net/dlil.h>
83 
84 #include <libkern/OSAtomic.h>
85 #include <kern/locks.h>
86 
87 #include <machine/limits.h>
88 
89 #include <kern/zalloc.h>
90 
91 #include <net/if.h>
92 #include <net/if_types.h>
93 #include <net/route.h>
94 #include <net/flowhash.h>
95 #include <net/flowadv.h>
96 #include <net/nat464_utils.h>
97 #include <net/ntstat.h>
98 #include <net/nwk_wq.h>
99 #include <net/restricted_in_port.h>
100 
101 #include <netinet/in.h>
102 #include <netinet/in_pcb.h>
103 #include <netinet/inp_log.h>
104 #include <netinet/in_var.h>
105 #include <netinet/ip_var.h>
106 
107 #include <netinet/ip6.h>
108 #include <netinet6/ip6_var.h>
109 
110 #include <sys/kdebug.h>
111 #include <sys/random.h>
112 
113 #include <dev/random/randomdev.h>
114 #include <mach/boolean.h>
115 
116 #include <atm/atm_internal.h>
117 #include <pexpert/pexpert.h>
118 
119 #if NECP
120 #include <net/necp.h>
121 #endif
122 
123 #include <sys/stat.h>
124 #include <sys/ubc.h>
125 #include <sys/vnode.h>
126 
127 #include <os/log.h>
128 
129 #if SKYWALK
130 #include <skywalk/namespace/flowidns.h>
131 #endif /* SKYWALK */
132 
133 #include <IOKit/IOBSD.h>
134 
135 #include <net/sockaddr_utils.h>
136 
137 extern const char *proc_name_address(struct proc *);
138 
139 static LCK_GRP_DECLARE(inpcb_lock_grp, "inpcb");
140 static LCK_ATTR_DECLARE(inpcb_lock_attr, 0, 0);
141 static LCK_MTX_DECLARE_ATTR(inpcb_lock, &inpcb_lock_grp, &inpcb_lock_attr);
142 static LCK_MTX_DECLARE_ATTR(inpcb_timeout_lock, &inpcb_lock_grp, &inpcb_lock_attr);
143 
144 static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head);
145 
146 static u_int16_t inpcb_timeout_run = 0; /* INPCB timer is scheduled to run */
147 static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */
148 static boolean_t inpcb_ticking = FALSE;         /* "slow" timer is scheduled */
149 static boolean_t inpcb_fast_timer_on = FALSE;
150 
151 #define INPCB_GCREQ_THRESHOLD   50000
152 
153 static thread_call_t inpcb_thread_call, inpcb_fast_thread_call;
154 static void inpcb_sched_timeout(void);
155 static void inpcb_sched_lazy_timeout(void);
156 static void _inpcb_sched_timeout(unsigned int);
157 static void inpcb_timeout(void *, void *);
158 const int inpcb_timeout_lazy = 10;      /* 10 seconds leeway for lazy timers */
159 extern int tvtohz(struct timeval *);
160 
161 #if CONFIG_PROC_UUID_POLICY
162 static void inp_update_cellular_policy(struct inpcb *, boolean_t);
163 #if NECP
164 static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t);
165 #endif /* NECP */
#endif /* CONFIG_PROC_UUID_POLICY */
167 
168 #define DBG_FNC_PCB_LOOKUP      NETDBG_CODE(DBG_NETTCP, (6 << 8))
169 #define DBG_FNC_PCB_HLOOKUP     NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))
170 
171 int allow_udp_port_exhaustion = 0;
172 
173 /*
174  * These configure the range of local port addresses assigned to
175  * "unspecified" outgoing connections/packets/whatever.
176  */
177 int     ipport_lowfirstauto  = IPPORT_RESERVED - 1;     /* 1023 */
178 int     ipport_lowlastauto = IPPORT_RESERVEDSTART;      /* 600 */
179 int     ipport_firstauto = IPPORT_HIFIRSTAUTO;          /* 49152 */
180 int     ipport_lastauto  = IPPORT_HILASTAUTO;           /* 65535 */
181 int     ipport_hifirstauto = IPPORT_HIFIRSTAUTO;        /* 49152 */
182 int     ipport_hilastauto  = IPPORT_HILASTAUTO;         /* 65535 */
183 
184 #define RANGECHK(var, min, max) \
185 	if ((var) < (min)) { (var) = (min); } \
186 	else if ((var) > (max)) { (var) = (max); }
187 
188 static int
189 sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
190 {
191 #pragma unused(arg1, arg2)
192 	int error;
193 	int new_value = *(int *)oidp->oid_arg1;
194 #if (DEBUG | DEVELOPMENT)
195 	int old_value = *(int *)oidp->oid_arg1;
196 	/*
197 	 * For unit testing allow a non-superuser process with the
198 	 * proper entitlement to modify the variables
199 	 */
200 	if (req->newptr) {
201 		if (proc_suser(current_proc()) != 0 &&
202 		    (error = priv_check_cred(kauth_cred_get(),
203 		    PRIV_NETINET_RESERVEDPORT, 0))) {
204 			return EPERM;
205 		}
206 	}
207 #endif /* (DEBUG | DEVELOPMENT) */
208 
209 	error = sysctl_handle_int(oidp, &new_value, 0, req);
210 	if (!error) {
211 		if (oidp->oid_arg1 == &ipport_lowfirstauto || oidp->oid_arg1 == &ipport_lowlastauto) {
212 			RANGECHK(new_value, 1, IPPORT_RESERVED - 1);
213 		} else {
214 			RANGECHK(new_value, IPPORT_RESERVED, USHRT_MAX);
215 		}
216 		*(int *)oidp->oid_arg1 = new_value;
217 	}
218 
219 #if (DEBUG | DEVELOPMENT)
220 	os_log(OS_LOG_DEFAULT,
221 	    "%s:%u sysctl net.restricted_port.verbose: %d -> %d)",
222 	    proc_best_name(current_proc()), proc_selfpid(),
223 	    old_value, *(int *)oidp->oid_arg1);
224 #endif /* (DEBUG | DEVELOPMENT) */
225 
226 	return error;
227 }
228 
229 #undef RANGECHK
230 
231 SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange,
232     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports");
233 
234 #if (DEBUG | DEVELOPMENT)
235 #define CTLFAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY)
236 #else
237 #define CTLFAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED)
238 #endif /* (DEBUG | DEVELOPMENT) */
239 
240 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
241     CTLFAGS_IP_PORTRANGE,
242     &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
243 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
244     CTLFAGS_IP_PORTRANGE,
245     &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
246 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first,
247     CTLFAGS_IP_PORTRANGE,
248     &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
249 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last,
250     CTLFAGS_IP_PORTRANGE,
251     &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
252 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
253     CTLFAGS_IP_PORTRANGE,
254     &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
255 SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
256     CTLFAGS_IP_PORTRANGE,
257     &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");
258 SYSCTL_INT(_net_inet_ip_portrange, OID_AUTO, ipport_allow_udp_port_exhaustion,
259     CTLFLAG_LOCKED | CTLFLAG_RW, &allow_udp_port_exhaustion, 0, "");
260 
261 static uint32_t apn_fallbk_debug = 0;
262 #define apn_fallbk_log(x)       do { if (apn_fallbk_debug >= 1) log x; } while (0)
263 
264 #if !XNU_TARGET_OS_OSX
265 static boolean_t apn_fallbk_enabled = TRUE;
266 
267 SYSCTL_DECL(_net_inet);
268 SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback");
269 SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
270     &apn_fallbk_enabled, 0, "APN fallback enable");
271 SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
272     &apn_fallbk_debug, 0, "APN fallback debug enable");
273 #else /* XNU_TARGET_OS_OSX */
274 static boolean_t apn_fallbk_enabled = FALSE;
275 #endif /* XNU_TARGET_OS_OSX */
276 
277 extern int      udp_use_randomport;
278 extern int      tcp_use_randomport;
279 
280 /* Structs used for flowhash computation */
281 struct inp_flowhash_key_addr {
282 	union {
283 		struct in_addr  v4;
284 		struct in6_addr v6;
285 		u_int8_t        addr8[16];
286 		u_int16_t       addr16[8];
287 		u_int32_t       addr32[4];
288 	} infha;
289 };
290 
291 struct inp_flowhash_key {
292 	struct inp_flowhash_key_addr    infh_laddr;
293 	struct inp_flowhash_key_addr    infh_faddr;
294 	u_int32_t                       infh_lport;
295 	u_int32_t                       infh_fport;
296 	u_int32_t                       infh_af;
297 	u_int32_t                       infh_proto;
298 	u_int32_t                       infh_rand1;
299 	u_int32_t                       infh_rand2;
300 };
301 
302 #if !SKYWALK
303 static u_int32_t inp_hash_seed = 0;
304 #endif /* !SKYWALK */
305 
306 static int infc_cmp(const struct inpcb *, const struct inpcb *);
307 
308 /* Flags used by inp_fc_getinp */
309 #define INPFC_SOLOCKED  0x1
310 #define INPFC_REMOVE    0x2
311 static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t);
312 
313 static void inp_fc_feedback(struct inpcb *);
314 extern void tcp_remove_from_time_wait(struct inpcb *inp);
315 
316 static LCK_MTX_DECLARE_ATTR(inp_fc_lck, &inpcb_lock_grp, &inpcb_lock_attr);
317 
318 RB_HEAD(inp_fc_tree, inpcb) inp_fc_tree;
319 RB_PROTOTYPE(inp_fc_tree, inpcb, infc_link, infc_cmp);
320 RB_GENERATE(inp_fc_tree, inpcb, infc_link, infc_cmp);
321 
322 /*
323  * Use this inp as a key to find an inp in the flowhash tree.
324  * Accesses to it are protected by inp_fc_lck.
325  */
326 struct inpcb key_inp;
327 
328 /*
329  * in_pcb.c: manage the Protocol Control Blocks.
330  */
331 
332 void
in_pcbinit(void)333 in_pcbinit(void)
334 {
335 	static int inpcb_initialized = 0;
336 	uint32_t logging_config;
337 
338 	VERIFY(!inpcb_initialized);
339 	inpcb_initialized = 1;
340 
341 	logging_config = atm_get_diagnostic_config();
342 	if (logging_config & 0x80000000) {
343 		inp_log_privacy = 1;
344 	}
345 
346 	inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout,
347 	    NULL, THREAD_CALL_PRIORITY_KERNEL);
348 	/* Give it an arg so that we know that this is the fast timer */
349 	inpcb_fast_thread_call = thread_call_allocate_with_priority(
350 		inpcb_timeout, &inpcb_timeout, THREAD_CALL_PRIORITY_KERNEL);
351 	if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) {
352 		panic("unable to alloc the inpcb thread call");
353 	}
354 
355 	/*
356 	 * Initialize data structures required to deliver
357 	 * flow advisories.
358 	 */
359 	lck_mtx_lock(&inp_fc_lck);
360 	RB_INIT(&inp_fc_tree);
361 	bzero(&key_inp, sizeof(key_inp));
362 	lck_mtx_unlock(&inp_fc_lck);
363 }
364 
/*
 * True when a timer-request block has any lazy, fast or no-delay
 * requests outstanding.
 */
#define INPCB_HAVE_TIMER_REQ(req)       (((req).intimer_lazy > 0) || \
	((req).intimer_fast > 0) || ((req).intimer_nodelay > 0))
/*
 * Thread-call work function shared by the lazy and the fast inpcb timers
 * (arg0 is non-NULL only for the fast flavor; see in_pcbinit()).  Walks
 * every registered inpcbinfo and runs its garbage-collector and/or timer
 * callbacks when requested, then re-arms itself while work remains.
 */
static void
inpcb_timeout(void *arg0, void *arg1)
{
#pragma unused(arg1)
	struct inpcbinfo *ipi;
	boolean_t t, gc;
	struct intimercount gccnt, tmcnt;

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	bzero(&gccnt, sizeof(gccnt));
	bzero(&tmcnt, sizeof(tmcnt));

	/* Consume the pending gc/ticking flags under the timeout lock */
	lck_mtx_lock_spin(&inpcb_timeout_lock);
	gc = inpcb_garbage_collecting;
	inpcb_garbage_collecting = FALSE;

	t = inpcb_ticking;
	inpcb_ticking = FALSE;

	if (gc || t) {
		/* Drop the timeout lock before taking inpcb_lock (lock order) */
		lck_mtx_unlock(&inpcb_timeout_lock);

		lck_mtx_lock(&inpcb_lock);
		TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) {
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) {
				/*
				 * Clear the request counters first; the gc
				 * callback may post fresh requests, which
				 * are then accumulated into gccnt below.
				 */
				bzero(&ipi->ipi_gc_req,
				    sizeof(ipi->ipi_gc_req));
				if (gc && ipi->ipi_gc != NULL) {
					ipi->ipi_gc(ipi);
					gccnt.intimer_lazy +=
					    ipi->ipi_gc_req.intimer_lazy;
					gccnt.intimer_fast +=
					    ipi->ipi_gc_req.intimer_fast;
					gccnt.intimer_nodelay +=
					    ipi->ipi_gc_req.intimer_nodelay;
				}
			}
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) {
				/* Same reset-then-accumulate pattern for the timer */
				bzero(&ipi->ipi_timer_req,
				    sizeof(ipi->ipi_timer_req));
				if (t && ipi->ipi_timer != NULL) {
					ipi->ipi_timer(ipi);
					tmcnt.intimer_lazy +=
					    ipi->ipi_timer_req.intimer_lazy;
					tmcnt.intimer_fast +=
					    ipi->ipi_timer_req.intimer_fast;
					tmcnt.intimer_nodelay +=
					    ipi->ipi_timer_req.intimer_nodelay;
				}
			}
		}
		lck_mtx_unlock(&inpcb_lock);
		lck_mtx_lock_spin(&inpcb_timeout_lock);
	}

	/* lock was dropped above, so check first before overriding */
	if (!inpcb_garbage_collecting) {
		inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt);
	}
	if (!inpcb_ticking) {
		inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt);
	}

	/* arg0 will be set if we are the fast timer */
	if (arg0 != NULL) {
		inpcb_fast_timer_on = FALSE;
	}
	inpcb_timeout_run--;
	/*
	 * NOTE(review): inpcb_timeout_run is u_int16_t, so ">= 0" is
	 * trivially true; an underflow would still trip the "< 2" half
	 * by wrapping to a large value.
	 */
	VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);

	/* re-arm the timer if there's work to do */
	if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) {
		inpcb_sched_timeout();
	} else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) {
		/* be lazy when idle with little activity */
		inpcb_sched_lazy_timeout();
	} else {
		inpcb_sched_timeout();
	}

	lck_mtx_unlock(&inpcb_timeout_lock);
}
455 
/*
 * Request the no-leeway (fast) flavor of the inpcb timer.
 * Caller must hold inpcb_timeout_lock (asserted in _inpcb_sched_timeout).
 */
static void
inpcb_sched_timeout(void)
{
	_inpcb_sched_timeout(0);
}
461 
/*
 * Request the lazy flavor of the inpcb timer, allowing inpcb_timeout_lazy
 * (10) seconds of scheduling leeway.
 * Caller must hold inpcb_timeout_lock (asserted in _inpcb_sched_timeout).
 */
static void
inpcb_sched_lazy_timeout(void)
{
	_inpcb_sched_timeout(inpcb_timeout_lazy);
}
467 
/*
 * Arm the inpcb timeout thread call one second out.  offset == 0 requests
 * the "fast" (no-leeway) timer; a non-zero offset requests the lazy timer
 * with that many seconds of leeway.  Called with inpcb_timeout_lock held,
 * possibly as a spin lock — it is converted to a full mutex before any
 * blocking thread_call operation.
 */
static void
_inpcb_sched_timeout(unsigned int offset)
{
	uint64_t deadline, leeway;

	/* Both flavors fire one second from now; only the leeway differs */
	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	LCK_MTX_ASSERT(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
	if (inpcb_timeout_run == 0 &&
	    (inpcb_garbage_collecting || inpcb_ticking)) {
		/* no timer outstanding: schedule the requested flavor */
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		if (offset == 0) {
			inpcb_fast_timer_on = TRUE;
			thread_call_enter_delayed(inpcb_fast_thread_call,
			    deadline);
		} else {
			inpcb_fast_timer_on = FALSE;
			clock_interval_to_absolutetime_interval(offset,
			    NSEC_PER_SEC, &leeway);
			thread_call_enter_delayed_with_leeway(
				inpcb_thread_call, NULL, deadline, leeway,
				THREAD_CALL_DELAY_LEEWAY);
		}
	} else if (inpcb_timeout_run == 1 &&
	    offset == 0 && !inpcb_fast_timer_on) {
		/*
		 * Since the request was for a fast timer but the
		 * scheduled timer is a lazy timer, try to schedule
		 * another instance of fast timer also.
		 */
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		inpcb_fast_timer_on = TRUE;
		thread_call_enter_delayed(inpcb_fast_thread_call, deadline);
	}
}
504 
505 void
inpcb_gc_sched(struct inpcbinfo * ipi,u_int32_t type)506 inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type)
507 {
508 	u_int32_t gccnt;
509 
510 	lck_mtx_lock_spin(&inpcb_timeout_lock);
511 	inpcb_garbage_collecting = TRUE;
512 	gccnt = ipi->ipi_gc_req.intimer_nodelay +
513 	    ipi->ipi_gc_req.intimer_fast;
514 
515 	if (gccnt > INPCB_GCREQ_THRESHOLD) {
516 		type = INPCB_TIMER_FAST;
517 	}
518 
519 	switch (type) {
520 	case INPCB_TIMER_NODELAY:
521 		os_atomic_inc(&ipi->ipi_gc_req.intimer_nodelay, relaxed);
522 		inpcb_sched_timeout();
523 		break;
524 	case INPCB_TIMER_FAST:
525 		os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
526 		inpcb_sched_timeout();
527 		break;
528 	default:
529 		os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
530 		inpcb_sched_lazy_timeout();
531 		break;
532 	}
533 	lck_mtx_unlock(&inpcb_timeout_lock);
534 }
535 
536 void
inpcb_timer_sched(struct inpcbinfo * ipi,u_int32_t type)537 inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type)
538 {
539 	lck_mtx_lock_spin(&inpcb_timeout_lock);
540 	inpcb_ticking = TRUE;
541 	switch (type) {
542 	case INPCB_TIMER_NODELAY:
543 		os_atomic_inc(&ipi->ipi_timer_req.intimer_nodelay, relaxed);
544 		inpcb_sched_timeout();
545 		break;
546 	case INPCB_TIMER_FAST:
547 		os_atomic_inc(&ipi->ipi_timer_req.intimer_fast, relaxed);
548 		inpcb_sched_timeout();
549 		break;
550 	default:
551 		os_atomic_inc(&ipi->ipi_timer_req.intimer_lazy, relaxed);
552 		inpcb_sched_lazy_timeout();
553 		break;
554 	}
555 	lck_mtx_unlock(&inpcb_timeout_lock);
556 }
557 
558 void
in_pcbinfo_attach(struct inpcbinfo * ipi)559 in_pcbinfo_attach(struct inpcbinfo *ipi)
560 {
561 	struct inpcbinfo *ipi0;
562 
563 	lck_mtx_lock(&inpcb_lock);
564 	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
565 		if (ipi0 == ipi) {
566 			panic("%s: ipi %p already in the list",
567 			    __func__, ipi);
568 			/* NOTREACHED */
569 		}
570 	}
571 	TAILQ_INSERT_TAIL(&inpcb_head, ipi, ipi_entry);
572 	lck_mtx_unlock(&inpcb_lock);
573 }
574 
575 int
in_pcbinfo_detach(struct inpcbinfo * ipi)576 in_pcbinfo_detach(struct inpcbinfo *ipi)
577 {
578 	struct inpcbinfo *ipi0;
579 	int error = 0;
580 
581 	lck_mtx_lock(&inpcb_lock);
582 	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
583 		if (ipi0 == ipi) {
584 			break;
585 		}
586 	}
587 	if (ipi0 != NULL) {
588 		TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry);
589 	} else {
590 		error = ENXIO;
591 	}
592 	lck_mtx_unlock(&inpcb_lock);
593 
594 	return error;
595 }
596 
597 __attribute__((noinline))
598 char *
inp_snprintf_tuple(struct inpcb * inp,char * __sized_by (buflen)buf,size_t buflen)599 inp_snprintf_tuple(struct inpcb *inp, char *__sized_by(buflen) buf, size_t buflen)
600 {
601 	char laddrstr[MAX_IPv6_STR_LEN];
602 	char faddrstr[MAX_IPv6_STR_LEN];
603 	uint16_t lport = 0;
604 	uint16_t fport = 0;
605 	uint16_t proto = IPPROTO_IP;
606 
607 	if (inp->inp_socket != NULL) {
608 		proto = SOCK_PROTO(inp->inp_socket);
609 
610 		if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) {
611 			lport  = inp->inp_lport;
612 			fport = inp->inp_fport;
613 		}
614 	}
615 	if (inp->inp_vflag & INP_IPV4) {
616 		inet_ntop(AF_INET, (void *)&inp->inp_laddr.s_addr, laddrstr, sizeof(laddrstr));
617 		inet_ntop(AF_INET, (void *)&inp->inp_faddr.s_addr, faddrstr, sizeof(faddrstr));
618 	} else if (inp->inp_vflag & INP_IPV6) {
619 		inet_ntop(AF_INET6, (void *)&inp->in6p_faddr, laddrstr, sizeof(laddrstr));
620 		inet_ntop(AF_INET6, (void *)&inp->in6p_faddr, faddrstr, sizeof(faddrstr));
621 	}
622 	snprintf(buf, buflen, "[%u %s:%u %s:%u]",
623 	    proto, laddrstr, ntohs(lport), faddrstr, ntohs(fport));
624 
625 	return buf;
626 }
627 
628 __attribute__((noinline))
629 void
in_pcb_check_management_entitled(struct inpcb * inp)630 in_pcb_check_management_entitled(struct inpcb *inp)
631 {
632 	if (inp->inp_flags2 & INP2_MANAGEMENT_CHECKED) {
633 		return;
634 	}
635 
636 	if (management_data_unrestricted) {
637 		inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
638 		inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
639 	} else if (if_management_interface_check_needed == true) {
640 		inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
641 		/*
642 		 * Note that soopt_cred_check check both intcoproc entitlements
643 		 * We check MANAGEMENT_DATA_ENTITLEMENT as there is no corresponding PRIV value
644 		 */
645 		if (soopt_cred_check(inp->inp_socket, PRIV_NET_RESTRICTED_INTCOPROC, false, false) == 0
646 		    || IOCurrentTaskHasEntitlement(MANAGEMENT_DATA_ENTITLEMENT) == true
647 #if DEBUG || DEVELOPMENT
648 		    || IOCurrentTaskHasEntitlement(MANAGEMENT_DATA_ENTITLEMENT_DEVELOPMENT) == true
649 #endif /* DEBUG || DEVELOPMENT */
650 		    ) {
651 			inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
652 		} else {
653 			if (__improbable(if_management_verbose > 1)) {
654 				char buf[128];
655 
656 				os_log(OS_LOG_DEFAULT, "in_pcb_check_management_entitled %s:%d not management entitled %s",
657 				    proc_best_name(current_proc()),
658 				    proc_selfpid(),
659 				    inp_snprintf_tuple(inp, buf, sizeof(buf)));
660 			}
661 		}
662 	}
663 }
664 
665 __attribute__((noinline))
666 void
in_pcb_check_ultra_constrained_entitled(struct inpcb * inp)667 in_pcb_check_ultra_constrained_entitled(struct inpcb *inp)
668 {
669 	if (inp->inp_flags2 & INP2_ULTRA_CONSTRAINED_CHECKED) {
670 		return;
671 	}
672 
673 	if (if_ultra_constrained_check_needed) {
674 		inp->inp_flags2 |= INP2_ULTRA_CONSTRAINED_CHECKED;
675 		if (IOCurrentTaskHasEntitlement(ULTRA_CONSTRAINED_ENTITLEMENT)) {
676 			inp->inp_flags2 |= INP2_ULTRA_CONSTRAINED_ALLOWED;
677 		}
678 	}
679 }
680 
681 /*
682  * Allocate a PCB and associate it with the socket.
683  *
684  * Returns:	0			Success
685  *		ENOBUFS
686  *		ENOMEM
687  */
int
in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp;
	caddr_t temp;

	/*
	 * Either take a fresh zeroed PCB from the protocol's zone, or
	 * recycle the one cached in the socket layer — re-zeroing it but
	 * preserving its saved protocol-PCB pointer.
	 */
	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
		void *__unsafe_indexable addr = __zalloc_flags(pcbinfo->ipi_zone,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		__builtin_assume(addr != NULL);
		/*
		 * N.B: the allocation above may actually be inp_tp
		 * which is a structure that includes inpcb, but for
		 * the purposes of this function we just touch
		 * struct inpcb.
		 */
		inp = __unsafe_forge_single(struct inpcb *, addr);
	} else {
		inp = (struct inpcb *)(void *)so->so_saved_pcb;
		temp = inp->inp_saved_ppcb;
		bzero((caddr_t)inp, sizeof(*inp));
		inp->inp_saved_ppcb = temp;
	}

	/* inp_gencnt is assigned again below, under ipi_lock */
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	inp->inp_pcbinfo = pcbinfo;
	inp->inp_socket = so;
/*
 * Round _ptr up to the alignment of *_type and return it cast to _type;
 * used to carve 64-bit aligned stat structures out of the byte arrays
 * embedded in struct inpcb.
 */
#define INP_ALIGN_AND_CAST(_type, _ptr) ({                                \
	typeof((_type)(void *__header_bidi_indexable)NULL) __roundup_type;\
	const volatile char *__roundup_align_ptr = (const volatile char *)(_ptr); \
	__roundup_align_ptr += P2ROUNDUP((uintptr_t)__roundup_align_ptr,  \
	                                 _Alignof(typeof(*__roundup_type))) - (uintptr_t)__roundup_align_ptr; \
	__DEQUALIFY(_type, __roundup_align_ptr);                          \
})
	/* make sure inp_stat is always 64-bit aligned */
	inp->inp_stat = INP_ALIGN_AND_CAST(struct inp_stat *, inp->inp_stat_store);
	if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) +
	    sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) {
		panic("%s: insufficient space to align inp_stat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_cstat is always 64-bit aligned */
	inp->inp_cstat = INP_ALIGN_AND_CAST(struct inp_stat *, inp->inp_cstat_store);
	if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) +
	    sizeof(*inp->inp_cstat) > sizeof(inp->inp_cstat_store)) {
		panic("%s: insufficient space to align inp_cstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_wstat is always 64-bit aligned */
	inp->inp_wstat = INP_ALIGN_AND_CAST(struct inp_stat *, inp->inp_wstat_store);
	if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) +
	    sizeof(*inp->inp_wstat) > sizeof(inp->inp_wstat_store)) {
		panic("%s: insufficient space to align inp_wstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_Wstat is always 64-bit aligned */
	inp->inp_Wstat = INP_ALIGN_AND_CAST(struct inp_stat *, inp->inp_Wstat_store);
	if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) +
	    sizeof(*inp->inp_Wstat) > sizeof(inp->inp_Wstat_store)) {
		panic("%s: insufficient space to align inp_Wstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_btstat is always 64-bit aligned */
	inp->inp_btstat = INP_ALIGN_AND_CAST(struct inp_stat *, inp->inp_btstat_store);
	if (((uintptr_t)inp->inp_btstat - (uintptr_t)inp->inp_btstat_store) +
	    sizeof(*inp->inp_btstat) > sizeof(inp->inp_btstat_store)) {
		panic("%s: insufficient space to align inp_btstat", __func__);
		/* NOTREACHED */
	}
#undef INP_ALIGN_AND_CAST
	so->so_pcb = (caddr_t)inp;

	/* Per-PCB mutex for protocols that lock at PCB granularity */
	if (so->so_proto->pr_flags & PR_PCBLOCK) {
		lck_mtx_init(&inp->inpcb_mtx, pcbinfo->ipi_lock_grp,
		    &pcbinfo->ipi_lock_attr);
	}

	if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) {
		inp->inp_flags |= IN6P_IPV6_V6ONLY;
	}

	if (ip6_auto_flowlabel) {
		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
	}
	if (intcoproc_unrestricted) {
		inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
	}

	(void) inp_update_policy(inp);

	/* Publish the new PCB on the global list, under the info lock */
	lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
	pcbinfo->ipi_count++;
	lck_rw_done(&pcbinfo->ipi_lock);
	return 0;
}
790 
791 /*
792  * in_pcblookup_local_and_cleanup does everything
793  * in_pcblookup_local does but it checks for a socket
794  * that's going away. Since we know that the lock is
795  * held read+write when this function is called, we
796  * can safely dispose of this socket like the slow
797  * timer would usually do and return NULL. This is
798  * great for bind.
799  */
800 struct inpcb *
in_pcblookup_local_and_cleanup(struct inpcbinfo * pcbinfo,struct in_addr laddr,u_int lport_arg,int wild_okay)801 in_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, struct in_addr laddr,
802     u_int lport_arg, int wild_okay)
803 {
804 	struct inpcb *inp;
805 
806 	/* Perform normal lookup */
807 	inp = in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay);
808 
809 	/* Check if we found a match but it's waiting to be disposed */
810 	if (inp != NULL && inp->inp_wantcnt == WNT_STOPUSING) {
811 		struct socket *so = inp->inp_socket;
812 
813 		socket_lock(so, 0);
814 
815 		if (so->so_usecount == 0) {
816 			if (inp->inp_state != INPCB_STATE_DEAD) {
817 				in_pcbdetach(inp);
818 			}
819 			in_pcbdispose(inp);     /* will unlock & destroy */
820 			inp = NULL;
821 		} else {
822 			socket_unlock(so, 0);
823 		}
824 	}
825 
826 	return inp;
827 }
828 
829 static void
in_pcb_conflict_post_msg(u_int16_t port)830 in_pcb_conflict_post_msg(u_int16_t port)
831 {
832 	/*
833 	 * Radar 5523020 send a kernel event notification if a
834 	 * non-participating socket tries to bind the port a socket
835 	 * who has set SOF_NOTIFYCONFLICT owns.
836 	 */
837 	struct kev_msg ev_msg;
838 	struct kev_in_portinuse in_portinuse;
839 
840 	bzero(&in_portinuse, sizeof(struct kev_in_portinuse));
841 	bzero(&ev_msg, sizeof(struct kev_msg));
842 	in_portinuse.port = ntohs(port);        /* port in host order */
843 	in_portinuse.req_pid = proc_selfpid();
844 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
845 	ev_msg.kev_class = KEV_NETWORK_CLASS;
846 	ev_msg.kev_subclass = KEV_INET_SUBCLASS;
847 	ev_msg.event_code = KEV_INET_PORTINUSE;
848 	ev_msg.dv[0].data_ptr = &in_portinuse;
849 	ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse);
850 	ev_msg.dv[1].data_length = 0;
851 	dlil_post_complete_msg(NULL, &ev_msg);
852 }
853 
854 /*
855  * Bind an INPCB to an address and/or port.  This routine should not alter
856  * the caller-supplied local address "nam" or remote address "remote".
857  *
858  * Returns:	0			Success
859  *		EADDRNOTAVAIL		Address not available.
860  *		EINVAL			Invalid argument
861  *		EAFNOSUPPORT		Address family not supported [notdef]
862  *		EACCES			Permission denied
863  *		EADDRINUSE		Address in use
864  *		EAGAIN			Resource unavailable, try again
865  *		priv_check_cred:EPERM	Operation not permitted
866  */
int
in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct sockaddr *remote, struct proc *p)
{
	struct socket *so = inp->inp_socket;
	unsigned short *lastport;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0, rand_port = 0;
	int wild = 0;
	int reuseport = (so->so_options & SO_REUSEPORT);
	int error = 0;
	int randomport;
	int conflict = 0;
	boolean_t anonport = FALSE;
	kauth_cred_t cred;
	struct in_addr laddr;
	struct ifnet *outif = NULL;

	/* Caller must have flagged the pcb as mid-bind before entering */
	ASSERT((inp->inp_flags2 & INP2_BIND_IN_PROGRESS) != 0);

	if (TAILQ_EMPTY(&in_ifaddrhead)) { /* XXX broken! */
		error = EADDRNOTAVAIL;
		goto done;
	}
	if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) {
		wild = 1;
	}

	bzero(&laddr, sizeof(laddr));

	/*
	 * Lock ordering: drop the socket lock before taking the pcbinfo
	 * lock exclusively; the caller's reference keeps "so" alive.
	 * Because the locks were dropped, every decision below must be
	 * re-validated once the socket lock is re-acquired.
	 */
	socket_unlock(so, 0); /* keep reference on socket */
	lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
		/* another thread completed the bind */
		lck_rw_done(&pcbinfo->ipi_lock);
		socket_lock(so, 0);
		error = EINVAL;
		goto done;
	}

	/* Explicit bind: validate the caller-supplied local address/port */
	if (nam != NULL) {
		if (nam->sa_len != sizeof(struct sockaddr_in)) {
			lck_rw_done(&pcbinfo->ipi_lock);
			socket_lock(so, 0);
			error = EINVAL;
			goto done;
		}
#if 0
		/*
		 * We should check the family, but old programs
		 * incorrectly fail to initialize it.
		 */
		if (nam->sa_family != AF_INET) {
			lck_rw_done(&pcbinfo->ipi_lock);
			socket_lock(so, 0);
			error = EAFNOSUPPORT;
			goto done;
		}
#endif /* 0 */
		lport = SIN(nam)->sin_port;

		if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr))) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR) {
				reuseport = SO_REUSEADDR | SO_REUSEPORT;
			}
		} else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) {
			struct sockaddr_in sin;
			struct ifaddr *ifa;

			/* Sanitized for interface address searches */
			SOCKADDR_ZERO(&sin, sizeof(sin));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof(struct sockaddr_in);
			sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

			/* A specific unicast address must belong to a local interface */
			ifa = ifa_ifwithaddr(SA(&sin));
			if (ifa == NULL) {
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				error = EADDRNOTAVAIL;
				goto done;
			} else {
				/*
				 * Opportunistically determine the outbound
				 * interface that may be used; this may not
				 * hold true if we end up using a route
				 * going over a different interface, e.g.
				 * when sending to a local address.  This
				 * will get updated again after sending.
				 */
				IFA_LOCK(ifa);
				outif = ifa->ifa_ifp;
				IFA_UNLOCK(ifa);
				ifa_remref(ifa);
			}
		}

#if SKYWALK
		/*
		 * The port may already be reserved externally (via NECP);
		 * adopt that reservation or reject a mismatched request.
		 */
		if (inp->inp_flags2 & INP2_EXTERNAL_PORT) {
			// Extract the external flow info
			struct ns_flow_info nfi = {};
			error = necp_client_get_netns_flow_info(inp->necp_client_uuid,
			    &nfi);
			if (error != 0) {
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				goto done;
			}

			// Extract the reserved port
			u_int16_t reserved_lport = 0;
			if (nfi.nfi_laddr.sa.sa_family == AF_INET) {
				reserved_lport = nfi.nfi_laddr.sin.sin_port;
			} else if (nfi.nfi_laddr.sa.sa_family == AF_INET6) {
				reserved_lport = nfi.nfi_laddr.sin6.sin6_port;
			} else {
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				error = EINVAL;
				goto done;
			}

			// Validate or use the reserved port
			if (lport == 0) {
				lport = reserved_lport;
			} else if (lport != reserved_lport) {
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				error = EINVAL;
				goto done;
			}
		}

		/* Do not allow reserving a UDP port if remaining UDP port count is below 4096 */
		if (SOCK_PROTO(so) == IPPROTO_UDP && !allow_udp_port_exhaustion) {
			uint32_t current_reservations = 0;
			if (inp->inp_vflag & INP_IPV6) {
				current_reservations = netns_lookup_reservations_count_in6(inp->in6p_laddr, IPPROTO_UDP);
			} else {
				current_reservations = netns_lookup_reservations_count_in(inp->inp_laddr, IPPROTO_UDP);
			}
			if (USHRT_MAX - UDP_RANDOM_PORT_RESERVE < current_reservations) {
				log(LOG_ERR, "UDP port not available, less than 4096 UDP ports left");
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				error = EADDRNOTAVAIL;
				goto done;
			}
		}

#endif /* SKYWALK */

		/* A specific port was requested: privilege and conflict checks */
		if (lport != 0) {
			struct inpcb *t;
			uid_t u;

#if XNU_TARGET_OS_OSX
			/* Ports below IPPORT_RESERVED need PRIV_NETINET_RESERVEDPORT */
			if (ntohs(lport) < IPPORT_RESERVED &&
			    SIN(nam)->sin_addr.s_addr != 0 &&
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
				cred = kauth_cred_proc_ref(p);
				error = priv_check_cred(cred,
				    PRIV_NETINET_RESERVEDPORT, 0);
				kauth_cred_unref(&cred);
				if (error != 0) {
					lck_rw_done(&pcbinfo->ipi_lock);
					socket_lock(so, 0);
					error = EACCES;
					goto done;
				}
			}
#endif /* XNU_TARGET_OS_OSX */
			/*
			 * Check wether the process is allowed to bind to a restricted port
			 */
			if (!current_task_can_use_restricted_in_port(lport,
			    (uint8_t)SOCK_PROTO(so), PORT_FLAGS_BSD)) {
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				error = EADDRINUSE;
				goto done;
			}

			/*
			 * Cross-user check: a non-root user may not take over
			 * a port bound by a different user unless the existing
			 * socket allows it (SO_REUSEPORT on a wildcard pair,
			 * SOF_REUSESHAREUID, or a matching NECP external-port
			 * reservation).
			 */
			if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
			    (u = kauth_cred_getuid(so->so_cred)) != 0 &&
			    (t = in_pcblookup_local_and_cleanup(
				    inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
				    INPLOOKUP_WILDCARD)) != NULL &&
			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
			    t->inp_laddr.s_addr != INADDR_ANY ||
			    !(t->inp_socket->so_options & SO_REUSEPORT)) &&
			    (u != kauth_cred_getuid(t->inp_socket->so_cred)) &&
			    !(t->inp_socket->so_flags & SOF_REUSESHAREUID) &&
			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
			    t->inp_laddr.s_addr != INADDR_ANY) &&
			    (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
				if ((t->inp_socket->so_flags &
				    SOF_NOTIFYCONFLICT) &&
				    !(so->so_flags & SOF_NOTIFYCONFLICT)) {
					conflict = 1;
				}

				lck_rw_done(&pcbinfo->ipi_lock);

				if (conflict) {
					in_pcb_conflict_post_msg(lport);
				}

				socket_lock(so, 0);
				error = EADDRINUSE;
				goto done;
			}
			/*
			 * General conflict check: reject when an existing pcb
			 * holds the same address/port and did not set a
			 * matching SO_REUSE* option.
			 */
			t = in_pcblookup_local_and_cleanup(pcbinfo,
			    SIN(nam)->sin_addr, lport, wild);
			if (t != NULL &&
			    (reuseport & t->inp_socket->so_options) == 0 &&
			    (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
				if (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
				    t->inp_laddr.s_addr != INADDR_ANY ||
				    SOCK_DOM(so) != PF_INET6 ||
				    SOCK_DOM(t->inp_socket) != PF_INET6) {
					if ((t->inp_socket->so_flags &
					    SOF_NOTIFYCONFLICT) &&
					    !(so->so_flags & SOF_NOTIFYCONFLICT)) {
						conflict = 1;
					}

					lck_rw_done(&pcbinfo->ipi_lock);

					if (conflict) {
						in_pcb_conflict_post_msg(lport);
					}
					socket_lock(so, 0);
					error = EADDRINUSE;
					goto done;
				}
			}
#if SKYWALK
			/* Reserve the requested port in the shared namespace */
			if ((SOCK_PROTO(so) == IPPROTO_TCP ||
			    SOCK_PROTO(so) == IPPROTO_UDP) &&
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
				int res_err = 0;
				if (inp->inp_vflag & INP_IPV6) {
					res_err = netns_reserve_in6(
						&inp->inp_netns_token,
						SIN6(nam)->sin6_addr,
						(uint8_t)SOCK_PROTO(so), lport, NETNS_BSD,
						NULL);
				} else {
					res_err = netns_reserve_in(
						&inp->inp_netns_token,
						SIN(nam)->sin_addr, (uint8_t)SOCK_PROTO(so),
						lport, NETNS_BSD, NULL);
				}
				if (res_err != 0) {
					lck_rw_done(&pcbinfo->ipi_lock);
					socket_lock(so, 0);
					error = EADDRINUSE;
					goto done;
				}
			}
#endif /* SKYWALK */
		}
		laddr = SIN(nam)->sin_addr;
	}
	/* No port requested (or none reserved): pick an ephemeral one */
	if (lport == 0) {
		u_short first, last;
		int count;
		bool found;

		/*
		 * Override wild = 1 for implicit bind (mainly used by connect)
		 * For implicit bind (lport == 0), we always use an unused port,
		 * so REUSEADDR|REUSEPORT don't apply
		 */
		wild = 1;

		randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
		    (so->so_type == SOCK_STREAM ? tcp_use_randomport :
		    udp_use_randomport);

		/*
		 * Even though this looks similar to the code in
		 * in6_pcbsetport, the v6 vs v4 checks are different.
		 */
		anonport = TRUE;
		if (inp->inp_flags & INP_HIGHPORT) {
			first = (u_short)ipport_hifirstauto;     /* sysctl */
			last  = (u_short)ipport_hilastauto;
			lastport = &pcbinfo->ipi_lasthi;
		} else if (inp->inp_flags & INP_LOWPORT) {
			/* Low (reserved) range requires privilege */
			cred = kauth_cred_proc_ref(p);
			error = priv_check_cred(cred,
			    PRIV_NETINET_RESERVEDPORT, 0);
			kauth_cred_unref(&cred);
			if (error != 0) {
				lck_rw_done(&pcbinfo->ipi_lock);
				socket_lock(so, 0);
				goto done;
			}
			first = (u_short)ipport_lowfirstauto;    /* 1023 */
			last  = (u_short)ipport_lowlastauto;     /* 600 */
			lastport = &pcbinfo->ipi_lastlow;
		} else {
			first = (u_short)ipport_firstauto;       /* sysctl */
			last  = (u_short)ipport_lastauto;
			lastport = &pcbinfo->ipi_lastport;
		}
		/* No point in randomizing if only one port is available */

		if (first == last) {
			randomport = 0;
		}
		/*
		 * Simple check to ensure all ports are not used up causing
		 * a deadlock here.
		 *
		 * We split the two cases (up and down) so that the direction
		 * is not being tested on each round of the loop.
		 */
		if (first > last) {
			struct in_addr lookup_addr;

			/*
			 * counting down
			 */
			if (randomport) {
				read_frandom(&rand_port, sizeof(rand_port));
				*lastport =
				    first - (rand_port % (first - last));
			}
			count = first - last;

			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
			    inp->inp_laddr;

			found = false;
			do {
				if (count-- < 0) {      /* completely used? */
					lck_rw_done(&pcbinfo->ipi_lock);
					socket_lock(so, 0);
					error = EADDRNOTAVAIL;
					goto done;
				}
				--*lastport;
				/* Wrap back to "first" when leaving the range */
				if (*lastport > first || *lastport < last) {
					*lastport = first;
				}
				lport = htons(*lastport);

				/*
				 * Skip if this is a restricted port as we do not want to
				 * restricted ports as ephemeral
				 */
				if (IS_RESTRICTED_IN_PORT(lport)) {
					continue;
				}

				found = in_pcblookup_local_and_cleanup(pcbinfo,
				    lookup_addr, lport, wild) == NULL;
#if SKYWALK
				/* Candidate is only usable if the netns reservation succeeds */
				if (found &&
				    (SOCK_PROTO(so) == IPPROTO_TCP ||
				    SOCK_PROTO(so) == IPPROTO_UDP) &&
				    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
					int res_err;
					if (inp->inp_vflag & INP_IPV6) {
						res_err = netns_reserve_in6(
							&inp->inp_netns_token,
							inp->in6p_laddr,
							(uint8_t)SOCK_PROTO(so), lport,
							NETNS_BSD, NULL);
					} else {
						res_err = netns_reserve_in(
							&inp->inp_netns_token,
							lookup_addr, (uint8_t)SOCK_PROTO(so),
							lport, NETNS_BSD, NULL);
					}
					found = res_err == 0;
				}
#endif /* SKYWALK */
			} while (!found);
		} else {
			struct in_addr lookup_addr;

			/*
			 * counting up
			 */
			if (randomport) {
				read_frandom(&rand_port, sizeof(rand_port));
				*lastport =
				    first + (rand_port % (first - last));
			}
			count = last - first;

			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
			    inp->inp_laddr;

			found = false;
			do {
				if (count-- < 0) {      /* completely used? */
					lck_rw_done(&pcbinfo->ipi_lock);
					socket_lock(so, 0);
					error = EADDRNOTAVAIL;
					goto done;
				}
				++*lastport;
				/* Wrap back to "first" when leaving the range */
				if (*lastport < first || *lastport > last) {
					*lastport = first;
				}
				lport = htons(*lastport);

				/*
				 * Skip if this is a restricted port as we do not want to
				 * restricted ports as ephemeral
				 */
				if (IS_RESTRICTED_IN_PORT(lport)) {
					continue;
				}

				found = in_pcblookup_local_and_cleanup(pcbinfo,
				    lookup_addr, lport, wild) == NULL;
#if SKYWALK
				/* Candidate is only usable if the netns reservation succeeds */
				if (found &&
				    (SOCK_PROTO(so) == IPPROTO_TCP ||
				    SOCK_PROTO(so) == IPPROTO_UDP) &&
				    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
					int res_err;
					if (inp->inp_vflag & INP_IPV6) {
						res_err = netns_reserve_in6(
							&inp->inp_netns_token,
							inp->in6p_laddr,
							(uint8_t)SOCK_PROTO(so), lport,
							NETNS_BSD, NULL);
					} else {
						res_err = netns_reserve_in(
							&inp->inp_netns_token,
							lookup_addr, (uint8_t)SOCK_PROTO(so),
							lport, NETNS_BSD, NULL);
					}
					found = res_err == 0;
				}
#endif /* SKYWALK */
			} while (!found);
		}
	}
	socket_lock(so, 0);

	/*
	 * We unlocked socket's protocol lock for a long time.
	 * The socket might have been dropped/defuncted.
	 * Checking if world has changed since.
	 */
	if (inp->inp_state == INPCB_STATE_DEAD) {
#if SKYWALK
		netns_release(&inp->inp_netns_token);
#endif /* SKYWALK */
		lck_rw_done(&pcbinfo->ipi_lock);
		error = ECONNABORTED;
		goto done;
	}

	/* Another thread may have completed a bind while we were unlocked */
	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
#if SKYWALK
		netns_release(&inp->inp_netns_token);
#endif /* SKYWALK */
		lck_rw_done(&pcbinfo->ipi_lock);
		error = EINVAL;
		goto done;
	}

	if (laddr.s_addr != INADDR_ANY) {
		inp->inp_laddr = laddr;
		inp->inp_last_outifp = outif;
#if SKYWALK
		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
			netns_set_ifnet(&inp->inp_netns_token, outif);
		}
#endif /* SKYWALK */
	}
	inp->inp_lport = lport;
	if (anonport) {
		inp->inp_flags |= INP_ANONPORT;
	}

	/* Insert into the pcb hash; on failure roll back all partial state */
	if (in_pcbinshash(inp, remote, 1) != 0) {
		inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_last_outifp = NULL;

#if SKYWALK
		netns_release(&inp->inp_netns_token);
#endif /* SKYWALK */
		inp->inp_lport = 0;
		if (anonport) {
			inp->inp_flags &= ~INP_ANONPORT;
		}
		lck_rw_done(&pcbinfo->ipi_lock);
		error = EAGAIN;
		goto done;
	}
	lck_rw_done(&pcbinfo->ipi_lock);
	sflt_notify(so, sock_evt_bound, NULL);

	in_pcb_check_management_entitled(inp);
	in_pcb_check_ultra_constrained_entitled(inp);
done:
	return error;
}
1385 
/*
 * True when the IPv4 destination is link-local, loopback, zeronet,
 * multicast, or private -- destinations for which an APN fallback
 * notification is never warranted.
 */
#define APN_FALLBACK_IP_FILTER(a)       \
	(IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \
	 IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \
	 IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \
	 IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \
	 IN_PRIVATE(ntohl((a)->sin_addr.s_addr)))

/* Minimum spacing, in net_uptime() seconds, between fallback notifications */
#define APN_FALLBACK_NOTIF_INTERVAL     2 /* Magic Number */
/* net_uptime() timestamp of the most recently issued fallback notification */
static uint64_t last_apn_fallback = 0;
1395 
1396 static boolean_t
apn_fallback_required(proc_t proc,struct socket * so,struct sockaddr_in * p_dstv4)1397 apn_fallback_required(proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4)
1398 {
1399 	uint64_t timenow;
1400 	struct sockaddr_storage lookup_default_addr;
1401 	struct rtentry *rt = NULL;
1402 
1403 	VERIFY(proc != NULL);
1404 
1405 	if (apn_fallbk_enabled == FALSE) {
1406 		return FALSE;
1407 	}
1408 
1409 	if (proc == kernproc) {
1410 		return FALSE;
1411 	}
1412 
1413 	if (so && (so->so_options & SO_NOAPNFALLBK)) {
1414 		return FALSE;
1415 	}
1416 
1417 	timenow = net_uptime();
1418 	if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) {
1419 		apn_fallbk_log((LOG_INFO, "APN fallback notification throttled.\n"));
1420 		return FALSE;
1421 	}
1422 
1423 	if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) {
1424 		return FALSE;
1425 	}
1426 
1427 	/* Check if we have unscoped IPv6 default route through cellular */
1428 	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
1429 	lookup_default_addr.ss_family = AF_INET6;
1430 	lookup_default_addr.ss_len = sizeof(struct sockaddr_in6);
1431 
1432 	rt = rtalloc1(SA(&lookup_default_addr), 0, 0);
1433 	if (NULL == rt) {
1434 		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
1435 		    "unscoped default IPv6 route.\n"));
1436 		return FALSE;
1437 	}
1438 
1439 	if (!IFNET_IS_CELLULAR(rt->rt_ifp)) {
1440 		rtfree(rt);
1441 		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
1442 		    "unscoped default IPv6 route through cellular interface.\n"));
1443 		return FALSE;
1444 	}
1445 
1446 	/*
1447 	 * We have a default IPv6 route, ensure that
1448 	 * we do not have IPv4 default route before triggering
1449 	 * the event
1450 	 */
1451 	rtfree(rt);
1452 	rt = NULL;
1453 
1454 	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
1455 	lookup_default_addr.ss_family = AF_INET;
1456 	lookup_default_addr.ss_len = sizeof(struct sockaddr_in);
1457 
1458 	rt = rtalloc1(SA(&lookup_default_addr), 0, 0);
1459 
1460 	if (rt) {
1461 		rtfree(rt);
1462 		rt = NULL;
1463 		apn_fallbk_log((LOG_INFO, "APN fallback notification found unscoped "
1464 		    "IPv4 default route!\n"));
1465 		return FALSE;
1466 	}
1467 
1468 	{
1469 		/*
1470 		 * We disable APN fallback if the binary is not a third-party app.
1471 		 * Note that platform daemons use their process name as a
1472 		 * bundle ID so we filter out bundle IDs without dots.
1473 		 */
1474 		const char *__null_terminated bundle_id = cs_identity_get(proc);
1475 		if (bundle_id == NULL ||
1476 		    bundle_id[0] == '\0' ||
1477 		    strchr(bundle_id, '.') == NULL ||
1478 		    strlcmp("com.apple.", bundle_id, sizeof("com.apple.") - 1) == 0) {
1479 			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found first-"
1480 			    "party bundle ID \"%s\"!\n", (bundle_id ? bundle_id : "NULL")));
1481 			return FALSE;
1482 		}
1483 	}
1484 
1485 	{
1486 		/*
1487 		 * The Apple App Store IPv6 requirement started on
1488 		 * June 1st, 2016 at 12:00:00 AM PDT.
1489 		 * We disable APN fallback if the binary is more recent than that.
1490 		 * We check both atime and birthtime since birthtime is not always supported.
1491 		 */
1492 		static const long ipv6_start_date = 1464764400L;
1493 		vfs_context_t __single context;
1494 		struct stat64 sb;
1495 		int vn_stat_error;
1496 
1497 		bzero(&sb, sizeof(struct stat64));
1498 		context = vfs_context_create(NULL);
1499 		vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, 0, context);
1500 		(void)vfs_context_rele(context);
1501 
1502 		if (vn_stat_error != 0 ||
1503 		    sb.st_atimespec.tv_sec >= ipv6_start_date ||
1504 		    sb.st_birthtimespec.tv_sec >= ipv6_start_date) {
1505 			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found binary "
1506 			    "too recent! (err %d atime %ld mtime %ld ctime %ld birthtime %ld)\n",
1507 			    vn_stat_error, sb.st_atimespec.tv_sec, sb.st_mtimespec.tv_sec,
1508 			    sb.st_ctimespec.tv_sec, sb.st_birthtimespec.tv_sec));
1509 			return FALSE;
1510 		}
1511 	}
1512 	return TRUE;
1513 }
1514 
1515 static void
apn_fallback_trigger(proc_t proc,struct socket * so)1516 apn_fallback_trigger(proc_t proc, struct socket *so)
1517 {
1518 	pid_t pid = 0;
1519 	struct kev_msg ev_msg;
1520 	struct kev_netevent_apnfallbk_data apnfallbk_data;
1521 
1522 	last_apn_fallback = net_uptime();
1523 	pid = proc_pid(proc);
1524 	uuid_t application_uuid;
1525 	uuid_clear(application_uuid);
1526 	proc_getexecutableuuid(proc, application_uuid,
1527 	    sizeof(application_uuid));
1528 
1529 	bzero(&ev_msg, sizeof(struct kev_msg));
1530 	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
1531 	ev_msg.kev_class        = KEV_NETWORK_CLASS;
1532 	ev_msg.kev_subclass     = KEV_NETEVENT_SUBCLASS;
1533 	ev_msg.event_code       = KEV_NETEVENT_APNFALLBACK;
1534 
1535 	bzero(&apnfallbk_data, sizeof(apnfallbk_data));
1536 
1537 	if (so->so_flags & SOF_DELEGATED) {
1538 		apnfallbk_data.epid = so->e_pid;
1539 		uuid_copy(apnfallbk_data.euuid, so->e_uuid);
1540 	} else {
1541 		apnfallbk_data.epid = so->last_pid;
1542 		uuid_copy(apnfallbk_data.euuid, so->last_uuid);
1543 	}
1544 
1545 	ev_msg.dv[0].data_ptr   = &apnfallbk_data;
1546 	ev_msg.dv[0].data_length = sizeof(apnfallbk_data);
1547 	kev_post_msg(&ev_msg);
1548 	apn_fallbk_log((LOG_INFO, "APN fallback notification issued.\n"));
1549 }
1550 
1551 /*
1552  * Transform old in_pcbconnect() into an inner subroutine for new
1553  * in_pcbconnect(); do some validity-checking on the remote address
1554  * (in "nam") and then determine local host address (i.e., which
1555  * interface) to use to access that remote host.
1556  *
1557  * This routine may alter the caller-supplied remote address "nam".
1558  *
1559  * The caller may override the bound-to-interface setting of the socket
1560  * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
1561  *
1562  * This routine might return an ifp with a reference held if the caller
1563  * provides a non-NULL outif, even in the error case.  The caller is
1564  * responsible for releasing its reference.
1565  *
1566  * Returns:	0			Success
1567  *		EINVAL			Invalid argument
1568  *		EAFNOSUPPORT		Address family not supported
1569  *		EADDRNOTAVAIL		Address not available
1570  */
int
in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr,
    unsigned int ifscope, struct ifnet **outif, int raw)
{
	struct route *ro = &inp->inp_route;
	struct in_ifaddr *ia = NULL;
	struct sockaddr_in sin;
	int error = 0;
	boolean_t restricted = FALSE;

	/* Validate the remote address before doing any route work */
	if (outif != NULL) {
		*outif = NULL;
	}
	if (nam->sa_len != sizeof(struct sockaddr_in)) {
		return EINVAL;
	}
	if (SIN(nam)->sin_family != AF_INET) {
		return EAFNOSUPPORT;
	}
	if (raw == 0 && SIN(nam)->sin_port == 0) {
		return EADDRNOTAVAIL;
	}

	in_pcb_check_management_entitled(inp);
	in_pcb_check_ultra_constrained_entitled(inp);

	/*
	 * If the destination address is INADDR_ANY,
	 * use the primary local address.
	 * If the supplied address is INADDR_BROADCAST,
	 * and the primary interface supports broadcast,
	 * choose the broadcast address for that interface.
	 */
	if (raw == 0 && (SIN(nam)->sin_addr.s_addr == INADDR_ANY ||
	    SIN(nam)->sin_addr.s_addr == (u_int32_t)INADDR_BROADCAST)) {
		lck_rw_lock_shared(&in_ifaddr_rwlock);
		if (!TAILQ_EMPTY(&in_ifaddrhead)) {
			ia = TAILQ_FIRST(&in_ifaddrhead);
			IFA_LOCK_SPIN(&ia->ia_ifa);
			if (SIN(nam)->sin_addr.s_addr == INADDR_ANY) {
				SIN(nam)->sin_addr = IA_SIN(ia)->sin_addr;
			} else if (ia->ia_ifp->if_flags & IFF_BROADCAST) {
				SIN(nam)->sin_addr =
				    SIN(&ia->ia_broadaddr)->sin_addr;
			}
			IFA_UNLOCK(&ia->ia_ifa);
			ia = NULL;
		}
		lck_rw_done(&in_ifaddr_rwlock);
	}
	/*
	 * Otherwise, if the socket has already bound the source, just use it.
	 */
	if (inp->inp_laddr.s_addr != INADDR_ANY) {
		VERIFY(ia == NULL);
		*laddr = inp->inp_laddr;
		return 0;
	}

	/*
	 * If the ifscope is specified by the caller (e.g. IP_PKTINFO)
	 * then it overrides the sticky ifscope set for the socket.
	 */
	if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) {
		ifscope = inp->inp_boundifp->if_index;
	}

	/*
	 * If route is known or can be allocated now,
	 * our src addr is taken from the i/f, else punt.
	 * Note that we should check the address family of the cached
	 * destination, in case of sharing the cache with IPv6.
	 */
	if (ro->ro_rt != NULL) {
		RT_LOCK_SPIN(ro->ro_rt);
	}
	/* Invalidate the cached route if it is stale or for another dest */
	if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET ||
	    SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr ||
	    (inp->inp_socket->so_options & SO_DONTROUTE)) {
		if (ro->ro_rt != NULL) {
			RT_UNLOCK(ro->ro_rt);
		}
		ROUTE_RELEASE(ro);
	}
	if (!(inp->inp_socket->so_options & SO_DONTROUTE) &&
	    (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
		if (ro->ro_rt != NULL) {
			RT_UNLOCK(ro->ro_rt);
		}
		ROUTE_RELEASE(ro);
		/* No route yet, so try to acquire one */
		SOCKADDR_ZERO(&ro->ro_dst, sizeof(struct sockaddr_in));
		ro->ro_dst.sa_family = AF_INET;
		ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
		SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr;
		rtalloc_scoped(ro, ifscope);
		if (ro->ro_rt != NULL) {
			RT_LOCK_SPIN(ro->ro_rt);
		}
	}
	/* Sanitized local copy for interface address searches */
	SOCKADDR_ZERO(&sin, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
	/*
	 * If we did not find (or use) a route, assume dest is reachable
	 * on a directly connected network and try to find a corresponding
	 * interface to take the source address from.
	 */
	if (ro->ro_rt == NULL) {
		proc_t proc = current_proc();

		VERIFY(ia == NULL);
		ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
		if (ia == NULL) {
			ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
		}
		error = ((ia == NULL) ? ENETUNREACH : 0);

		/* No IPv4 route at all: possibly an IPv6-only APN situation */
		if (apn_fallback_required(proc, inp->inp_socket,
		    (void *)nam)) {
			apn_fallback_trigger(proc, inp->inp_socket);
		}

		goto done;
	}
	RT_LOCK_ASSERT_HELD(ro->ro_rt);
	/*
	 * If the outgoing interface on the route found is not
	 * a loopback interface, use the address from that interface.
	 */
	if (!(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
		VERIFY(ia == NULL);
		/*
		 * If the route points to a cellular interface and the
		 * caller forbids our using interfaces of such type,
		 * pretend that there is no route.
		 * Apply the same logic for expensive interfaces.
		 */
		if (inp_restricted_send(inp, ro->ro_rt->rt_ifp)) {
			RT_UNLOCK(ro->ro_rt);
			ROUTE_RELEASE(ro);
			error = EHOSTUNREACH;
			restricted = TRUE;
		} else {
			/* Become a regular mutex */
			RT_CONVERT_LOCK(ro->ro_rt);
			ia = ifatoia(ro->ro_rt->rt_ifa);
			ifa_addref(&ia->ia_ifa);

			/*
			 * Mark the control block for notification of
			 * a possible flow that might undergo clat46
			 * translation.
			 *
			 * We defer the decision to a later point when
			 * inpcb is being disposed off.
			 * The reason is that we only want to send notification
			 * if the flow was ever used to send data.
			 */
			if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) {
				inp->inp_flags2 |= INP2_CLAT46_FLOW;
			}

			RT_UNLOCK(ro->ro_rt);
			error = 0;
		}
		goto done;
	}
	VERIFY(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK);
	RT_UNLOCK(ro->ro_rt);
	/*
	 * The outgoing interface is marked with 'loopback net', so a route
	 * to ourselves is here.
	 * Try to find the interface of the destination address and then
	 * take the address from there. That interface is not necessarily
	 * a loopback interface.
	 */
	VERIFY(ia == NULL);
	ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
	if (ia == NULL) {
		ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope));
	}
	if (ia == NULL) {
		ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
	}
	if (ia == NULL) {
		/* Fall back to the route's own interface address */
		RT_LOCK(ro->ro_rt);
		ia = ifatoia(ro->ro_rt->rt_ifa);
		if (ia != NULL) {
			ifa_addref(&ia->ia_ifa);
		}
		RT_UNLOCK(ro->ro_rt);
	}
	error = ((ia == NULL) ? ENETUNREACH : 0);

done:
	/*
	 * If the destination address is multicast and an outgoing
	 * interface has been set as a multicast option, use the
	 * address of that interface as our source address.
	 */
	if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
	    inp->inp_moptions != NULL) {
		struct ip_moptions *imo;
		struct ifnet *ifp;

		imo = inp->inp_moptions;
		IMO_LOCK(imo);
		if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
		    ia->ia_ifp != imo->imo_multicast_ifp)) {
			ifp = imo->imo_multicast_ifp;
			if (ia != NULL) {
				ifa_remref(&ia->ia_ifa);
			}
			/* Find an in_ifaddr on the chosen multicast interface */
			lck_rw_lock_shared(&in_ifaddr_rwlock);
			TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
				if (ia->ia_ifp == ifp) {
					break;
				}
			}
			if (ia != NULL) {
				ifa_addref(&ia->ia_ifa);
			}
			lck_rw_done(&in_ifaddr_rwlock);
			if (ia == NULL) {
				error = EADDRNOTAVAIL;
			} else {
				error = 0;
			}
		}
		IMO_UNLOCK(imo);
	}
	/*
	 * Don't do pcblookup call here; return interface in laddr
	 * and exit to caller, that will do the lookup.
	 */
	if (ia != NULL) {
		/*
		 * If the source address belongs to a cellular interface
		 * and the socket forbids our using interfaces of such
		 * type, pretend that there is no source address.
		 * Apply the same logic for expensive interfaces.
		 */
		IFA_LOCK_SPIN(&ia->ia_ifa);
		if (inp_restricted_send(inp, ia->ia_ifa.ifa_ifp)) {
			IFA_UNLOCK(&ia->ia_ifa);
			error = EHOSTUNREACH;
			restricted = TRUE;
		} else if (error == 0) {
			*laddr = ia->ia_addr.sin_addr;
			if (outif != NULL) {
				struct ifnet *ifp;

				if (ro->ro_rt != NULL) {
					ifp = ro->ro_rt->rt_ifp;
				} else {
					ifp = ia->ia_ifp;
				}

				VERIFY(ifp != NULL);
				IFA_CONVERT_LOCK(&ia->ia_ifa);
				ifnet_reference(ifp);   /* for caller */
				if (*outif != NULL) {
					ifnet_release(*outif);
				}
				*outif = ifp;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		} else {
			IFA_UNLOCK(&ia->ia_ifa);
		}
		ifa_remref(&ia->ia_ifa);
		ia = NULL;
	}

	/* Tell interested parties the flow was denied by interface policy */
	if (restricted && error == EHOSTUNREACH) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED |
		    SO_FILT_HINT_IFDENIED));
	}

	return error;
}
1855 
1856 /*
1857  * Outer subroutine:
1858  * Connect from a socket to a specified address.
1859  * Both address and port must be specified in argument sin.
1860  * If don't have a local address for this socket yet,
1861  * then pick one.
1862  *
1863  * The caller may override the bound-to-interface setting of the socket
1864  * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
1865  */
int
in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p,
    unsigned int ifscope, struct ifnet **outif)
{
	struct in_addr laddr;
	struct sockaddr_in *sin = SIN(nam);
	struct inpcb *pcb;
	int error;
	struct socket *so = inp->inp_socket;

#if CONTENT_FILTER
	/* Bump the counter so content filters can observe the state change */
	if (so) {
		so->so_state_change_cnt++;
	}
#endif

	/*
	 *   Call inner routine, to assign local interface address.
	 */
	if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) {
		return error;
	}

	/*
	 * Drop the socket lock across the hash lookup;
	 * in_pcblookup_hash() acquires the pcbinfo lock itself.
	 */
	socket_unlock(so, 0);
	pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
	    inp->inp_laddr.s_addr ? inp->inp_laddr : laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(so, 0);

	/*
	 * Check if the socket is still in a valid state. When we unlock this
	 * embryonic socket, it can get aborted if another thread is closing
	 * the listener (radar 7947600).
	 */
	if ((so->so_flags & SOF_ABORTED) != 0) {
		return ECONNREFUSED;
	}

	/* Another PCB already owns this 4-tuple; the connect cannot proceed */
	if (pcb != NULL) {
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return EADDRINUSE;
	}
	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		/* No local port bound yet: let in_pcbbind() assign one */
		if (inp->inp_lport == 0) {
			error = in_pcbbind(inp, NULL, nam, p);
			if (error) {
				return error;
			}
		}
		if (!lck_rw_try_lock_exclusive(&inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(&inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
		/* Commit the local address chosen by in_pcbladdr() above */
		inp->inp_laddr = laddr;
		/* no reference needed */
		inp->inp_last_outifp = (outif != NULL) ? *outif : NULL;
#if SKYWALK
		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
			netns_set_ifnet(&inp->inp_netns_token,
			    inp->inp_last_outifp);
		}
#endif /* SKYWALK */
		/* Record that the local address was implicit (was INADDR_ANY) */
		inp->inp_flags |= INP_INADDR_ANY;
	} else {
		/*
		 * Usage of IP_PKTINFO, without local port already
		 * speficified will cause kernel to panic,
		 * see rdar://problem/18508185.
		 * For now returning error to avoid a kernel panic
		 * This routines can be refactored and handle this better
		 * in future.
		 */
		if (inp->inp_lport == 0) {
			return EINVAL;
		}
		if (!lck_rw_try_lock_exclusive(&inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(&inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
	}
	/* Record the foreign endpoint; the PCB is now connected */
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
		nstat_pcb_invalidate_cache(inp);
	}
	/* Move the PCB to the hash bucket for the new 4-tuple */
	in_pcbrehash(inp);
	lck_rw_done(&inp->inp_pcbinfo->ipi_lock);
	return 0;
}
1965 
void
in_pcbdisconnect(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	/* Cache nstat data for UDP before the flow identity is wiped out */
	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
		nstat_pcb_cache(inp);
	}

	/* Clear the foreign endpoint; the PCB reverts to unconnected */
	inp->inp_faddr.s_addr = INADDR_ANY;
	inp->inp_fport = 0;

#if CONTENT_FILTER
	/* Bump the counter so content filters can observe the state change */
	if (so) {
		so->so_state_change_cnt++;
	}
#endif

	if (!lck_rw_try_lock_exclusive(&inp->inp_pcbinfo->ipi_lock)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(so, 0);
		lck_rw_lock_exclusive(&inp->inp_pcbinfo->ipi_lock);
		socket_lock(so, 0);
	}

	/* Re-hash under the wildcard foreign address */
	in_pcbrehash(inp);
	lck_rw_done(&inp->inp_pcbinfo->ipi_lock);
	/*
	 * A multipath subflow socket would have its SS_NOFDREF set by default,
	 * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB;
	 * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared.
	 */
	if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) {
		in_pcbdetach(inp);
	}
}
2002 
void
in_pcbdetach(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (so->so_pcb == NULL) {
		/* PCB has been disposed */
		panic("%s: inp=%p so=%p proto=%d so_pcb is null!", __func__,
		    inp, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

#if IPSEC
	/* Tear down any per-PCB IPsec policy */
	if (inp->inp_sp != NULL) {
		(void) ipsec4_delete_pcbpolicy(inp);
	}
#endif /* IPSEC */

	/* Account for UDP sockets that never exchanged any packets */
	if (inp->inp_stat != NULL && SOCK_PROTO(so) == IPPROTO_UDP) {
		if (inp->inp_stat->rxpackets == 0 && inp->inp_stat->txpackets == 0) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_no_data);
		}
	}

	/*
	 * Let NetworkStatistics know this PCB is going away
	 * before we detach it.
	 */
	if (nstat_collect &&
	    (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) {
		nstat_pcb_detach(inp);
	}

	/* Free memory buffer held for generating keep alives */
	if (inp->inp_keepalive_data != NULL) {
		kfree_data_counted_by(inp->inp_keepalive_data, inp->inp_keepalive_datalen);
	}

	/* mark socket state as dead */
	if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
		panic("%s: so=%p proto=%d couldn't set to STOPUSING",
		    __func__, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

#if SKYWALK
	/* Free up the port in the namespace registrar if not in TIME_WAIT */
	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
		netns_release(&inp->inp_netns_token);
		netns_release(&inp->inp_wildcard_netns_token);
	}
#endif /* SKYWALK */

	if (!(so->so_flags & SOF_PCBCLEARING)) {
		struct ip_moptions *imo;

		/* Release cached route, IP options and multicast state */
		inp->inp_vflag = 0;
		if (inp->inp_options != NULL) {
			(void) m_free(inp->inp_options);
			inp->inp_options = NULL;
		}
		ROUTE_RELEASE(&inp->inp_route);
		imo = inp->inp_moptions;
		if (imo != NULL) {
			IMO_REMREF(imo);
		}
		inp->inp_moptions = NULL;
		sofreelastref(so, 0);
		inp->inp_state = INPCB_STATE_DEAD;

		/*
		 * Enqueue an event to send kernel event notification
		 * if the flow has to CLAT46 for data packets
		 */
		if (inp->inp_flags2 & INP2_CLAT46_FLOW) {
			/*
			 * If there has been any exchange of data bytes
			 * over this flow.
			 * Schedule a notification to report that flow is
			 * using client side translation.
			 */
			if (inp->inp_stat != NULL &&
			    (inp->inp_stat->txbytes != 0 ||
			    inp->inp_stat->rxbytes != 0)) {
				/* Attribute the event to the delegated process if any */
				if (so->so_flags & SOF_DELEGATED) {
					in6_clat46_event_enqueue_nwk_wq_entry(
						IN6_CLAT46_EVENT_V4_FLOW,
						so->e_pid,
						so->e_uuid);
				} else {
					in6_clat46_event_enqueue_nwk_wq_entry(
						IN6_CLAT46_EVENT_V4_FLOW,
						so->last_pid,
						so->last_uuid);
				}
			}
		}

		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;

		/* Ask the PCB garbage collector to reap this one soon */
		inpcb_gc_sched(inp->inp_pcbinfo, INPCB_TIMER_FAST);
	}
}
2107 
2108 
void
in_pcbdispose(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;
	struct inpcbinfo *ipi = inp->inp_pcbinfo;

	/*
	 * Sanity checks: the socket must have no remaining users and the
	 * PCB must already be in the WNT_STOPUSING state before disposal.
	 */
	if (so != NULL && so->so_usecount != 0) {
		panic("%s: so %p [%d,%d] usecount %d lockhistory %s",
		    __func__, so, SOCK_DOM(so), SOCK_TYPE(so), so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		if (so != NULL) {
			panic_plain("%s: inp %p invalid wantcnt %d, so %p "
			    "[%d,%d] usecount %d retaincnt %d state 0x%x "
			    "flags 0x%x lockhistory %s\n", __func__, inp,
			    inp->inp_wantcnt, so, SOCK_DOM(so), SOCK_TYPE(so),
			    so->so_usecount, so->so_retaincnt, so->so_state,
			    so->so_flags, solockhistory_nr(so));
			/* NOTREACHED */
		} else {
			panic("%s: inp %p invalid wantcnt %d no socket",
			    __func__, inp, inp->inp_wantcnt);
			/* NOTREACHED */
		}
	}

	LCK_RW_ASSERT(&ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* Bump generation count and unlink from hash/port/global lists */
	inp->inp_gencnt = ++ipi->ipi_gencnt;
	/* access ipi in in_pcbremlists */
	in_pcbremlists(inp);

	if (so != NULL) {
		if (so->so_proto->pr_flags & PR_PCBLOCK) {
			sofreelastref(so, 0);
			if (so->so_rcv.sb_cc > 0 || so->so_snd.sb_cc > 0) {
				/*
				 * selthreadclear() already called
				 * during sofreelastref() above.
				 */
				sbrelease(&so->so_rcv);
				sbrelease(&so->so_snd);
			}
			if (so->so_head != NULL) {
				panic("%s: so=%p head still exist",
				    __func__, so);
				/* NOTREACHED */
			}
			lck_mtx_unlock(&inp->inpcb_mtx);

#if NECP
			necp_inpcb_remove_cb(inp);
#endif /* NECP */

			lck_mtx_destroy(&inp->inpcb_mtx, ipi->ipi_lock_grp);
		}
		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;
		/* Sever the socket <-> PCB linkage in both directions */
		so->so_saved_pcb = (caddr_t)inp;
		so->so_pcb = NULL;
		inp->inp_socket = NULL;
#if NECP
		necp_inpcb_dispose(inp);
#endif /* NECP */
		/*
		 * In case there a route cached after a detach (possible
		 * in the tcp case), make sure that it is freed before
		 * we deallocate the structure.
		 */
		ROUTE_RELEASE(&inp->inp_route);
		/*
		 * NOTE(review): SOF1_CACHED_IN_SOCK_LAYER appears to mean
		 * the inp storage is owned by the socket layer and must not
		 * be zfree'd here — confirm before changing.
		 */
		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
			zfree(ipi->ipi_zone, inp);
		}
		sodealloc(so);
	}
}
2186 
2187 /*
2188  * The calling convention of in_getsockaddr() and in_getpeeraddr() was
2189  * modified to match the pru_sockaddr() and pru_peeraddr() entry points
2190  * in struct pr_usrreqs, so that protocols can just reference then directly
2191  * without the need for a wrapper function.
2192  */
2193 int
in_getsockaddr(struct socket * so,struct sockaddr ** nam)2194 in_getsockaddr(struct socket *so, struct sockaddr **nam)
2195 {
2196 	struct inpcb *inp;
2197 	struct sockaddr_in *sin;
2198 
2199 	/*
2200 	 * Do the malloc first in case it blocks.
2201 	 */
2202 	sin = SIN(alloc_sockaddr(sizeof(*sin),
2203 	    Z_WAITOK | Z_NOFAIL));
2204 
2205 	sin->sin_family = AF_INET;
2206 
2207 	if ((inp = sotoinpcb(so)) == NULL) {
2208 		free_sockaddr(sin);
2209 		return EINVAL;
2210 	}
2211 	sin->sin_port = inp->inp_lport;
2212 	sin->sin_addr = inp->inp_laddr;
2213 
2214 	*nam = SA(sin);
2215 	return 0;
2216 }
2217 
2218 int
in_getsockaddr_s(struct socket * so,struct sockaddr_in * ss)2219 in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss)
2220 {
2221 	struct sockaddr_in *sin = ss;
2222 	struct inpcb *inp;
2223 
2224 	VERIFY(ss != NULL);
2225 	SOCKADDR_ZERO(ss, sizeof(*ss));
2226 
2227 	sin->sin_family = AF_INET;
2228 	sin->sin_len = sizeof(*sin);
2229 
2230 	if ((inp = sotoinpcb(so)) == NULL) {
2231 		return EINVAL;
2232 	}
2233 
2234 	sin->sin_port = inp->inp_lport;
2235 	sin->sin_addr = inp->inp_laddr;
2236 	return 0;
2237 }
2238 
2239 int
in_getpeeraddr(struct socket * so,struct sockaddr ** nam)2240 in_getpeeraddr(struct socket *so, struct sockaddr **nam)
2241 {
2242 	struct inpcb *inp;
2243 	struct sockaddr_in *sin;
2244 
2245 	/*
2246 	 * Do the malloc first in case it blocks.
2247 	 */
2248 	sin = SIN(alloc_sockaddr(sizeof(*sin),
2249 	    Z_WAITOK | Z_NOFAIL));
2250 
2251 	sin->sin_family = AF_INET;
2252 
2253 	if ((inp = sotoinpcb(so)) == NULL) {
2254 		free_sockaddr(sin);
2255 		return EINVAL;
2256 	}
2257 	sin->sin_port = inp->inp_fport;
2258 	sin->sin_addr = inp->inp_faddr;
2259 
2260 	*nam = SA(sin);
2261 	return 0;
2262 }
2263 
/*
 * Invoke the notify callback, with the socket lock held, on every IPv4
 * PCB whose foreign address matches faddr.
 */
void
in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    int errno, void (*notify)(struct inpcb *, int))
{
	struct inpcb *inp;

	lck_rw_lock_shared(&pcbinfo->ipi_lock);

	LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
		/* Only IPv4 PCBs connected to the given peer qualify */
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
		if (inp->inp_faddr.s_addr != faddr.s_addr ||
		    inp->inp_socket == NULL) {
			continue;
		}
		/* Skip PCBs that are already being torn down */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
			continue;
		}
		/* Hold a ref across the callback, then drop it */
		socket_lock(inp->inp_socket, 1);
		(*notify)(inp, errno);
		(void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
		socket_unlock(inp->inp_socket, 1);
	}
	lck_rw_done(&pcbinfo->ipi_lock);
}
2290 
2291 /*
2292  * Check for alternatives when higher level complains
2293  * about service problems.  For now, invalidate cached
2294  * routing information.  If the route was created dynamically
2295  * (by a redirect), time to try a default gateway again.
2296  */
void
in_losing(struct inpcb *inp)
{
	boolean_t release = FALSE;
	struct rtentry *rt;

	if ((rt = inp->inp_route.ro_rt) != NULL) {
		struct in_ifaddr *ia = NULL;

		RT_LOCK(rt);
		if (rt->rt_flags & RTF_DYNAMIC) {
			/*
			 * Prevent another thread from modifying rt_key,
			 * rt_gateway via rt_setgate() after rt_lock is
			 * dropped by marking the route as defunct.
			 */
			rt->rt_flags |= RTF_CONDEMNED;
			RT_UNLOCK(rt);
			/* Redirect-created route: delete it from the table */
			(void) rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
		} else {
			RT_UNLOCK(rt);
		}
		/* if the address is gone keep the old route in the pcb */
		if (inp->inp_laddr.s_addr != INADDR_ANY &&
		    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
			/*
			 * Address is around; ditch the route.  A new route
			 * can be allocated the next time output is attempted.
			 */
			release = TRUE;
		}
		/* ifa_foraddr() returned a reference; drop it */
		if (ia != NULL) {
			ifa_remref(&ia->ia_ifa);
		}
	}
	/* Drop the cached route so a fresh one is looked up on next output */
	if (rt == NULL || release) {
		ROUTE_RELEASE(&inp->inp_route);
	}
}
2337 
2338 /*
2339  * After a routing change, flush old routing
2340  * and allocate a (hopefully) better one.
2341  */
2342 void
in_rtchange(struct inpcb * inp,int errno)2343 in_rtchange(struct inpcb *inp, int errno)
2344 {
2345 #pragma unused(errno)
2346 	boolean_t release = FALSE;
2347 	struct rtentry *rt;
2348 
2349 	if ((rt = inp->inp_route.ro_rt) != NULL) {
2350 		struct in_ifaddr *ia = NULL;
2351 
2352 		/* if address is gone, keep the old route */
2353 		if (inp->inp_laddr.s_addr != INADDR_ANY &&
2354 		    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
2355 			/*
2356 			 * Address is around; ditch the route.  A new route
2357 			 * can be allocated the next time output is attempted.
2358 			 */
2359 			release = TRUE;
2360 		}
2361 		if (ia != NULL) {
2362 			ifa_remref(&ia->ia_ifa);
2363 		}
2364 	}
2365 	if (rt == NULL || release) {
2366 		ROUTE_RELEASE(&inp->inp_route);
2367 	}
2368 }
2369 
2370 /*
2371  * Lookup a PCB based on the local address and port.
2372  */
2373 struct inpcb *
in_pcblookup_local(struct inpcbinfo * pcbinfo,struct in_addr laddr,unsigned int lport_arg,int wild_okay)2374 in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
2375     unsigned int lport_arg, int wild_okay)
2376 {
2377 	struct inpcb *inp;
2378 	int matchwild = 3, wildcard;
2379 	u_short lport = (u_short)lport_arg;
2380 
2381 	KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0, 0, 0, 0, 0);
2382 
2383 	if (!wild_okay) {
2384 		struct inpcbhead *head;
2385 		/*
2386 		 * Look for an unconnected (wildcard foreign addr) PCB that
2387 		 * matches the local address and port we're looking for.
2388 		 */
2389 		head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2390 		    pcbinfo->ipi_hashmask)];
2391 		LIST_FOREACH(inp, head, inp_hash) {
2392 			if (!(inp->inp_vflag & INP_IPV4)) {
2393 				continue;
2394 			}
2395 			if (inp->inp_faddr.s_addr == INADDR_ANY &&
2396 			    inp->inp_laddr.s_addr == laddr.s_addr &&
2397 			    inp->inp_lport == lport) {
2398 				/*
2399 				 * Found.
2400 				 */
2401 				return inp;
2402 			}
2403 		}
2404 		/*
2405 		 * Not found.
2406 		 */
2407 		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0);
2408 		return NULL;
2409 	} else {
2410 		struct inpcbporthead *porthash;
2411 		struct inpcbport *phd;
2412 		struct inpcb *match = NULL;
2413 		/*
2414 		 * Best fit PCB lookup.
2415 		 *
2416 		 * First see if this local port is in use by looking on the
2417 		 * port hash list.
2418 		 */
2419 		porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
2420 		    pcbinfo->ipi_porthashmask)];
2421 		LIST_FOREACH(phd, porthash, phd_hash) {
2422 			if (phd->phd_port == lport) {
2423 				break;
2424 			}
2425 		}
2426 		if (phd != NULL) {
2427 			/*
2428 			 * Port is in use by one or more PCBs. Look for best
2429 			 * fit.
2430 			 */
2431 			LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
2432 				wildcard = 0;
2433 				if (!(inp->inp_vflag & INP_IPV4)) {
2434 					continue;
2435 				}
2436 				if (inp->inp_faddr.s_addr != INADDR_ANY) {
2437 					wildcard++;
2438 				}
2439 				if (inp->inp_laddr.s_addr != INADDR_ANY) {
2440 					if (laddr.s_addr == INADDR_ANY) {
2441 						wildcard++;
2442 					} else if (inp->inp_laddr.s_addr !=
2443 					    laddr.s_addr) {
2444 						continue;
2445 					}
2446 				} else {
2447 					if (laddr.s_addr != INADDR_ANY) {
2448 						wildcard++;
2449 					}
2450 				}
2451 				if (wildcard < matchwild) {
2452 					match = inp;
2453 					matchwild = wildcard;
2454 					if (matchwild == 0) {
2455 						break;
2456 					}
2457 				}
2458 			}
2459 		}
2460 		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,
2461 		    0, 0, 0, 0);
2462 		return match;
2463 	}
2464 }
2465 
2466 /*
2467  * Check if PCB exists in hash list.
2468  */
int
in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
    uid_t *uid, gid_t *gid, struct ifnet *ifp)
{
	struct inpcbhead *head;
	struct inpcb *inp;
	u_short fport = (u_short)fport_arg, lport = (u_short)lport_arg;
	int found = 0;
	struct inpcb *local_wild = NULL;
	struct inpcb *local_wild_mapped = NULL;

	/* Default to "no owner" in case nothing matches */
	*uid = UID_MAX;
	*gid = GID_MAX;

	/*
	 * We may have found the pcb in the last lookup - check this first.
	 */

	lck_rw_lock_shared(&pcbinfo->ipi_lock);

	/*
	 * First look for an exact match.
	 */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
		/* Skip PCBs not allowed to receive on this interface */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == faddr.s_addr &&
		    inp->inp_laddr.s_addr == laddr.s_addr &&
		    inp->inp_fport == fport &&
		    inp->inp_lport == lport) {
			if ((found = (inp->inp_socket != NULL))) {
				/*
				 * Found.
				 */
				*uid = kauth_cred_getuid(
					inp->inp_socket->so_cred);
				*gid = kauth_cred_getgid(
					inp->inp_socket->so_cred);
			}
			lck_rw_done(&pcbinfo->ipi_lock);
			return found;
		}
	}

	if (!wildcard) {
		/*
		 * Not found.
		 */
		lck_rw_done(&pcbinfo->ipi_lock);
		return 0;
	}

	/* Second pass: wildcard (unconnected/listening) PCBs for this port */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == INADDR_ANY &&
		    inp->inp_lport == lport) {
			/* Exact local address beats any wildcard candidate */
			if (inp->inp_laddr.s_addr == laddr.s_addr) {
				if ((found = (inp->inp_socket != NULL))) {
					*uid = kauth_cred_getuid(
						inp->inp_socket->so_cred);
					*gid = kauth_cred_getgid(
						inp->inp_socket->so_cred);
				}
				lck_rw_done(&pcbinfo->ipi_lock);
				return found;
			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
				/* PF_INET6 sockets are the weakest match */
				if (inp->inp_socket &&
				    SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
					local_wild_mapped = inp;
				} else {
					local_wild = inp;
				}
			}
		}
	}
	/* Prefer a pure IPv4 wildcard over an IPv6-domain (mapped) one */
	if (local_wild == NULL) {
		if (local_wild_mapped != NULL) {
			if ((found = (local_wild_mapped->inp_socket != NULL))) {
				*uid = kauth_cred_getuid(
					local_wild_mapped->inp_socket->so_cred);
				*gid = kauth_cred_getgid(
					local_wild_mapped->inp_socket->so_cred);
			}
			lck_rw_done(&pcbinfo->ipi_lock);
			return found;
		}
		lck_rw_done(&pcbinfo->ipi_lock);
		return 0;
	}
	if ((found = (local_wild->inp_socket != NULL))) {
		*uid = kauth_cred_getuid(
			local_wild->inp_socket->so_cred);
		*gid = kauth_cred_getgid(
			local_wild->inp_socket->so_cred);
	}
	lck_rw_done(&pcbinfo->ipi_lock);
	return found;
}
2595 
2596 /*
2597  * Lookup PCB in hash list.
2598  */
struct inpcb *
in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
    struct ifnet *ifp)
{
	struct inpcbhead *head;
	struct inpcb *inp;
	u_short fport = (u_short)fport_arg, lport = (u_short)lport_arg;
	struct inpcb *local_wild = NULL;
	struct inpcb *local_wild_mapped = NULL;

	/*
	 * We may have found the pcb in the last lookup - check this first.
	 *
	 * On success the returned PCB carries a WNT_ACQUIRE reference that
	 * the caller must release via in_pcb_checkstate(WNT_RELEASE).
	 */

	lck_rw_lock_shared(&pcbinfo->ipi_lock);

	/*
	 * First look for an exact match.
	 */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
		/* Skip PCBs not allowed to receive on this interface */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == faddr.s_addr &&
		    inp->inp_laddr.s_addr == laddr.s_addr &&
		    inp->inp_fport == fport &&
		    inp->inp_lport == lport) {
			/*
			 * Found.
			 */
			if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
			    WNT_STOPUSING) {
				lck_rw_done(&pcbinfo->ipi_lock);
				return inp;
			} else {
				/* it's there but dead, say it isn't found */
				lck_rw_done(&pcbinfo->ipi_lock);
				return NULL;
			}
		}
	}

	if (!wildcard) {
		/*
		 * Not found.
		 */
		lck_rw_done(&pcbinfo->ipi_lock);
		return NULL;
	}

	/* Second pass: wildcard (unconnected/listening) PCBs for this port */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == INADDR_ANY &&
		    inp->inp_lport == lport) {
			/* Exact local address beats any wildcard candidate */
			if (inp->inp_laddr.s_addr == laddr.s_addr) {
				if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
				    WNT_STOPUSING) {
					lck_rw_done(&pcbinfo->ipi_lock);
					return inp;
				} else {
					/* it's dead; say it isn't found */
					lck_rw_done(&pcbinfo->ipi_lock);
					return NULL;
				}
			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
				/* PF_INET6 sockets are the weakest match */
				if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
					local_wild_mapped = inp;
				} else {
					local_wild = inp;
				}
			}
		}
	}
	/* Prefer a pure IPv4 wildcard over an IPv6-domain (mapped) one */
	if (local_wild == NULL) {
		if (local_wild_mapped != NULL) {
			if (in_pcb_checkstate(local_wild_mapped,
			    WNT_ACQUIRE, 0) != WNT_STOPUSING) {
				lck_rw_done(&pcbinfo->ipi_lock);
				return local_wild_mapped;
			} else {
				/* it's dead; say it isn't found */
				lck_rw_done(&pcbinfo->ipi_lock);
				return NULL;
			}
		}
		lck_rw_done(&pcbinfo->ipi_lock);
		return NULL;
	}
	if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
		lck_rw_done(&pcbinfo->ipi_lock);
		return local_wild;
	}
	/*
	 * It's either not found or is already dead.
	 */
	lck_rw_done(&pcbinfo->ipi_lock);
	return NULL;
}
2724 
2725 /*
2726  * @brief	Insert PCB onto various hash lists.
2727  *
2728  * @param	inp Pointer to internet protocol control block
2729  * @param	remote Pointer to remote address sockaddr for policy evaluation
2730  * @param	locked	Implies if ipi_lock (protecting pcb list)
2731  *              is already locked or not.
2732  *
2733  * @return	int error on failure and 0 on success
2734  */
int
in_pcbinshash(struct inpcb *inp, struct sockaddr *remote, int locked)
{
	struct inpcbhead *pcbhash;
	struct inpcbporthead *pcbporthash;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbport *phd;
	u_int32_t hashkey_faddr;

	if (!locked) {
		if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets
			 */
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
			socket_lock(inp->inp_socket, 0);
		}
	}

	/*
	 * This routine or its caller may have given up
	 * socket's protocol lock briefly.
	 * During that time the socket may have been dropped.
	 * Safe-guarding against that.
	 */
	if (inp->inp_state == INPCB_STATE_DEAD) {
		if (!locked) {
			lck_rw_done(&pcbinfo->ipi_lock);
		}
		return ECONNABORTED;
	}


	/* For v6 PCBs, hash on the low 32 bits of the foreign address */
	if (inp->inp_vflag & INP_IPV6) {
		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
	} else {
		hashkey_faddr = inp->inp_faddr.s_addr;
	}

	inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
	    inp->inp_fport, pcbinfo->ipi_hashmask);

	pcbhash = &pcbinfo->ipi_hashbase[inp->inp_hash_element];

	pcbporthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(inp->inp_lport,
	    pcbinfo->ipi_porthashmask)];

	/*
	 * Go through port list and look for a head for this lport.
	 */
	LIST_FOREACH(phd, pcbporthash, phd_hash) {
		if (phd->phd_port == inp->inp_lport) {
			break;
		}
	}

	/*
	 * If none exists, malloc one and tack it on.
	 */
	if (phd == NULL) {
		phd = kalloc_type(struct inpcbport, Z_WAITOK | Z_NOFAIL);
		phd->phd_port = inp->inp_lport;
		LIST_INIT(&phd->phd_pcblist);
		LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
	}

	/* The PCB must not already be on a hash list */
	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));

#if SKYWALK
	/* Reserve the <proto, laddr, lport> tuple in the namespace registrar */
	int err;
	struct socket *so = inp->inp_socket;
	if ((SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP) &&
	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
		if (inp->inp_vflag & INP_IPV6) {
			err = netns_reserve_in6(&inp->inp_netns_token,
			    inp->in6p_laddr, (uint8_t)SOCK_PROTO(so), inp->inp_lport,
			    NETNS_BSD | NETNS_PRERESERVED, NULL);
		} else {
			err = netns_reserve_in(&inp->inp_netns_token,
			    inp->inp_laddr, (uint8_t)SOCK_PROTO(so), inp->inp_lport,
			    NETNS_BSD | NETNS_PRERESERVED, NULL);
		}
		if (err) {
			/* Reservation failed: bail without touching the lists */
			if (!locked) {
				lck_rw_done(&pcbinfo->ipi_lock);
			}
			return err;
		}
		netns_set_ifnet(&inp->inp_netns_token, inp->inp_last_outifp);
		inp_update_netns_flags(so);
	}
#endif /* SKYWALK */

	/* Link onto the port list and the address hash bucket */
	inp->inp_phd = phd;
	LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
	inp->inp_flags2 |= INP2_INHASHLIST;

	if (!locked) {
		lck_rw_done(&pcbinfo->ipi_lock);
	}

#if NECP
	// This call catches the original setting of the local address
	inp_update_necp_policy(inp, NULL, remote, 0);
#endif /* NECP */

	return 0;
}
2846 
2847 /*
2848  * Move PCB to the proper hash bucket when { faddr, fport } have  been
2849  * changed. NOTE: This does not handle the case of the lport changing (the
2850  * hashed port list would have to be updated as well), so the lport must
2851  * not change after in_pcbinshash() has been called.
2852  */
void
in_pcbrehash(struct inpcb *inp)
{
	struct inpcbhead *head;
	u_int32_t hashkey_faddr;

#if SKYWALK
	/*
	 * Keep the namespace registrar in sync with the (possibly new)
	 * local address before moving the PCB to its new hash bucket.
	 */
	struct socket *so = inp->inp_socket;
	if ((SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP) &&
	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
		int err;
		if (NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
			/* Existing reservation: just update its address */
			if (inp->inp_vflag & INP_IPV6) {
				err = netns_change_addr_in6(
					&inp->inp_netns_token, inp->in6p_laddr);
			} else {
				err = netns_change_addr_in(
					&inp->inp_netns_token, inp->inp_laddr);
			}
		} else {
			/* No reservation yet: take one for this tuple */
			if (inp->inp_vflag & INP_IPV6) {
				err = netns_reserve_in6(&inp->inp_netns_token,
				    inp->in6p_laddr, (uint8_t)SOCK_PROTO(so),
				    inp->inp_lport, NETNS_BSD, NULL);
			} else {
				err = netns_reserve_in(&inp->inp_netns_token,
				    inp->inp_laddr, (uint8_t)SOCK_PROTO(so),
				    inp->inp_lport, NETNS_BSD, NULL);
			}
		}
		/* We are assuming that whatever code paths result in a rehash
		 * did their due diligence and ensured that the given
		 * <proto, laddr, lport> tuple was free ahead of time. Just
		 * reserving the lport on INADDR_ANY should be enough, since
		 * that will block Skywalk from trying to reserve that same
		 * port. Given this assumption, the above netns calls should
		 * never fail*/
		VERIFY(err == 0);

		netns_set_ifnet(&inp->inp_netns_token, inp->inp_last_outifp);
		inp_update_netns_flags(so);
	}
#endif /* SKYWALK */
	/* For v6 PCBs, hash on the low 32 bits of the foreign address */
	if (inp->inp_vflag & INP_IPV6) {
		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
	} else {
		hashkey_faddr = inp->inp_faddr.s_addr;
	}

	/* Recompute the bucket from the current { faddr, lport, fport } */
	inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
	    inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask);
	head = &inp->inp_pcbinfo->ipi_hashbase[inp->inp_hash_element];

	/* Unlink from the old bucket (if hashed), then insert into the new */
	if (inp->inp_flags2 & INP2_INHASHLIST) {
		LIST_REMOVE(inp, inp_hash);
		inp->inp_flags2 &= ~INP2_INHASHLIST;
	}

	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
	LIST_INSERT_HEAD(head, inp, inp_hash);
	inp->inp_flags2 |= INP2_INHASHLIST;

#if NECP
	// This call catches updates to the remote addresses
	inp_update_necp_policy(inp, NULL, NULL, 0);
#endif /* NECP */
}
2920 
2921 /*
2922  * Remove PCB from various lists.
 2923  * Must be called while the pcbinfo lock is held in exclusive mode.
2924  */
void
in_pcbremlists(struct inpcb *inp)
{
	/* Advance the pcbinfo generation count and stamp it on this inp */
	inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt;

	/*
	 * Check if it's in hashlist -- an inp is placed in hashlist when
	 * it's local port gets assigned. So it should also be present
	 * in the port list.
	 */
	if (inp->inp_flags2 & INP2_INHASHLIST) {
		struct inpcbport *phd = inp->inp_phd;

		VERIFY(phd != NULL && inp->inp_lport > 0);

		/* Unlink from the address hash and clear the link fields */
		LIST_REMOVE(inp, inp_hash);
		inp->inp_hash.le_next = NULL;
		inp->inp_hash.le_prev = NULL;

		/* Unlink from the per-port list; free the port head if empty */
		LIST_REMOVE(inp, inp_portlist);
		inp->inp_portlist.le_next = NULL;
		inp->inp_portlist.le_prev = NULL;
		if (LIST_EMPTY(&phd->phd_pcblist)) {
			LIST_REMOVE(phd, phd_hash);
			kfree_type(struct inpcbport, phd);
		}
		inp->inp_phd = NULL;
		inp->inp_flags2 &= ~INP2_INHASHLIST;
#if SKYWALK
		/* Free up the port in the namespace registrar */
		netns_release(&inp->inp_netns_token);
		netns_release(&inp->inp_wildcard_netns_token);
#endif /* SKYWALK */
	}
	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));

	if (inp->inp_flags2 & INP2_TIMEWAIT) {
		/* Remove from time-wait queue */
		tcp_remove_from_time_wait(inp);
		inp->inp_flags2 &= ~INP2_TIMEWAIT;
		VERIFY(inp->inp_pcbinfo->ipi_twcount != 0);
		inp->inp_pcbinfo->ipi_twcount--;
	} else {
		/* Remove from global inp list if it is not time-wait */
		LIST_REMOVE(inp, inp_list);
	}

	if (inp->inp_flags2 & INP2_IN_FCTREE) {
		/* Detach from the flow-control tree; this clears INP2_IN_FCTREE */
		inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED | INPFC_REMOVE));
		VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE));
	}

	inp->inp_pcbinfo->ipi_count--;
}
2979 
2980 /*
2981  * Mechanism used to defer the memory release of PCBs
2982  * The pcb list will contain the pcb until the reaper can clean it up if
2983  * the following conditions are met:
2984  *	1) state "DEAD",
2985  *	2) wantcnt is STOPUSING
2986  *	3) usecount is 0
 2987  * This function will be called to either mark the pcb as unusable
 2987  * (WNT_STOPUSING), or to acquire/release a want reference on it
 2987  * (WNT_ACQUIRE / WNT_RELEASE).
2988  */
int
in_pcb_checkstate(struct inpcb *pcb, int mode, int locked)
{
	/*
	 * inp_wantcnt is manipulated lock-free with compare-and-swap.
	 * The low 16 bits hold either a want (reference) count or the
	 * sentinel 0xffff, which marks the pcb as STOPUSING.
	 */
	volatile UInt32 *wantcnt = (volatile UInt32 *)&pcb->inp_wantcnt;
	UInt32 origwant;
	UInt32 newwant;

	switch (mode) {
	case WNT_STOPUSING:
		/*
		 * Try to mark the pcb as ready for recycling.  CAS with
		 * STOPUSING, if success we're good, if it's in use, will
		 * be marked later
		 */
		if (locked == 0) {
			socket_lock(pcb->inp_socket, 1);
		}
		pcb->inp_state = INPCB_STATE_DEAD;

stopusing:
		if (pcb->inp_socket->so_usecount < 0) {
			panic("%s: pcb=%p so=%p usecount is negative",
			    __func__, pcb, pcb->inp_socket);
			/* NOTREACHED */
		}
		if (locked == 0) {
			socket_unlock(pcb->inp_socket, 1);
		}

		/* Ask the garbage collector to come around soon */
		inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST);

		origwant = *wantcnt;
		if ((UInt16) origwant == 0xffff) { /* should stop using */
			return WNT_STOPUSING;
		}
		newwant = 0xffff;
		if ((UInt16) origwant == 0) {
			/* try to mark it as unusable now */
			OSCompareAndSwap(origwant, newwant, wantcnt);
		}
		return WNT_STOPUSING;

	case WNT_ACQUIRE:
		/*
		 * Try to increase reference to pcb.  If WNT_STOPUSING
		 * should bail out.  If socket state DEAD, try to set count
		 * to STOPUSING, return failed otherwise increase cnt.
		 */
		do {
			origwant = *wantcnt;
			if ((UInt16) origwant == 0xffff) {
				/* should stop using */
				return WNT_STOPUSING;
			}
			newwant = origwant + 1;
		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
		return WNT_ACQUIRE;

	case WNT_RELEASE:
		/*
		 * Release reference.  If result is null and pcb state
		 * is DEAD, set wanted bit to STOPUSING
		 */
		if (locked == 0) {
			socket_lock(pcb->inp_socket, 1);
		}

		do {
			origwant = *wantcnt;
			if ((UInt16) origwant == 0x0) {
				/* releasing a reference never taken is a bug */
				panic("%s: pcb=%p release with zero count",
				    __func__, pcb);
				/* NOTREACHED */
			}
			if ((UInt16) origwant == 0xffff) {
				/* should stop using */
				if (locked == 0) {
					socket_unlock(pcb->inp_socket, 1);
				}
				return WNT_STOPUSING;
			}
			newwant = origwant - 1;
		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));

		/* A dead pcb whose last want ref just dropped goes to STOPUSING */
		if (pcb->inp_state == INPCB_STATE_DEAD) {
			goto stopusing;
		}
		if (pcb->inp_socket->so_usecount < 0) {
			panic("%s: RELEASE pcb=%p so=%p usecount is negative",
			    __func__, pcb, pcb->inp_socket);
			/* NOTREACHED */
		}

		if (locked == 0) {
			socket_unlock(pcb->inp_socket, 1);
		}
		return WNT_RELEASE;

	default:
		panic("%s: so=%p not a valid state =%x", __func__,
		    pcb->inp_socket, mode);
		/* NOTREACHED */
	}

	/* NOTREACHED */
	return mode;
}
3096 
3097 /*
3098  * inpcb_to_compat copies specific bits of an inpcb to a inpcb_compat.
3099  * The inpcb_compat data structure is passed to user space and must
3100  * not change. We intentionally avoid copying pointers.
3101  */
3102 void
inpcb_to_compat(struct inpcb * inp,struct inpcb_compat * inp_compat)3103 inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat)
3104 {
3105 	bzero(inp_compat, sizeof(*inp_compat));
3106 	inp_compat->inp_fport = inp->inp_fport;
3107 	inp_compat->inp_lport = inp->inp_lport;
3108 	inp_compat->nat_owner = 0;
3109 	inp_compat->nat_cookie = 0;
3110 	inp_compat->inp_gencnt = inp->inp_gencnt;
3111 	inp_compat->inp_flags = inp->inp_flags;
3112 	inp_compat->inp_flow = inp->inp_flow;
3113 	inp_compat->inp_vflag = inp->inp_vflag;
3114 	inp_compat->inp_ip_ttl = inp->inp_ip_ttl;
3115 	inp_compat->inp_ip_p = inp->inp_ip_p;
3116 	inp_compat->inp_dependfaddr.inp6_foreign =
3117 	    inp->inp_dependfaddr.inp6_foreign;
3118 	inp_compat->inp_dependladdr.inp6_local =
3119 	    inp->inp_dependladdr.inp6_local;
3120 	inp_compat->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
3121 	inp_compat->inp_depend6.inp6_hlim = 0;
3122 	inp_compat->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
3123 	inp_compat->inp_depend6.inp6_ifindex = 0;
3124 	inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
3125 }
3126 
3127 #if XNU_TARGET_OS_OSX
3128 void
inpcb_to_xinpcb64(struct inpcb * inp,struct xinpcb64 * xinp)3129 inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp)
3130 {
3131 	xinp->inp_fport = inp->inp_fport;
3132 	xinp->inp_lport = inp->inp_lport;
3133 	xinp->inp_gencnt = inp->inp_gencnt;
3134 	xinp->inp_flags = inp->inp_flags;
3135 	xinp->inp_flow = inp->inp_flow;
3136 	xinp->inp_vflag = inp->inp_vflag;
3137 	xinp->inp_ip_ttl = inp->inp_ip_ttl;
3138 	xinp->inp_ip_p = inp->inp_ip_p;
3139 	xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
3140 	xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;
3141 	xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
3142 	xinp->inp_depend6.inp6_hlim = 0;
3143 	xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
3144 	xinp->inp_depend6.inp6_ifindex = 0;
3145 	xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
3146 }
3147 #endif /* XNU_TARGET_OS_OSX */
3148 
3149 /*
3150  * The following routines implement this scheme:
3151  *
3152  * Callers of ip_output() that intend to cache the route in the inpcb pass
3153  * a local copy of the struct route to ip_output().  Using a local copy of
3154  * the cached route significantly simplifies things as IP no longer has to
3155  * worry about having exclusive access to the passed in struct route, since
3156  * it's defined in the caller's stack; in essence, this allows for a lock-
3157  * less operation when updating the struct route at the IP level and below,
3158  * whenever necessary. The scheme works as follows:
3159  *
3160  * Prior to dropping the socket's lock and calling ip_output(), the caller
3161  * copies the struct route from the inpcb into its stack, and adds a reference
3162  * to the cached route entry, if there was any.  The socket's lock is then
3163  * dropped and ip_output() is called with a pointer to the copy of struct
3164  * route defined on the stack (not to the one in the inpcb.)
3165  *
3166  * Upon returning from ip_output(), the caller then acquires the socket's
3167  * lock and synchronizes the cache; if there is no route cached in the inpcb,
3168  * it copies the local copy of struct route (which may or may not contain any
3169  * route) back into the cache; otherwise, if the inpcb has a route cached in
3170  * it, the one in the local copy will be freed, if there's any.  Trashing the
3171  * cached route in the inpcb can be avoided because ip_output() is single-
3172  * threaded per-PCB (i.e. multiple transmits on a PCB are always serialized
3173  * by the socket/transport layer.)
3174  */
3175 void
inp_route_copyout(struct inpcb * inp,struct route * dst)3176 inp_route_copyout(struct inpcb *inp, struct route *dst)
3177 {
3178 	struct route *src = &inp->inp_route;
3179 
3180 	socket_lock_assert_owned(inp->inp_socket);
3181 
3182 	/*
3183 	 * If the route in the PCB is stale or not for IPv4, blow it away;
3184 	 * this is possible in the case of IPv4-mapped address case.
3185 	 */
3186 	if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) {
3187 		ROUTE_RELEASE(src);
3188 	}
3189 
3190 	route_copyout(dst, src, sizeof(*dst));
3191 }
3192 
3193 void
inp_route_copyin(struct inpcb * inp,struct route * src)3194 inp_route_copyin(struct inpcb *inp, struct route *src)
3195 {
3196 	struct route *dst = &inp->inp_route;
3197 
3198 	socket_lock_assert_owned(inp->inp_socket);
3199 
3200 	/* Minor sanity check */
3201 	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3202 		panic("%s: wrong or corrupted route: %p", __func__, src);
3203 	}
3204 
3205 	route_copyin(src, dst, sizeof(*src));
3206 }
3207 
3208 /*
3209  * Handler for setting IP_BOUND_IF/IPV6_BOUND_IF socket option.
3210  */
3211 static void
inp_bindif_common(struct inpcb * inp,struct ifnet * ifp)3212 inp_bindif_common(struct inpcb *inp, struct ifnet *ifp)
3213 {
3214 	/*
3215 	 * A zero interface scope value indicates an "unbind".
3216 	 * Otherwise, take in whatever value the app desires;
3217 	 * the app may already know the scope (or force itself
3218 	 * to such a scope) ahead of time before the interface
3219 	 * gets attached.  It doesn't matter either way; any
3220 	 * route lookup from this point on will require an
3221 	 * exact match for the embedded interface scope.
3222 	 */
3223 	inp->inp_boundifp = ifp;
3224 	if (inp->inp_boundifp == NULL) {
3225 		inp->inp_flags &= ~INP_BOUND_IF;
3226 	} else {
3227 		inp->inp_flags |= INP_BOUND_IF;
3228 	}
3229 
3230 	/* Blow away any cached route in the PCB */
3231 	ROUTE_RELEASE(&inp->inp_route);
3232 }
3233 
3234 
3235 int
inp_bindif(struct inpcb * inp,unsigned int ifscope,struct ifnet ** pifp)3236 inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp)
3237 {
3238 	struct ifnet *ifp = NULL;
3239 
3240 	ifnet_head_lock_shared();
3241 	if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE &&
3242 	    (ifp = ifindex2ifnet[ifscope]) == NULL)) {
3243 		ifnet_head_done();
3244 		return ENXIO;
3245 	}
3246 	ifnet_head_done();
3247 
3248 	VERIFY(ifp != NULL || ifscope == IFSCOPE_NONE);
3249 
3250 	inp_bindif_common(inp, ifp);
3251 
3252 	if (pifp != NULL) {
3253 		*pifp = ifp;
3254 	}
3255 
3256 	return 0;
3257 }
3258 
3259 int
inp_bindtodevice(struct inpcb * inp,const char * ifname)3260 inp_bindtodevice(struct inpcb *inp, const char *ifname)
3261 {
3262 	ifnet_ref_t ifp = NULL;
3263 
3264 	if (*ifname != 0) {
3265 		int error = ifnet_find_by_name(ifname, &ifp);
3266 		if (error != 0) {
3267 			return error;
3268 		}
3269 	}
3270 
3271 	inp_bindif_common(inp, ifp);
3272 
3273 	if (ifp != NULL) {
3274 		ifnet_release(ifp);
3275 	}
3276 	return 0;
3277 }
3278 
3279 /*
3280  * Handler for setting IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
3281  * as well as for setting PROC_UUID_NO_CELLULAR policy.
3282  */
3283 void
inp_set_nocellular(struct inpcb * inp)3284 inp_set_nocellular(struct inpcb *inp)
3285 {
3286 	inp->inp_flags |= INP_NO_IFT_CELLULAR;
3287 
3288 	/* Blow away any cached route in the PCB */
3289 	ROUTE_RELEASE(&inp->inp_route);
3290 }
3291 
3292 /*
3293  * Handler for clearing IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
3294  * as well as for clearing PROC_UUID_NO_CELLULAR policy.
3295  */
3296 void
inp_clear_nocellular(struct inpcb * inp)3297 inp_clear_nocellular(struct inpcb *inp)
3298 {
3299 	struct socket *so = inp->inp_socket;
3300 
3301 	/*
3302 	 * SO_RESTRICT_DENY_CELLULAR socket restriction issued on the socket
3303 	 * has a higher precendence than INP_NO_IFT_CELLULAR.  Clear the flag
3304 	 * if and only if the socket is unrestricted.
3305 	 */
3306 	if (so != NULL && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
3307 		inp->inp_flags &= ~INP_NO_IFT_CELLULAR;
3308 
3309 		/* Blow away any cached route in the PCB */
3310 		ROUTE_RELEASE(&inp->inp_route);
3311 	}
3312 }
3313 
3314 void
inp_set_noexpensive(struct inpcb * inp)3315 inp_set_noexpensive(struct inpcb *inp)
3316 {
3317 	inp->inp_flags2 |= INP2_NO_IFF_EXPENSIVE;
3318 
3319 	/* Blow away any cached route in the PCB */
3320 	ROUTE_RELEASE(&inp->inp_route);
3321 }
3322 
3323 void
inp_set_noconstrained(struct inpcb * inp)3324 inp_set_noconstrained(struct inpcb *inp)
3325 {
3326 	inp->inp_flags2 |= INP2_NO_IFF_CONSTRAINED;
3327 
3328 	/* Blow away any cached route in the PCB */
3329 	ROUTE_RELEASE(&inp->inp_route);
3330 }
3331 
3332 void
inp_set_awdl_unrestricted(struct inpcb * inp)3333 inp_set_awdl_unrestricted(struct inpcb *inp)
3334 {
3335 	inp->inp_flags2 |= INP2_AWDL_UNRESTRICTED;
3336 
3337 	/* Blow away any cached route in the PCB */
3338 	ROUTE_RELEASE(&inp->inp_route);
3339 }
3340 
3341 boolean_t
inp_get_awdl_unrestricted(struct inpcb * inp)3342 inp_get_awdl_unrestricted(struct inpcb *inp)
3343 {
3344 	return (inp->inp_flags2 & INP2_AWDL_UNRESTRICTED) ? TRUE : FALSE;
3345 }
3346 
3347 void
inp_clear_awdl_unrestricted(struct inpcb * inp)3348 inp_clear_awdl_unrestricted(struct inpcb *inp)
3349 {
3350 	inp->inp_flags2 &= ~INP2_AWDL_UNRESTRICTED;
3351 
3352 	/* Blow away any cached route in the PCB */
3353 	ROUTE_RELEASE(&inp->inp_route);
3354 }
3355 
3356 void
inp_set_intcoproc_allowed(struct inpcb * inp)3357 inp_set_intcoproc_allowed(struct inpcb *inp)
3358 {
3359 	inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
3360 
3361 	/* Blow away any cached route in the PCB */
3362 	ROUTE_RELEASE(&inp->inp_route);
3363 }
3364 
3365 boolean_t
inp_get_intcoproc_allowed(struct inpcb * inp)3366 inp_get_intcoproc_allowed(struct inpcb *inp)
3367 {
3368 	return (inp->inp_flags2 & INP2_INTCOPROC_ALLOWED) ? TRUE : FALSE;
3369 }
3370 
3371 void
inp_clear_intcoproc_allowed(struct inpcb * inp)3372 inp_clear_intcoproc_allowed(struct inpcb *inp)
3373 {
3374 	inp->inp_flags2 &= ~INP2_INTCOPROC_ALLOWED;
3375 
3376 	/* Blow away any cached route in the PCB */
3377 	ROUTE_RELEASE(&inp->inp_route);
3378 }
3379 
3380 void
inp_set_management_allowed(struct inpcb * inp)3381 inp_set_management_allowed(struct inpcb *inp)
3382 {
3383 	inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
3384 	inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;
3385 
3386 	/* Blow away any cached route in the PCB */
3387 	ROUTE_RELEASE(&inp->inp_route);
3388 }
3389 
3390 boolean_t
inp_get_management_allowed(struct inpcb * inp)3391 inp_get_management_allowed(struct inpcb *inp)
3392 {
3393 	return (inp->inp_flags2 & INP2_MANAGEMENT_ALLOWED) ? TRUE : FALSE;
3394 }
3395 
3396 void
inp_clear_management_allowed(struct inpcb * inp)3397 inp_clear_management_allowed(struct inpcb *inp)
3398 {
3399 	inp->inp_flags2 &= ~INP2_MANAGEMENT_ALLOWED;
3400 
3401 	/* Blow away any cached route in the PCB */
3402 	ROUTE_RELEASE(&inp->inp_route);
3403 }
3404 
3405 void
inp_set_ultra_constrained_allowed(struct inpcb * inp)3406 inp_set_ultra_constrained_allowed(struct inpcb *inp)
3407 {
3408 	inp->inp_flags2 |= INP2_ULTRA_CONSTRAINED_ALLOWED;
3409 	inp->inp_flags2 |= INP2_ULTRA_CONSTRAINED_CHECKED;
3410 
3411 	/* Blow away any cached route in the PCB */
3412 	ROUTE_RELEASE(&inp->inp_route);
3413 }
3414 
3415 #if NECP
3416 /*
3417  * Called when PROC_UUID_NECP_APP_POLICY is set.
3418  */
3419 void
inp_set_want_app_policy(struct inpcb * inp)3420 inp_set_want_app_policy(struct inpcb *inp)
3421 {
3422 	inp->inp_flags2 |= INP2_WANT_APP_POLICY;
3423 }
3424 
3425 /*
3426  * Called when PROC_UUID_NECP_APP_POLICY is cleared.
3427  */
3428 void
inp_clear_want_app_policy(struct inpcb * inp)3429 inp_clear_want_app_policy(struct inpcb *inp)
3430 {
3431 	inp->inp_flags2 &= ~INP2_WANT_APP_POLICY;
3432 }
3433 #endif /* NECP */
3434 
3435 /*
3436  * Calculate flow hash for an inp, used by an interface to identify a
3437  * flow. When an interface provides flow control advisory, this flow
3438  * hash is used as an identifier.
3439  */
u_int32_t
inp_calc_flowhash(struct inpcb *inp)
{
#if SKYWALK

	uint32_t flowid;
	struct flowidns_flow_key fk;

	bzero(&fk, sizeof(fk));

	/* Build the flow key from this pcb's address/port 4-tuple */
	if (inp->inp_vflag & INP_IPV4) {
		fk.ffk_af = AF_INET;
		fk.ffk_laddr_v4 = inp->inp_laddr;
		fk.ffk_raddr_v4 = inp->inp_faddr;
	} else {
		fk.ffk_af = AF_INET6;
		fk.ffk_laddr_v6 = inp->in6p_laddr;
		fk.ffk_raddr_v6 = inp->in6p_faddr;
		/* clear embedded scope ID */
		if (IN6_IS_SCOPE_EMBED(&fk.ffk_laddr_v6)) {
			fk.ffk_laddr_v6.s6_addr16[1] = 0;
		}
		if (IN6_IS_SCOPE_EMBED(&fk.ffk_raddr_v6)) {
			fk.ffk_raddr_v6.s6_addr16[1] = 0;
		}
	}

	fk.ffk_lport = inp->inp_lport;
	fk.ffk_rport = inp->inp_fport;
	/* fall back to the socket's protocol when inp_ip_p is unset */
	fk.ffk_proto = (inp->inp_ip_p != 0) ? inp->inp_ip_p :
	    (uint8_t)SOCK_PROTO(inp->inp_socket);
	flowidns_allocate_flowid(FLOWIDNS_DOMAIN_INPCB, &fk, &flowid);
	/* Insert the inp into inp_fc_tree */
	lck_mtx_lock_spin(&inp_fc_lck);
	ASSERT(inp->inp_flowhash == 0);
	ASSERT((inp->inp_flags2 & INP2_IN_FCTREE) == 0);
	inp->inp_flowhash = flowid;
	VERIFY(RB_INSERT(inp_fc_tree, &inp_fc_tree, inp) == NULL);
	inp->inp_flags2 |= INP2_IN_FCTREE;
	lck_mtx_unlock(&inp_fc_lck);

	return flowid;

#else /* !SKYWALK */

	struct inp_flowhash_key fh __attribute__((aligned(8)));
	u_int32_t flowhash = 0;
	struct inpcb *tmp_inp = NULL;

	/* Lazily initialize the global hash seed */
	if (inp_hash_seed == 0) {
		inp_hash_seed = RandomULong();
	}

	bzero(&fh, sizeof(fh));

	/* Hash input: addresses, ports, address family and protocol */
	bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr));
	bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr));

	fh.infh_lport = inp->inp_lport;
	fh.infh_fport = inp->inp_fport;
	fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
	fh.infh_proto = inp->inp_ip_p;
	fh.infh_rand1 = RandomULong();
	fh.infh_rand2 = RandomULong();

try_again:
	/* Zero is reserved to mean "no flowhash"; reroll until non-zero */
	flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed);
	if (flowhash == 0) {
		/* try to get a non-zero flowhash */
		inp_hash_seed = RandomULong();
		goto try_again;
	}

	inp->inp_flowhash = flowhash;

	/* Insert the inp into inp_fc_tree */
	lck_mtx_lock_spin(&inp_fc_lck);
	tmp_inp = RB_FIND(inp_fc_tree, &inp_fc_tree, inp);
	if (tmp_inp != NULL) {
		/*
		 * There is a different inp with the same flowhash.
		 * There can be a collision on flow hash but the
		 * probability is low.  Let's recompute the
		 * flowhash.
		 */
		lck_mtx_unlock(&inp_fc_lck);
		/* recompute hash seed */
		inp_hash_seed = RandomULong();
		goto try_again;
	}

	RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
	inp->inp_flags2 |= INP2_IN_FCTREE;
	lck_mtx_unlock(&inp_fc_lck);

	return flowhash;

#endif /* !SKYWALK */
}
3539 
3540 void
inp_flowadv(uint32_t flowhash)3541 inp_flowadv(uint32_t flowhash)
3542 {
3543 	struct inpcb *inp;
3544 
3545 	inp = inp_fc_getinp(flowhash, 0);
3546 
3547 	if (inp == NULL) {
3548 		return;
3549 	}
3550 	inp_fc_feedback(inp);
3551 }
3552 
3553 /*
3554  * Function to compare inp_fc_entries in inp flow control tree
3555  */
3556 static inline int
infc_cmp(const struct inpcb * inp1,const struct inpcb * inp2)3557 infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
3558 {
3559 	return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
3560 	           sizeof(inp1->inp_flowhash));
3561 }
3562 
static struct inpcb *
inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
{
	struct inpcb *inp = NULL;
	int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;

	lck_mtx_lock_spin(&inp_fc_lck);
	/* key_inp serves only as an RB lookup key (flowhash is the key) */
	key_inp.inp_flowhash = flowhash;
	inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
	if (inp == NULL) {
		/* inp is not present, return */
		lck_mtx_unlock(&inp_fc_lck);
		return NULL;
	}

	if (flags & INPFC_REMOVE) {
		ASSERT((inp->inp_flags2 & INP2_IN_FCTREE) != 0);
		/* leave spin mode before the heavier unlink work below */
		lck_mtx_convert_spin(&inp_fc_lck);
		RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
		bzero(&(inp->infc_link), sizeof(inp->infc_link));
#if SKYWALK
		/* Return the flow ID to the flow-id namespace */
		VERIFY(inp->inp_flowhash != 0);
		flowidns_release_flowid(inp->inp_flowhash);
		inp->inp_flowhash = 0;
#endif /* SKYWALK */
		inp->inp_flags2 &= ~INP2_IN_FCTREE;
		lck_mtx_unlock(&inp_fc_lck);
		return NULL;
	}

	/* Lookup path: take a want ref; fail if the pcb is going away */
	if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
		inp = NULL;
	}
	lck_mtx_unlock(&inp_fc_lck);

	return inp;
}
3600 
static void
inp_fc_feedback(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	/* we already hold a want_cnt on this inp, socket can't be null */
	VERIFY(so != NULL);
	socket_lock(so, 1);

	/* Drop the caller's want ref; bail if the pcb is being torn down */
	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
		socket_unlock(so, 1);
		return;
	}

	/* Note feedback arriving while a send is in flight (see inp_set_fc_state) */
	if (inp->inp_sndinprog_cnt > 0) {
		inp->inp_flags |= INP_FC_FEEDBACK;
	}

	/*
	 * Return if the connection is not in flow-controlled state.
	 * This can happen if the connection experienced
	 * loss while it was in flow controlled state
	 */
	if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
		socket_unlock(so, 1);
		return;
	}
	inp_reset_fc_state(inp);

	/* TCP additionally gets an explicit unthrottle to resume sending */
	if (SOCK_TYPE(so) == SOCK_STREAM) {
		inp_fc_unthrottle_tcp(inp);
	}

	socket_unlock(so, 1);
}
3636 
3637 static void
inp_reset_fc_timerstat(struct inpcb * inp)3638 inp_reset_fc_timerstat(struct inpcb *inp)
3639 {
3640 	uint64_t now;
3641 
3642 	if (inp->inp_fadv_start_time == 0) {
3643 		return;
3644 	}
3645 
3646 	now = net_uptime_us();
3647 	ASSERT(now >= inp->inp_fadv_start_time);
3648 
3649 	inp->inp_fadv_total_time += (now - inp->inp_fadv_start_time);
3650 	inp->inp_fadv_cnt++;
3651 
3652 	inp->inp_fadv_start_time = 0;
3653 }
3654 
3655 static void
inp_set_fc_timerstat(struct inpcb * inp)3656 inp_set_fc_timerstat(struct inpcb *inp)
3657 {
3658 	if (inp->inp_fadv_start_time != 0) {
3659 		return;
3660 	}
3661 
3662 	inp->inp_fadv_start_time = net_uptime_us();
3663 }
3664 
3665 void
inp_reset_fc_state(struct inpcb * inp)3666 inp_reset_fc_state(struct inpcb *inp)
3667 {
3668 	struct socket *so = inp->inp_socket;
3669 	int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
3670 	int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
3671 
3672 	inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
3673 
3674 	inp_reset_fc_timerstat(inp);
3675 
3676 	if (suspended) {
3677 		so->so_flags &= ~(SOF_SUSPENDED);
3678 		soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
3679 	}
3680 
3681 	/* Give a write wakeup to unblock the socket */
3682 	if (needwakeup) {
3683 		sowwakeup(so);
3684 	}
3685 }
3686 
int
inp_set_fc_state(struct inpcb *inp, int advcode)
{
	/* Remember whether we were already flow controlled before this call */
	boolean_t is_flow_controlled = INP_WAIT_FOR_IF_FEEDBACK(inp);
	struct inpcb *tmp_inp = NULL;
	/*
	 * If there was a feedback from the interface when
	 * send operation was in progress, we should ignore
	 * this flow advisory to avoid a race between setting
	 * flow controlled state and receiving feedback from
	 * the interface
	 */
	if (inp->inp_flags & INP_FC_FEEDBACK) {
		return 0;
	}

	inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
	/* Confirm the pcb is still in the flow-control tree and alive */
	if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
	    INPFC_SOLOCKED)) != NULL) {
		if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			goto exit_reset;
		}
		VERIFY(tmp_inp == inp);
		switch (advcode) {
		case FADV_FLOW_CONTROLLED:
			inp->inp_flags |= INP_FLOW_CONTROLLED;
			inp_set_fc_timerstat(inp);
			break;
		case FADV_SUSPENDED:
			inp->inp_flags |= INP_FLOW_SUSPENDED;
			inp_set_fc_timerstat(inp);

			soevent(inp->inp_socket,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));

			/* Record the fact that suspend event was sent */
			inp->inp_socket->so_flags |= SOF_SUSPENDED;
			break;
		}

		/* Throttle TCP only on the transition into flow control */
		if (!is_flow_controlled && SOCK_TYPE(inp->inp_socket) == SOCK_STREAM) {
			inp_fc_throttle_tcp(inp);
		}
		return 1;
	}

exit_reset:
	inp_reset_fc_timerstat(inp);

	return 0;
}
3738 
3739 /*
3740  * Handler for SO_FLUSH socket option.
3741  */
3742 int
inp_flush(struct inpcb * inp,int optval)3743 inp_flush(struct inpcb *inp, int optval)
3744 {
3745 	u_int32_t flowhash = inp->inp_flowhash;
3746 	struct ifnet *rtifp, *oifp;
3747 
3748 	/* Either all classes or one of the valid ones */
3749 	if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) {
3750 		return EINVAL;
3751 	}
3752 
3753 	/* We need a flow hash for identification */
3754 	if (flowhash == 0) {
3755 		return 0;
3756 	}
3757 
3758 	/* Grab the interfaces from the route and pcb */
3759 	rtifp = ((inp->inp_route.ro_rt != NULL) ?
3760 	    inp->inp_route.ro_rt->rt_ifp : NULL);
3761 	oifp = inp->inp_last_outifp;
3762 
3763 	if (rtifp != NULL) {
3764 		if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3765 	}
3766 	if (oifp != NULL && oifp != rtifp) {
3767 		if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3768 	}
3769 
3770 	return 0;
3771 }
3772 
3773 /*
3774  * Clear the INP_INADDR_ANY flag (special case for PPP only)
3775  */
3776 void
inp_clear_INP_INADDR_ANY(struct socket * so)3777 inp_clear_INP_INADDR_ANY(struct socket *so)
3778 {
3779 	struct inpcb *inp = NULL;
3780 
3781 	socket_lock(so, 1);
3782 	inp = sotoinpcb(so);
3783 	if (inp) {
3784 		inp->inp_flags &= ~INP_INADDR_ANY;
3785 	}
3786 	socket_unlock(so, 1);
3787 }
3788 
3789 void
inp_get_soprocinfo(struct inpcb * inp,struct so_procinfo * soprocinfo)3790 inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
3791 {
3792 	struct socket *so = inp->inp_socket;
3793 
3794 	soprocinfo->spi_pid = so->last_pid;
3795 	strbufcpy(soprocinfo->spi_proc_name, inp->inp_last_proc_name);
3796 	if (so->last_pid != 0) {
3797 		uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
3798 	}
3799 	/*
3800 	 * When not delegated, the effective pid is the same as the real pid
3801 	 */
3802 	if (so->so_flags & SOF_DELEGATED) {
3803 		soprocinfo->spi_delegated = 1;
3804 		soprocinfo->spi_epid = so->e_pid;
3805 		uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
3806 	} else {
3807 		soprocinfo->spi_delegated = 0;
3808 		soprocinfo->spi_epid = so->last_pid;
3809 	}
3810 	strbufcpy(soprocinfo->spi_e_proc_name, inp->inp_e_proc_name);
3811 }
3812 
3813 int
inp_findinpcb_procinfo(struct inpcbinfo * pcbinfo,uint32_t flowhash,struct so_procinfo * soprocinfo)3814 inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
3815     struct so_procinfo *soprocinfo)
3816 {
3817 	struct inpcb *inp = NULL;
3818 	int found = 0;
3819 
3820 	bzero(soprocinfo, sizeof(struct so_procinfo));
3821 
3822 	if (!flowhash) {
3823 		return -1;
3824 	}
3825 
3826 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
3827 	LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
3828 		if (inp->inp_state != INPCB_STATE_DEAD &&
3829 		    inp->inp_socket != NULL &&
3830 		    inp->inp_flowhash == flowhash) {
3831 			found = 1;
3832 			inp_get_soprocinfo(inp, soprocinfo);
3833 			break;
3834 		}
3835 	}
3836 	lck_rw_done(&pcbinfo->ipi_lock);
3837 
3838 	return found;
3839 }
3840 
3841 #if CONFIG_PROC_UUID_POLICY
static void
inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
{
	struct socket *so = inp->inp_socket;
	int before, after;

	VERIFY(so != NULL);
	VERIFY(inp->inp_state != INPCB_STATE_DEAD);

	/* Record whether cellular was denied before applying the change */
	before = INP_NO_CELLULAR(inp);
	if (set) {
		inp_set_nocellular(inp);
	} else {
		/* may be a no-op when SO_RESTRICT_DENY_CELLULAR is in effect */
		inp_clear_nocellular(inp);
	}
	after = INP_NO_CELLULAR(inp);
	/* Log only actual transitions, and only when policy logging is on */
	if (net_io_policy_log && (before != after)) {
		static const char *ok = "OK";
		static const char *nok = "NOACCESS";
		uuid_string_t euuid_buf;
		pid_t epid;

		/* Report the effective (delegated) identity when present */
		if (so->so_flags & SOF_DELEGATED) {
			uuid_unparse(so->e_uuid, euuid_buf);
			epid = so->e_pid;
		} else {
			uuid_unparse(so->last_uuid, euuid_buf);
			epid = so->last_pid;
		}

		/* allow this socket to generate another notification event */
		so->so_ifdenied_notifies = 0;

		log(LOG_DEBUG, "%s: so %llu [%d,%d] epid %d "
		    "euuid %s%s %s->%s\n", __func__,
		    so->so_gencnt, SOCK_DOM(so),
		    SOCK_TYPE(so), epid, euuid_buf,
		    (so->so_flags & SOF_DELEGATED) ?
		    " [delegated]" : "",
		    ((before < after) ? ok : nok),
		    ((before < after) ? nok : ok));
	}
}
3885 
3886 #if NECP
static void
inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
{
	struct socket *so = inp->inp_socket;
	int before, after;

	VERIFY(so != NULL);
	VERIFY(inp->inp_state != INPCB_STATE_DEAD);

	/* Record the flag's state before applying the change */
	before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
	if (set) {
		inp_set_want_app_policy(inp);
	} else {
		inp_clear_want_app_policy(inp);
	}
	after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
	/* Log only actual transitions, and only when policy logging is on */
	if (net_io_policy_log && (before != after)) {
		static const char *wanted = "WANTED";
		static const char *unwanted = "UNWANTED";
		uuid_string_t euuid_buf;
		pid_t epid;

		/* Report the effective (delegated) identity when present */
		if (so->so_flags & SOF_DELEGATED) {
			uuid_unparse(so->e_uuid, euuid_buf);
			epid = so->e_pid;
		} else {
			uuid_unparse(so->last_uuid, euuid_buf);
			epid = so->last_pid;
		}

		log(LOG_DEBUG, "%s: so %llu [%d,%d] epid %d "
		    "euuid %s%s %s->%s\n", __func__,
		    so->so_gencnt, SOCK_DOM(so),
		    SOCK_TYPE(so), epid, euuid_buf,
		    (so->so_flags & SOF_DELEGATED) ?
		    " [delegated]" : "",
		    ((before < after) ? unwanted : wanted),
		    ((before < after) ? wanted : unwanted));
	}
}
3927 #endif /* NECP */
3928 #endif /* !CONFIG_PROC_UUID_POLICY */
3929 
3930 #if NECP
3931 void
inp_update_necp_policy(struct inpcb * inp,struct sockaddr * override_local_addr,struct sockaddr * override_remote_addr,u_int override_bound_interface)3932 inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int override_bound_interface)
3933 {
3934 	necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
3935 	if (necp_socket_should_rescope(inp) &&
3936 	    inp->inp_lport == 0 &&
3937 	    inp->inp_laddr.s_addr == INADDR_ANY &&
3938 	    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
3939 		// If we should rescope, and the socket is not yet bound
3940 		inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
3941 		inp->inp_flags2 |= INP2_SCOPED_BY_NECP;
3942 	}
3943 }
3944 #endif /* NECP */
3945 
/*
 * Re-evaluate the per-process UUID network policy for this inpcb.
 *
 * Looks up the policy flags registered (via proc_uuid_policy) for the
 * process that owns -- or is delegated -- this socket and, when the
 * policy generation count has changed since the last check, applies or
 * clears the cellular and NECP app-policy restrictions accordingly.
 *
 * Returns 0 on success; ENOENT from the lookup means "no policy entry"
 * and also yields 0.  Any other errno from proc_uuid_policy_lookup() is
 * propagated.
 */
int
inp_update_policy(struct inpcb *inp)
{
#if CONFIG_PROC_UUID_POLICY
	struct socket *so = inp->inp_socket;
	uint32_t pflags = 0;
	int32_t ogencnt;
	int err = 0;
	uint8_t *lookup_uuid = NULL;

	/* Nothing to do if the mechanism is disabled or the pcb is going away */
	if (!net_io_policy_uuid ||
	    so == NULL || inp->inp_state == INPCB_STATE_DEAD) {
		return 0;
	}

	/*
	 * Kernel-created sockets that aren't delegating other sockets
	 * are currently exempted from UUID policy checks.
	 */
	if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) {
		return 0;
	}

#if defined(XNU_TARGET_OS_OSX)
	/* Prefer the responsible process' UUID when one is recorded */
	if (so->so_rpid > 0) {
		lookup_uuid = so->so_ruuid;
		ogencnt = so->so_policy_gencnt;
		err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
	}
#endif
	/*
	 * Fall back to the delegated (or last) process' UUID when no
	 * responsible-process lookup happened, or it found no entry.
	 */
	if (lookup_uuid == NULL || err == ENOENT) {
		lookup_uuid = ((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid);
		ogencnt = so->so_policy_gencnt;
		err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
	}

	/*
	 * Discard cached generation count if the entry is gone (ENOENT),
	 * so that we go thru the checks below.
	 */
	if (err == ENOENT && ogencnt != 0) {
		so->so_policy_gencnt = 0;
	}

	/*
	 * If the generation count has changed, inspect the policy flags
	 * and act accordingly.  If a policy flag was previously set and
	 * the UUID is no longer present in the table (ENOENT), treat it
	 * as if the flag has been cleared.
	 */
	if ((err == 0 || err == ENOENT) && ogencnt != so->so_policy_gencnt) {
		/* update cellular policy for this socket */
		if (err == 0 && (pflags & PROC_UUID_NO_CELLULAR)) {
			inp_update_cellular_policy(inp, TRUE);
		} else if (!(pflags & PROC_UUID_NO_CELLULAR)) {
			inp_update_cellular_policy(inp, FALSE);
		}
#if NECP
		/* update necp want app policy for this socket */
		if (err == 0 && (pflags & PROC_UUID_NECP_APP_POLICY)) {
			inp_update_necp_want_app_policy(inp, TRUE);
		} else if (!(pflags & PROC_UUID_NECP_APP_POLICY)) {
			inp_update_necp_want_app_policy(inp, FALSE);
		}
#endif /* NECP */
	}

	/* ENOENT simply means "no policy for this UUID" -- not an error */
	return (err == ENOENT) ? 0 : err;
#else /* !CONFIG_PROC_UUID_POLICY */
#pragma unused(inp)
	return 0;
#endif /* !CONFIG_PROC_UUID_POLICY */
}
4019 
/*
 * When non-zero, print a console message each time a packet is denied by
 * the restriction checks below (inp_restricted_recv()/inp_restricted_send()).
 * Tunable at runtime via the net.inet.log_restricted sysctl.
 */
unsigned int log_restricted;
SYSCTL_DECL(_net_inet);
SYSCTL_INT(_net_inet, OID_AUTO, log_restricted,
    CTLFLAG_RW | CTLFLAG_LOCKED, &log_restricted, 0,
    "Log network restrictions");
4025 
4026 
4027 /*
4028  * Called when we need to enforce policy restrictions in the input path.
4029  *
4030  * Returns TRUE if we're not allowed to receive data, otherwise FALSE.
4031  */
static boolean_t
_inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
{
	VERIFY(inp != NULL);

	/*
	 * Inbound restrictions.
	 */
	if (!sorestrictrecv) {
		return FALSE;
	}

	if (ifp == NULL) {
		return FALSE;
	}

	/* Socket opted out of cellular but the packet arrived on cellular */
	if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
		return TRUE;
	}

	/* Socket opted out of expensive interfaces */
	if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
		return TRUE;
	}

	/* Socket opted out of constrained interfaces */
	if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
		return TRUE;
	}

	/* AWDL requires the socket to be explicitly unrestricted */
	if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
		return TRUE;
	}

	/* Interfaces without restricted-receive accept from any socket */
	if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) {
		return FALSE;
	}

	/* Socket explicitly allows receiving on any interface */
	if (inp->inp_flags & INP_RECV_ANYIF) {
		return FALSE;
	}

	/*
	 * An entitled process can use the management interface without being bound
	 * to the interface
	 */
	if (IFNET_IS_MANAGEMENT(ifp)) {
		if (INP_MANAGEMENT_ALLOWED(inp)) {
			return FALSE;
		}
		if (if_management_verbose > 1) {
			os_log(OS_LOG_DEFAULT, "_inp_restricted_recv %s:%d not allowed on management interface %s",
			    proc_best_name(current_proc()), proc_getpid(current_proc()),
			    ifp->if_xname);
		}
		return TRUE;
	}

	/* Being bound to this restricted interface grants reception */
	if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) {
		return FALSE;
	}

	/*
	 * NOTE(review): this check is redundant -- both it and the
	 * unconditional fall-through below return TRUE; presumably kept
	 * for symmetry with _inp_restricted_send().  Confirm before
	 * simplifying.
	 */
	if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
		return TRUE;
	}


	return TRUE;
}
4099 
4100 boolean_t
inp_restricted_recv(struct inpcb * inp,struct ifnet * ifp)4101 inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
4102 {
4103 	boolean_t ret;
4104 
4105 	ret = _inp_restricted_recv(inp, ifp);
4106 	if (ret == TRUE && log_restricted) {
4107 		printf("pid %d (%s) is unable to receive packets on %s\n",
4108 		    proc_getpid(current_proc()), proc_best_name(current_proc()),
4109 		    ifp->if_xname);
4110 	}
4111 	return ret;
4112 }
4113 
4114 /*
4115  * Called when we need to enforce policy restrictions in the output path.
4116  *
4117  * Returns TRUE if we're not allowed to send data out, otherwise FALSE.
4118  */
static boolean_t
_inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
{
	VERIFY(inp != NULL);

	/*
	 * Outbound restrictions.
	 */
	if (!sorestrictsend) {
		return FALSE;
	}

	if (ifp == NULL) {
		return FALSE;
	}

	/* Socket opted out of cellular */
	if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
		return TRUE;
	}

	/* Socket opted out of expensive interfaces */
	if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
		return TRUE;
	}

	/* Socket opted out of constrained interfaces */
	if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
		return TRUE;
	}

	if (IFNET_IS_ULTRA_CONSTRAINED(ifp) && uuid_is_null(inp->necp_client_uuid) &&
	    !INP_ULTRA_CONSTRAINED_ALLOWED(inp)) {
		// Non-NECP-aware sockets are not allowed to use ultra constrained interfaces
		// without an entitlement
		return TRUE;
	}

	/* AWDL requires the socket to be explicitly unrestricted */
	if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
		return TRUE;
	}

	/* Management interfaces require an entitlement (see INP2_MANAGEMENT_ALLOWED) */
	if (IFNET_IS_MANAGEMENT(ifp)) {
		if (!INP_MANAGEMENT_ALLOWED(inp)) {
			if (if_management_verbose > 1) {
				os_log(OS_LOG_DEFAULT, "_inp_restricted_send %s:%d not allowed on management interface %s",
				    proc_best_name(current_proc()), proc_getpid(current_proc()),
				    ifp->if_xname);
			}
			return TRUE;
		}
	}

	/* Co-processor interfaces likewise require an entitlement */
	if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
		return TRUE;
	}

	return FALSE;
}
4175 
4176 boolean_t
inp_restricted_send(struct inpcb * inp,struct ifnet * ifp)4177 inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
4178 {
4179 	boolean_t ret;
4180 
4181 	ret = _inp_restricted_send(inp, ifp);
4182 	if (ret == TRUE && log_restricted) {
4183 		printf("pid %d (%s) is unable to transmit packets on %s\n",
4184 		    proc_getpid(current_proc()), proc_best_name(current_proc()),
4185 		    ifp->if_xname);
4186 	}
4187 	return ret;
4188 }
4189 
4190 inline void
inp_count_sndbytes(struct inpcb * inp,u_int32_t th_ack)4191 inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
4192 {
4193 	struct ifnet *ifp = inp->inp_last_outifp;
4194 	struct socket *so = inp->inp_socket;
4195 	if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
4196 	    (ifp->if_type == IFT_CELLULAR || IFNET_IS_WIFI(ifp))) {
4197 		int32_t unsent;
4198 
4199 		so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
4200 
4201 		/*
4202 		 * There can be data outstanding before the connection
4203 		 * becomes established -- TFO case
4204 		 */
4205 		if (so->so_snd.sb_cc > 0) {
4206 			inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
4207 		}
4208 
4209 		unsent = inp_get_sndbytes_allunsent(so, th_ack);
4210 		if (unsent > 0) {
4211 			inp_incr_sndbytes_unsent(so, unsent);
4212 		}
4213 	}
4214 }
4215 
4216 inline void
inp_incr_sndbytes_total(struct socket * so,int32_t len)4217 inp_incr_sndbytes_total(struct socket *so, int32_t len)
4218 {
4219 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4220 	struct ifnet *ifp = inp->inp_last_outifp;
4221 
4222 	if (ifp != NULL) {
4223 		VERIFY(ifp->if_sndbyte_total >= 0);
4224 		OSAddAtomic64(len, &ifp->if_sndbyte_total);
4225 	}
4226 }
4227 
/*
 * Subtract len from the total send-byte counter of the socket's last
 * outbound interface, clamping at zero rather than going negative.
 */
inline void
inp_decr_sndbytes_total(struct socket *so, int32_t len)
{
	struct inpcb *inp = (struct inpcb *)so->so_pcb;
	struct ifnet *ifp = inp->inp_last_outifp;

	if (ifp != NULL) {
		/*
		 * NOTE(review): the read-compare and the plain store of 0
		 * are not atomic with respect to concurrent updaters, so a
		 * racing decrement could be lost -- presumably acceptable
		 * for a statistics counter; confirm.
		 */
		if (ifp->if_sndbyte_total >= len) {
			OSAddAtomic64(-len, &ifp->if_sndbyte_total);
		} else {
			ifp->if_sndbyte_total = 0;
		}
	}
}
4242 
4243 inline void
inp_incr_sndbytes_unsent(struct socket * so,int32_t len)4244 inp_incr_sndbytes_unsent(struct socket *so, int32_t len)
4245 {
4246 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4247 	struct ifnet *ifp = inp->inp_last_outifp;
4248 
4249 	if (ifp != NULL) {
4250 		VERIFY(ifp->if_sndbyte_unsent >= 0);
4251 		OSAddAtomic64(len, &ifp->if_sndbyte_unsent);
4252 	}
4253 }
4254 
/*
 * Subtract len from the unsent send-byte counter of the socket's last
 * outbound interface, clamping at zero.  No-op unless send-byte
 * accounting was enabled for the socket (SB_SNDBYTE_CNT, set by
 * inp_count_sndbytes()).
 */
inline void
inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
{
	if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
		return;
	}

	/*
	 * NOTE(review): so->so_pcb is dereferenced without a NULL check,
	 * unlike the so == NULL guard above -- presumably callers hold the
	 * socket lock with a live pcb; confirm.
	 */
	struct inpcb *inp = (struct inpcb *)so->so_pcb;
	struct ifnet *ifp = inp->inp_last_outifp;

	if (ifp != NULL) {
		/* Clamp at zero instead of going negative */
		if (ifp->if_sndbyte_unsent >= len) {
			OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
		} else {
			ifp->if_sndbyte_unsent = 0;
		}
	}
}
4273 
4274 inline void
inp_decr_sndbytes_allunsent(struct socket * so,u_int32_t th_ack)4275 inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
4276 {
4277 	int32_t len;
4278 
4279 	if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
4280 		return;
4281 	}
4282 
4283 	len = inp_get_sndbytes_allunsent(so, th_ack);
4284 	inp_decr_sndbytes_unsent(so, len);
4285 }
4286 
4287 #if SKYWALK
4288 inline void
inp_update_netns_flags(struct socket * so)4289 inp_update_netns_flags(struct socket *so)
4290 {
4291 	struct inpcb *inp;
4292 	uint32_t set_flags = 0;
4293 	uint32_t clear_flags = 0;
4294 
4295 	if (!(SOCK_CHECK_DOM(so, AF_INET) || SOCK_CHECK_DOM(so, AF_INET6))) {
4296 		return;
4297 	}
4298 
4299 	inp = sotoinpcb(so);
4300 
4301 	if (inp == NULL) {
4302 		return;
4303 	}
4304 
4305 	if (!NETNS_TOKEN_VALID(&inp->inp_netns_token)) {
4306 		return;
4307 	}
4308 
4309 	if (so->so_options & SO_NOWAKEFROMSLEEP) {
4310 		set_flags |= NETNS_NOWAKEFROMSLEEP;
4311 	} else {
4312 		clear_flags |= NETNS_NOWAKEFROMSLEEP;
4313 	}
4314 
4315 	if (inp->inp_flags & INP_RECV_ANYIF) {
4316 		set_flags |= NETNS_RECVANYIF;
4317 	} else {
4318 		clear_flags |= NETNS_RECVANYIF;
4319 	}
4320 
4321 	if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) {
4322 		set_flags |= NETNS_EXTBGIDLE;
4323 	} else {
4324 		clear_flags |= NETNS_EXTBGIDLE;
4325 	}
4326 
4327 	netns_change_flags(&inp->inp_netns_token, set_flags, clear_flags);
4328 }
4329 #endif /* SKYWALK */
4330 
/*
 * Record network activity for this inpcb at the current net_uptime()
 * in its activity bitmap.
 */
inline void
inp_set_activity_bitmap(struct inpcb *inp)
{
	in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
}
4336 
/*
 * Copy the inpcb's activity bitmap into the caller-supplied buffer ab.
 */
inline void
inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
{
	bcopy(&inp->inp_nw_activity, ab, sizeof(*ab));
}
4342 
4343 void
inp_update_last_owner(struct socket * so,struct proc * p,struct proc * ep)4344 inp_update_last_owner(struct socket *so, struct proc *p, struct proc *ep)
4345 {
4346 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4347 
4348 	if (inp == NULL) {
4349 		return;
4350 	}
4351 
4352 	if (p != NULL) {
4353 		strlcpy(&inp->inp_last_proc_name[0], proc_name_address(p), sizeof(inp->inp_last_proc_name));
4354 	}
4355 	if (so->so_flags & SOF_DELEGATED) {
4356 		if (ep != NULL) {
4357 			strlcpy(&inp->inp_e_proc_name[0], proc_name_address(ep), sizeof(inp->inp_e_proc_name));
4358 		} else {
4359 			inp->inp_e_proc_name[0] = 0;
4360 		}
4361 	} else {
4362 		inp->inp_e_proc_name[0] = 0;
4363 	}
4364 }
4365 
4366 void
inp_copy_last_owner(struct socket * so,struct socket * head)4367 inp_copy_last_owner(struct socket *so, struct socket *head)
4368 {
4369 	struct inpcb *inp = (struct inpcb *)so->so_pcb;
4370 	struct inpcb *head_inp = (struct inpcb *)head->so_pcb;
4371 
4372 	if (inp == NULL || head_inp == NULL) {
4373 		return;
4374 	}
4375 
4376 	strbufcpy(inp->inp_last_proc_name, head_inp->inp_last_proc_name);
4377 	strbufcpy(inp->inp_e_proc_name, head_inp->inp_e_proc_name);
4378 }
4379 
/*
 * proc_iterate() callback: when the process holds one of the management
 * or co-processor entitlements (or the restriction is globally disabled
 * via management_data_unrestricted), mark every IPv4/IPv6 socket it has
 * open as allowed and checked for management-interface use.
 */
static int
in_check_management_interface_proc_callout(proc_t proc, void *arg __unused)
{
	struct fileproc *fp = NULL;
	task_t __single task = proc_task(proc);
	bool allowed = false;

	if (IOTaskHasEntitlement(task, INTCOPROC_RESTRICTED_ENTITLEMENT) == true
	    || IOTaskHasEntitlement(task, MANAGEMENT_DATA_ENTITLEMENT) == true
#if DEBUG || DEVELOPMENT
	    || IOTaskHasEntitlement(task, INTCOPROC_RESTRICTED_ENTITLEMENT_DEVELOPMENT) == true
	    || IOTaskHasEntitlement(task, MANAGEMENT_DATA_ENTITLEMENT_DEVELOPMENT) == true
#endif /* DEBUG || DEVELOPMENT */
	    ) {
		allowed = true;
	}
	/* Skip processes that are neither entitled nor globally unrestricted */
	if (allowed == false && management_data_unrestricted == false) {
		return PROC_RETURNED;
	}

	proc_fdlock(proc);
	fdt_foreach(fp, proc) {
		struct fileglob *fg = fp->fp_glob;
		struct socket *so;
		struct inpcb *inp;

		/* Only socket descriptors are of interest */
		if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
			continue;
		}

		so = (struct socket *)fp_get_data(fp);
		if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
			continue;
		}

		inp = (struct inpcb *)so->so_pcb;

		/* Take a use-count reference; skip pcbs being torn down */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
			continue;
		}

		socket_lock(so, 1);

		/* Drop the reference and re-check now that the lock is held */
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			socket_unlock(so, 1);
			continue;
		}
		inp->inp_flags2 |= INP2_MANAGEMENT_ALLOWED;
		inp->inp_flags2 |= INP2_MANAGEMENT_CHECKED;

		socket_unlock(so, 1);
	}
	proc_fdunlock(proc);

	return PROC_RETURNED;
}
4436 
/* Set once the one-shot all-process scan below has been performed */
static bool in_management_interface_checked = false;
4438 
4439 static void
in_management_interface_event_callback(struct nwk_wq_entry * nwk_item)4440 in_management_interface_event_callback(struct nwk_wq_entry *nwk_item)
4441 {
4442 	kfree_type(struct nwk_wq_entry, nwk_item);
4443 
4444 	if (in_management_interface_checked == true) {
4445 		return;
4446 	}
4447 	in_management_interface_checked = true;
4448 
4449 	proc_iterate(PROC_ALLPROCLIST,
4450 	    in_check_management_interface_proc_callout,
4451 	    NULL, NULL, NULL);
4452 }
4453 
4454 void
in_management_interface_check(void)4455 in_management_interface_check(void)
4456 {
4457 	struct nwk_wq_entry *nwk_item;
4458 
4459 	if (if_management_interface_check_needed == false ||
4460 	    in_management_interface_checked == true) {
4461 		return;
4462 	}
4463 
4464 	nwk_item  = kalloc_type(struct nwk_wq_entry,
4465 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
4466 
4467 	nwk_item->func = in_management_interface_event_callback;
4468 
4469 	nwk_wq_enqueue(nwk_item);
4470 }
4471 
/*
 * Serialize bind operations on a socket: sleep until no other thread
 * holds INP2_BIND_IN_PROGRESS, then claim it for the current thread.
 * The socket lock is held on entry and re-acquired by msleep(); pair
 * with inp_exit_bind_in_progress().
 */
void
inp_enter_bind_in_progress(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

#if (DEBUG || DEVELOPMENT)
	socket_lock_assert_owned(so);
#endif /* (DEBUG || DEVELOPMENT) */

	/* Guard against overflowing the 16-bit waiter count */
	VERIFY(inp->inp_bind_in_progress_waiters != UINT16_MAX);

	while ((inp->inp_flags2 & INP2_BIND_IN_PROGRESS) != 0) {
		lck_mtx_t *mutex_held;

		inp->inp_bind_in_progress_waiters++;
		inp->inp_bind_in_progress_last_waiter_thread = current_thread();

		/* msleep() needs the mutex that protects this socket */
		if (so->so_proto->pr_getlock != NULL) {
			mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
		} else {
			mutex_held = so->so_proto->pr_domain->dom_mtx;
		}
		/*
		 * NOTE(review): PCATCH allows the sleep to be interrupted by
		 * a signal; the loop then re-checks the flag instead of
		 * propagating an error -- confirm that is intended.
		 */
		msleep(&inp->inp_bind_in_progress_waiters, mutex_held,
		    PSOCK | PCATCH, "inp_enter_bind_in_progress", NULL);

		inp->inp_bind_in_progress_last_waiter_thread = NULL;

		inp->inp_bind_in_progress_waiters--;
	}
	/* Claim the bind-in-progress marker for this thread */
	inp->inp_flags2 |= INP2_BIND_IN_PROGRESS;
	inp->inp_bind_in_progress_thread = current_thread();
}
4504 
4505 void
inp_exit_bind_in_progress(struct socket * so)4506 inp_exit_bind_in_progress(struct socket *so)
4507 {
4508 	struct inpcb *inp = sotoinpcb(so);
4509 
4510 #if (DEBUG || DEVELOPMENT)
4511 	socket_lock_assert_owned(so);
4512 #endif /* (DEBUG || DEVELOPMENT) */
4513 
4514 	inp->inp_flags2 &= ~INP2_BIND_IN_PROGRESS;
4515 	inp->inp_bind_in_progress_thread = NULL;
4516 	if (__improbable(inp->inp_bind_in_progress_waiters > 0)) {
4517 		wakeup_one((caddr_t)&inp->inp_bind_in_progress_waiters);
4518 	}
4519 }
4520