1 /*
2 * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1988, 1990, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/domain.h>
73 #include <sys/kernel.h>
74 #include <sys/proc_internal.h>
75 #include <sys/kauth.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/mcache.h>
79 #include <sys/protosw.h>
80 #include <sys/stat.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/signalvar.h>
84 #include <sys/sysctl.h>
85 #include <sys/syslog.h>
86 #include <sys/unpcb.h>
87 #include <sys/ev.h>
88 #include <kern/locks.h>
89 #include <net/route.h>
90 #include <net/content_filter.h>
91 #include <netinet/in.h>
92 #include <netinet/in_pcb.h>
93 #include <netinet/tcp_var.h>
94 #include <sys/kdebug.h>
95 #include <libkern/OSAtomic.h>
96
97 #if CONFIG_MACF
98 #include <security/mac_framework.h>
99 #endif
100
101 #include <mach/vm_param.h>
102
103 #if MPTCP
104 #include <netinet/mptcp_var.h>
105 #endif
106
107 extern uint32_t net_wake_pkt_debug;
108
109 #define DBG_FNC_SBDROP NETDBG_CODE(DBG_NETSOCK, 4)
110 #define DBG_FNC_SBAPPEND NETDBG_CODE(DBG_NETSOCK, 5)
111
112 SYSCTL_DECL(_kern_ipc);
113
114 __private_extern__ u_int32_t net_io_policy_throttle_best_effort = 0;
115 SYSCTL_INT(_kern_ipc, OID_AUTO, throttle_best_effort,
116 CTLFLAG_RW | CTLFLAG_LOCKED, &net_io_policy_throttle_best_effort, 0, "");
117
118 static inline void sbcompress(struct sockbuf *, struct mbuf *, struct mbuf *);
119 static struct socket *sonewconn_internal(struct socket *, int);
120 static int sbappendcontrol_internal(struct sockbuf *, struct mbuf *,
121 struct mbuf *);
122 static void soevent_ifdenied(struct socket *);
123
124 static int sbappendrecord_common(struct sockbuf *sb, struct mbuf *m0, boolean_t nodrop);
125 static int sbappend_common(struct sockbuf *sb, struct mbuf *m, boolean_t nodrop);
126
127 /*
128 * Primitive routines for operating on sockets and socket buffers
129 */
130 static int soqlimitcompat = 1;
131 static int soqlencomp = 0;
132
133 /*
134 * Based on the number of mbuf clusters configured, high_sb_max and sb_max can
135 * get scaled up or down to suit that memory configuration. high_sb_max is a
136 * higher limit on sb_max that is checked when sb_max gets set through sysctl.
137 */
138
139 u_int32_t sb_max = SB_MAX; /* XXX should be static */
140 u_int32_t high_sb_max = SB_MAX;
141
142 static u_int32_t sb_efficiency = 8; /* parameter for sbreserve() */
143 int32_t total_sbmb_cnt __attribute__((aligned(8))) = 0;
144 int32_t total_sbmb_cnt_floor __attribute__((aligned(8))) = 0;
145 int32_t total_sbmb_cnt_peak __attribute__((aligned(8))) = 0;
146 int64_t sbmb_limreached __attribute__((aligned(8))) = 0;
147
148 u_int32_t net_io_policy_log = 0; /* log socket policy changes */
149 #if CONFIG_PROC_UUID_POLICY
150 u_int32_t net_io_policy_uuid = 1; /* enable UUID socket policy */
151 #endif /* CONFIG_PROC_UUID_POLICY */
152
153 /*
154 * Procedures to manipulate state flags of socket
155 * and do appropriate wakeups. Normal sequence from the
156 * active (originating) side is that soisconnecting() is
157 * called during processing of connect() call,
158 * resulting in an eventual call to soisconnected() if/when the
159 * connection is established. When the connection is torn down
160 * soisdisconnecting() is called during processing of disconnect() call,
161 * and soisdisconnected() is called when the connection to the peer
162 * is totally severed. The semantics of these routines are such that
163 * connectionless protocols can call soisconnected() and soisdisconnected()
164 * only, bypassing the in-progress calls when setting up a ``connection''
165 * takes no time.
166 *
167 * From the passive side, a socket is created with
168 * two queues of sockets: so_incomp for connections in progress
169 * and so_comp for connections already made and awaiting user acceptance.
170 * As a protocol is preparing incoming connections, it creates a socket
171 * structure queued on so_incomp by calling sonewconn(). When the connection
172 * is established, soisconnected() is called, and transfers the
173 * socket structure to so_comp, making it available to accept().
174 *
175 * If a socket is closed with sockets on either
176 * so_incomp or so_comp, these sockets are dropped.
177 *
178 * If higher level protocols are implemented in
179 * the kernel, the wakeups done here will sometimes
180 * cause software-interrupt process scheduling.
181 */
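/*
 * A minimal sketch of the active-side sequence described above, as a
 * hypothetical connection-oriented protocol might drive it (the function
 * below is illustrative and not part of this file):
 */
#if 0
static void
example_active_open(struct socket *so)
{
	soisconnecting(so);	/* connect() processing has begun */
	/* ... protocol handshake completes ... */
	soisconnected(so);	/* wakes readers/writers; on the passive
				 * side this also moves the socket from
				 * so_incomp to so_comp */
}
#endif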
182 void
183 soisconnecting(struct socket *so)
184 {
185 so->so_state &= ~(SS_ISCONNECTED | SS_ISDISCONNECTING);
186 so->so_state |= SS_ISCONNECTING;
187
188 sflt_notify(so, sock_evt_connecting, NULL);
189 }
190
191 void
192 soisconnected(struct socket *so)
193 {
194 /*
195 * If socket is subject to filter and is pending initial verdict,
196 * delay marking socket as connected and do not present the connected
197 * socket to user just yet.
198 */
199 if (cfil_sock_connected_pending_verdict(so)) {
200 return;
201 }
202
203 so->so_state &= ~(SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
204 so->so_state |= SS_ISCONNECTED;
205
206 soreserve_preconnect(so, 0);
207
208 sflt_notify(so, sock_evt_connected, NULL);
209
210 if (so->so_head != NULL && (so->so_state & SS_INCOMP)) {
211 struct socket *head = so->so_head;
212 int locked = 0;
213
214 /*
215 * Enforce lock order when the protocol has per socket locks
216 */
217 if (head->so_proto->pr_getlock != NULL) {
218 socket_lock(head, 1);
219 so_acquire_accept_list(head, so);
220 locked = 1;
221 }
222 if (so->so_head == head && (so->so_state & SS_INCOMP)) {
223 so->so_state &= ~SS_INCOMP;
224 so->so_state |= SS_COMP;
225 TAILQ_REMOVE(&head->so_incomp, so, so_list);
226 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
227 head->so_incqlen--;
228
229 /*
230 * We have to release the accept list in
231 * case a socket callback calls sock_accept()
232 */
233 if (locked != 0) {
234 so_release_accept_list(head);
235 socket_unlock(so, 0);
236 }
237 sorwakeup(head);
238 wakeup_one((caddr_t)&head->so_timeo);
239
240 if (locked != 0) {
241 socket_unlock(head, 1);
242 socket_lock(so, 0);
243 }
244 } else if (locked != 0) {
245 so_release_accept_list(head);
246 socket_unlock(head, 1);
247 }
248 } else {
249 wakeup((caddr_t)&so->so_timeo);
250 sorwakeup(so);
251 sowwakeup(so);
252 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CONNECTED |
253 SO_FILT_HINT_CONNINFO_UPDATED);
254 }
255 }
256
257 boolean_t
258 socanwrite(struct socket *so)
259 {
260 return (so->so_state & SS_ISCONNECTED) ||
261 !(so->so_proto->pr_flags & PR_CONNREQUIRED) ||
262 (so->so_flags1 & SOF1_PRECONNECT_DATA);
263 }
264
265 void
266 soisdisconnecting(struct socket *so)
267 {
268 so->so_state &= ~SS_ISCONNECTING;
269 so->so_state |= (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
270 soevent(so, SO_FILT_HINT_LOCKED);
271 sflt_notify(so, sock_evt_disconnecting, NULL);
272 wakeup((caddr_t)&so->so_timeo);
273 sowwakeup(so);
274 sorwakeup(so);
275 }
276
277 void
278 soisdisconnected(struct socket *so)
279 {
280 so->so_state &= ~(SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
281 so->so_state |= (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
282 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
283 SO_FILT_HINT_CONNINFO_UPDATED);
284 sflt_notify(so, sock_evt_disconnected, NULL);
285 wakeup((caddr_t)&so->so_timeo);
286 sowwakeup(so);
287 sorwakeup(so);
288
289 #if CONTENT_FILTER
290 /* Notify content filters as soon as we cannot send/receive data */
291 cfil_sock_notify_shutdown(so, SHUT_RDWR);
292 #endif /* CONTENT_FILTER */
293 }
294
295 /*
296 * This function will issue a wakeup like soisdisconnected but it will not
297 * notify the socket filters. This will avoid unlocking the socket
298 * in the midst of closing it.
299 */
300 void
301 sodisconnectwakeup(struct socket *so)
302 {
303 so->so_state &= ~(SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
304 so->so_state |= (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
305 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
306 SO_FILT_HINT_CONNINFO_UPDATED);
307 wakeup((caddr_t)&so->so_timeo);
308 sowwakeup(so);
309 sorwakeup(so);
310
311 #if CONTENT_FILTER
312 /* Notify content filters as soon as we cannot send/receive data */
313 cfil_sock_notify_shutdown(so, SHUT_RDWR);
314 #endif /* CONTENT_FILTER */
315 }
316
317 /*
318 * When an attempt at a new connection is noted on a socket
319 * which accepts connections, sonewconn is called. If the
320 * connection is possible (subject to space constraints, etc.)
321 * then we allocate a new structure, properly linked into the
322 * data structure of the original socket, and return this.
323 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
324 */
325 static struct socket *
326 sonewconn_internal(struct socket *head, int connstatus)
327 {
328 int so_qlen, error = 0;
329 struct socket *so;
330 lck_mtx_t *mutex_held;
331
332 if (head->so_proto->pr_getlock != NULL) {
333 mutex_held = (*head->so_proto->pr_getlock)(head, 0);
334 } else {
335 mutex_held = head->so_proto->pr_domain->dom_mtx;
336 }
337 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
338
339 if (!soqlencomp) {
340 /*
341 * This is the default case; so_qlen represents the
342 * sum of both incomplete and completed queues.
343 */
344 so_qlen = head->so_qlen;
345 } else {
346 /*
347 * When kern.ipc.soqlencomp is set to 1, so_qlen
348 * represents only the completed queue. Since we
349 * cannot let the incomplete queue go unbounded
350 * (in case of SYN flood), we cap the incomplete
351 * queue length to at most somaxconn, and use that
352 * as so_qlen so that we fail immediately below.
353 */
354 so_qlen = head->so_qlen - head->so_incqlen;
355 if (head->so_incqlen > somaxconn) {
356 so_qlen = somaxconn;
357 }
358 }
359
360 if (so_qlen >=
361 (soqlimitcompat ? head->so_qlimit : (3 * head->so_qlimit / 2))) {
362 return (struct socket *)0;
363 }
364 so = soalloc(1, SOCK_DOM(head), head->so_type);
365 if (so == NULL) {
366 return (struct socket *)0;
367 }
368 /* check if head was closed during the soalloc */
369 if (head->so_proto == NULL) {
370 sodealloc(so);
371 return (struct socket *)0;
372 }
373
374 so->so_type = head->so_type;
375 so->so_options = head->so_options & ~SO_ACCEPTCONN;
376 so->so_linger = head->so_linger;
377 so->so_state = head->so_state | SS_NOFDREF;
378 so->so_proto = head->so_proto;
379 so->so_timeo = head->so_timeo;
380 so->so_pgid = head->so_pgid;
381 kauth_cred_ref(head->so_cred);
382 so->so_cred = head->so_cred;
383 so->last_pid = head->last_pid;
384 so->last_upid = head->last_upid;
385 memcpy(so->last_uuid, head->last_uuid, sizeof(so->last_uuid));
386 if (head->so_flags & SOF_DELEGATED) {
387 so->e_pid = head->e_pid;
388 so->e_upid = head->e_upid;
389 memcpy(so->e_uuid, head->e_uuid, sizeof(so->e_uuid));
390 }
391 /* inherit socket options stored in so_flags */
392 so->so_flags = head->so_flags &
393 (SOF_NOSIGPIPE | SOF_NOADDRAVAIL | SOF_REUSESHAREUID |
394 SOF_NOTIFYCONFLICT | SOF_BINDRANDOMPORT | SOF_NPX_SETOPTSHUT |
395 SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS | SOF_NOTSENT_LOWAT |
396 SOF_DELEGATED);
397 so->so_flags1 |= SOF1_INBOUND;
398 so->so_usecount = 1;
399 so->next_lock_lr = 0;
400 so->next_unlock_lr = 0;
401
402 so->so_rcv.sb_flags |= SB_RECV; /* XXX */
403 so->so_rcv.sb_so = so->so_snd.sb_so = so;
404
405 /* inherit traffic management properties of listener */
406 so->so_flags1 |=
407 head->so_flags1 & (SOF1_TRAFFIC_MGT_SO_BACKGROUND | SOF1_TC_NET_SERV_TYPE |
408 SOF1_QOSMARKING_ALLOWED | SOF1_QOSMARKING_POLICY_OVERRIDE);
409 so->so_background_thread = head->so_background_thread;
410 so->so_traffic_class = head->so_traffic_class;
411 so->so_netsvctype = head->so_netsvctype;
412
413 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
414 sodealloc(so);
415 return (struct socket *)0;
416 }
417 so->so_rcv.sb_flags |= (head->so_rcv.sb_flags & SB_USRSIZE);
418 so->so_snd.sb_flags |= (head->so_snd.sb_flags & SB_USRSIZE);
419
420 /*
421 * Must be done with head unlocked to avoid deadlock
422 * for protocol with per socket mutexes.
423 */
424 if (head->so_proto->pr_unlock) {
425 socket_unlock(head, 0);
426 }
427 if (((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL) != 0) ||
428 error) {
429 sodealloc(so);
430 if (head->so_proto->pr_unlock) {
431 socket_lock(head, 0);
432 }
433 return (struct socket *)0;
434 }
435 if (head->so_proto->pr_unlock) {
436 socket_lock(head, 0);
437 /*
438 * Radar 7385998 Recheck that the head is still accepting
439 * to avoid race condition when head is getting closed.
440 */
441 if ((head->so_options & SO_ACCEPTCONN) == 0) {
442 so->so_state &= ~SS_NOFDREF;
443 soclose(so);
444 return (struct socket *)0;
445 }
446 }
447
448 if (so->so_proto->pr_copy_last_owner != NULL) {
449 (*so->so_proto->pr_copy_last_owner)(so, head);
450 }
451 atomic_add_32(&so->so_proto->pr_domain->dom_refs, 1);
452
453 /* Insert in head appropriate lists */
454 so_acquire_accept_list(head, NULL);
455
456 so->so_head = head;
457
458 /*
459 * Since this socket is going to be inserted into the incomp
460 * queue, it can be picked up by another thread in
461 * tcp_dropdropablreq and dropped before it is fully set up.
462 * To prevent this race, set the in-progress flag, which can be
463 * cleared later.
464 */
465 so->so_flags |= SOF_INCOMP_INPROGRESS;
466
467 if (connstatus) {
468 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
469 so->so_state |= SS_COMP;
470 } else {
471 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
472 so->so_state |= SS_INCOMP;
473 head->so_incqlen++;
474 }
475 head->so_qlen++;
476
477 so_release_accept_list(head);
478
479 /* Attach socket filters for this protocol */
480 sflt_initsock(so);
481
482 if (connstatus) {
483 so->so_state |= (short)connstatus;
484 sorwakeup(head);
485 wakeup((caddr_t)&head->so_timeo);
486 }
487 return so;
488 }
489
490
491 struct socket *
492 sonewconn(struct socket *head, int connstatus, const struct sockaddr *from)
493 {
494 int error = sflt_connectin(head, from);
495 if (error) {
496 return NULL;
497 }
498
499 return sonewconn_internal(head, connstatus);
500 }
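/*
 * Illustrative use of sonewconn() from a hypothetical protocol input
 * path (the function name and surrounding logic are assumptions):
 */
#if 0
static void
example_passive_open(struct socket *head, struct sockaddr *from)
{
	/* connstatus == 0: queue on so_incomp until the handshake ends */
	struct socket *so = sonewconn(head, 0, from);

	if (so == NULL) {
		return;		/* backlog full, alloc failure, or filter denied */
	}
	/* ... handshake completes later ... */
	soisconnected(so);	/* moves so from so_incomp to so_comp */
}
#endif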
501
502 /*
503 * Socantsendmore indicates that no more data will be sent on the
504 * socket; it would normally be applied to a socket when the user
505 * informs the system that no more data is to be sent, by the protocol
506 * code (in the case of PRU_SHUTDOWN). Socantrcvmore indicates that no more data
507 * will be received, and will normally be applied to the socket by a
508 * protocol when it detects that the peer will send no more data.
509 * Data queued for reading in the socket may yet be read.
510 */
511
512 void
513 socantsendmore(struct socket *so)
514 {
515 so->so_state |= SS_CANTSENDMORE;
516 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTSENDMORE);
517 sflt_notify(so, sock_evt_cantsendmore, NULL);
518 sowwakeup(so);
519 }
520
521 void
522 socantrcvmore(struct socket *so)
523 {
524 so->so_state |= SS_CANTRCVMORE;
525 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTRCVMORE);
526 sflt_notify(so, sock_evt_cantrecvmore, NULL);
527 sorwakeup(so);
528 }
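/*
 * Sketch of when a protocol would typically invoke these (illustrative):
 *
 *	socantsendmore(so);	local shutdown(SHUT_WR) / PRU_SHUTDOWN
 *	socantrcvmore(so);	peer signalled end of data (e.g. a TCP FIN)
 */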
529
530 /*
531 * Wait for data to arrive at/drain from a socket buffer.
532 */
533 int
534 sbwait(struct sockbuf *sb)
535 {
536 boolean_t nointr = (sb->sb_flags & SB_NOINTR);
537 void *lr_saved = __builtin_return_address(0);
538 struct socket *so = sb->sb_so;
539 lck_mtx_t *mutex_held;
540 struct timespec ts;
541 int error = 0;
542
543 if (so == NULL) {
544 panic("%s: null so, sb=%p sb_flags=0x%x lr=%p",
545 __func__, sb, sb->sb_flags, lr_saved);
546 /* NOTREACHED */
547 } else if (so->so_usecount < 1) {
548 panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
549 "lrh= %s\n", __func__, sb, sb->sb_flags, so,
550 so->so_usecount, lr_saved, solockhistory_nr(so));
551 /* NOTREACHED */
552 }
553
554 if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
555 error = EBADF;
556 if (so->so_flags & SOF_DEFUNCT) {
557 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
558 "(%d)\n", __func__, proc_selfpid(),
559 proc_best_name(current_proc()),
560 so->so_gencnt,
561 SOCK_DOM(so), SOCK_TYPE(so), error);
562 }
563 return error;
564 }
565
566 if (so->so_proto->pr_getlock != NULL) {
567 mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
568 } else {
569 mutex_held = so->so_proto->pr_domain->dom_mtx;
570 }
571
572 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
573
574 ts.tv_sec = sb->sb_timeo.tv_sec;
575 ts.tv_nsec = sb->sb_timeo.tv_usec * 1000;
576
577 sb->sb_waiters++;
578 VERIFY(sb->sb_waiters != 0);
579
580 error = msleep((caddr_t)&sb->sb_cc, mutex_held,
581 nointr ? PSOCK : PSOCK | PCATCH,
582 nointr ? "sbwait_nointr" : "sbwait", &ts);
583
584 VERIFY(sb->sb_waiters != 0);
585 sb->sb_waiters--;
586
587 if (so->so_usecount < 1) {
588 panic("%s: 2 sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
589 "lrh= %s\n", __func__, sb, sb->sb_flags, so,
590 so->so_usecount, lr_saved, solockhistory_nr(so));
591 /* NOTREACHED */
592 }
593
594 if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
595 error = EBADF;
596 if (so->so_flags & SOF_DEFUNCT) {
597 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
598 "(%d)\n", __func__, proc_selfpid(),
599 proc_best_name(current_proc()),
600 so->so_gencnt,
601 SOCK_DOM(so), SOCK_TYPE(so), error);
602 }
603 }
604
605 return error;
606 }
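/*
 * Typical caller pattern for sbwait(), a simplified sketch of a receive
 * path (real callers such as soreceive() also check so_error and hold
 * the socket lock around this loop):
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
	while (so->so_rcv.sb_cc == 0 &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		int error = sbwait(&so->so_rcv);
		if (error != 0) {
			return error;	/* EBADF, EINTR, or EWOULDBLOCK
					 * when sb_timeo expires */
		}
	}
	return 0;
}
#endif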
607
608 void
609 sbwakeup(struct sockbuf *sb)
610 {
611 if (sb->sb_waiters > 0) {
612 wakeup((caddr_t)&sb->sb_cc);
613 }
614 }
615
616 /*
617 * Wakeup processes waiting on a socket buffer.
618 * Do asynchronous notification via SIGIO
619 * if the socket has the SS_ASYNC flag set.
620 */
621 void
622 sowakeup(struct socket *so, struct sockbuf *sb, struct socket *so2)
623 {
624 if (so->so_flags & SOF_DEFUNCT) {
625 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] si 0x%x, "
626 "fl 0x%x [%s]\n", __func__, proc_selfpid(),
627 proc_best_name(current_proc()),
628 so->so_gencnt, SOCK_DOM(so),
629 SOCK_TYPE(so), (uint32_t)sb->sb_sel.si_flags, sb->sb_flags,
630 (sb->sb_flags & SB_RECV) ? "rcv" : "snd");
631 }
632
633 sb->sb_flags &= ~SB_SEL;
634 selwakeup(&sb->sb_sel);
635 sbwakeup(sb);
636 if (so->so_state & SS_ASYNC) {
637 if (so->so_pgid < 0) {
638 gsignal(-so->so_pgid, SIGIO);
639 } else if (so->so_pgid > 0) {
640 proc_signal(so->so_pgid, SIGIO);
641 }
642 }
643 if (sb->sb_flags & SB_KNOTE) {
644 KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
645 }
646 if (sb->sb_flags & SB_UPCALL) {
647 void (*sb_upcall)(struct socket *, void *, int);
648 caddr_t sb_upcallarg;
649 int lock = !(sb->sb_flags & SB_UPCALL_LOCK);
650
651 sb_upcall = sb->sb_upcall;
652 sb_upcallarg = sb->sb_upcallarg;
653 /* Let close know that we're about to do an upcall */
654 so->so_upcallusecount++;
655
656 if (lock) {
657 if (so2) {
658 struct unpcb *unp = sotounpcb(so2);
659 unp->unp_flags |= UNP_DONTDISCONNECT;
660 unp->rw_thrcount++;
661
662 socket_unlock(so2, 0);
663 }
664 socket_unlock(so, 0);
665 }
666 (*sb_upcall)(so, sb_upcallarg, M_DONTWAIT);
667 if (lock) {
668 if (so2 && so > so2) {
669 struct unpcb *unp;
670 socket_lock(so2, 0);
671
672 unp = sotounpcb(so2);
673 unp->rw_thrcount--;
674 if (unp->rw_thrcount == 0) {
675 unp->unp_flags &= ~UNP_DONTDISCONNECT;
676 wakeup(unp);
677 }
678 }
679
680 socket_lock(so, 0);
681
682 if (so2 && so < so2) {
683 struct unpcb *unp;
684 socket_lock(so2, 0);
685
686 unp = sotounpcb(so2);
687 unp->rw_thrcount--;
688 if (unp->rw_thrcount == 0) {
689 unp->unp_flags &= ~UNP_DONTDISCONNECT;
690 wakeup(unp);
691 }
692 }
693 }
694
695 so->so_upcallusecount--;
696 /* Tell close that it's safe to proceed */
697 if ((so->so_flags & SOF_CLOSEWAIT) &&
698 so->so_upcallusecount == 0) {
699 wakeup((caddr_t)&so->so_upcallusecount);
700 }
701 }
702 #if CONTENT_FILTER
703 /*
704 * Trap disconnection events for content filters
705 */
706 if ((so->so_flags & SOF_CONTENT_FILTER) != 0) {
707 if ((sb->sb_flags & SB_RECV)) {
708 if (so->so_state & (SS_CANTRCVMORE)) {
709 cfil_sock_notify_shutdown(so, SHUT_RD);
710 }
711 } else {
712 if (so->so_state & (SS_CANTSENDMORE)) {
713 cfil_sock_notify_shutdown(so, SHUT_WR);
714 }
715 }
716 }
717 #endif /* CONTENT_FILTER */
718 }
719
720 /*
721 * Socket buffer (struct sockbuf) utility routines.
722 *
723 * Each socket contains two socket buffers: one for sending data and
724 * one for receiving data. Each buffer contains a queue of mbufs,
725 * information about the number of mbufs and amount of data in the
726 * queue, and other fields allowing select() statements and notification
727 * on data availability to be implemented.
728 *
729 * Data stored in a socket buffer is maintained as a list of records.
730 * Each record is a list of mbufs chained together with the m_next
731 * field. Records are chained together with the m_nextpkt field. The upper
732 * level routine soreceive() expects the following conventions to be
733 * observed when placing information in the receive buffer:
734 *
735 * 1. If the protocol requires each message be preceded by the sender's
736 * name, then a record containing that name must be present before
737 * any associated data (mbuf's must be of type MT_SONAME).
738 * 2. If the protocol supports the exchange of ``access rights'' (really
739 * just additional data associated with the message), and there are
740 * ``rights'' to be received, then a record containing this data
741 * should be present (mbuf's must be of type MT_RIGHTS).
742 * 3. If a name or rights record exists, then it must be followed by
743 * a data record, perhaps of zero length.
744 *
745 * Before using a new socket structure it is first necessary to reserve
746 * buffer space to the socket, by calling sbreserve(). This should commit
747 * some of the available buffer space in the system buffer pool for the
748 * socket (currently, it does nothing but enforce limits). The space
749 * should be released by calling sbrelease() when the socket is destroyed.
750 */
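/*
 * Sketch of the record layout described above for one datagram carrying
 * a sender name, a control message, and data (illustrative):
 *
 *	sb_mb -> [MT_SONAME] -> [MT_CONTROL] -> [MT_DATA] -> ...  (m_next)
 *	   |
 *	   m_nextpkt -> next record -> ...
 *
 * sbappendaddr() below builds exactly this shape via sbconcat_mbufs().
 */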
751
752 /*
753 * Returns: 0 Success
754 * ENOBUFS
755 */
756 int
757 soreserve(struct socket *so, uint32_t sndcc, uint32_t rcvcc)
758 {
759 /*
760 * We do not want to fail the creation of a socket
761 * when kern.ipc.maxsockbuf is less than the
762 * default socket buffer size of the protocol,
763 * so force the buffer sizes to be at most the
764 * limit enforced by sbreserve()
765 */
766 uint64_t maxcc = (uint64_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
767 if (sndcc > maxcc) {
768 sndcc = (uint32_t)maxcc;
769 }
770 if (rcvcc > maxcc) {
771 rcvcc = (uint32_t)maxcc;
772 }
773 if (sbreserve(&so->so_snd, sndcc) == 0) {
774 goto bad;
775 } else {
776 so->so_snd.sb_idealsize = sndcc;
777 }
778
779 if (sbreserve(&so->so_rcv, rcvcc) == 0) {
780 goto bad2;
781 } else {
782 so->so_rcv.sb_idealsize = rcvcc;
783 }
784
785 if (so->so_rcv.sb_lowat == 0) {
786 so->so_rcv.sb_lowat = 1;
787 }
788 if (so->so_snd.sb_lowat == 0) {
789 so->so_snd.sb_lowat = MCLBYTES;
790 }
791 if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat) {
792 so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
793 }
794 return 0;
795 bad2:
796 so->so_snd.sb_flags &= ~SB_SEL;
797 selthreadclear(&so->so_snd.sb_sel);
798 sbrelease(&so->so_snd);
799 bad:
800 return ENOBUFS;
801 }
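/*
 * Worked example of the clamp above, assuming the common values
 * MSIZE = 256 and MCLBYTES = 2048 (both are platform-dependent):
 * maxcc = sb_max * 2048 / 2304, i.e. roughly 8/9 of sb_max, so an
 * oversized request is quietly capped instead of failing socket
 * creation in sbreserve().
 */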
802
803 void
804 soreserve_preconnect(struct socket *so, unsigned int pre_cc)
805 {
806 /* As of now, same bytes for both preconnect read and write */
807 so->so_snd.sb_preconn_hiwat = pre_cc;
808 so->so_rcv.sb_preconn_hiwat = pre_cc;
809 }
810
811 /*
812 * Allot mbufs to a sockbuf.
813 * Attempt to scale mbmax so that mbcnt doesn't become limiting
814 * if buffering efficiency is near the normal case.
815 */
816 int
817 sbreserve(struct sockbuf *sb, u_int32_t cc)
818 {
819 if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES) ||
820 (cc > sb->sb_hiwat && (sb->sb_flags & SB_LIMITED))) {
821 return 0;
822 }
823 sb->sb_hiwat = cc;
824 sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
825 if (sb->sb_lowat > sb->sb_hiwat) {
826 sb->sb_lowat = sb->sb_hiwat;
827 }
828 return 1;
829 }
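/*
 * Example of the scaling above (numbers are illustrative): with the
 * default sb_efficiency of 8, reserving cc = 64 KB gives
 * sb_mbmax = min(64 KB * 8, sb_max), so moderately inefficient
 * buffering (small payloads per mbuf) hits the data limit before
 * the mbuf-space limit.
 */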
830
831 /*
832 * Free mbufs held by a socket, and reserved mbuf space.
833 */
834 /* WARNING needs to do selthreadclear() before calling this */
835 void
836 sbrelease(struct sockbuf *sb)
837 {
838 sbflush(sb);
839 sb->sb_hiwat = 0;
840 sb->sb_mbmax = 0;
841 }
842
843 /*
844 * Routines to add and remove
845 * data from an mbuf queue.
846 *
847 * The routines sbappend() or sbappendrecord() are normally called to
848 * append new mbufs to a socket buffer, after checking that adequate
849 * space is available, comparing the function sbspace() with the amount
850 * of data to be added. sbappendrecord() differs from sbappend() in
851 * that data supplied is treated as the beginning of a new record.
852 * To place a sender's address, optional access rights, and data in a
853 * socket receive buffer, sbappendaddr() should be used. To place
854 * access rights and data in a socket receive buffer, sbappendrights()
855 * should be used. In either case, the new data begins a new record.
856 * Note that unlike sbappend() and sbappendrecord(), these routines check
857 * for the caller that there will be enough space to store the data.
858 * Each fails if there is not enough space, or if it cannot find mbufs
859 * to store additional information in.
860 *
861 * Reliable protocols may use the socket send buffer to hold data
862 * awaiting acknowledgement. Data is normally copied from a socket
863 * send buffer in a protocol with m_copy for output to a peer,
864 * and then removing the data from the socket buffer with sbdrop()
865 * or sbdroprecord() when the data is acknowledged by the peer.
866 */
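/*
 * Sketch of the reliable-protocol send-buffer lifecycle described above
 * (illustrative; locking, the transmit path and the `acked' byte count
 * are assumptions, omitted or simplified here):
 */
#if 0
static int
example_send_path(struct socket *so, struct mbuf *m, int len, int acked)
{
	if (sbspace(&so->so_snd) < len) {
		return ENOBUFS;		/* caller would block in sbwait() */
	}
	sbappend(&so->so_snd, m);	/* queue for (re)transmission */
	/* ... transmit a copy of so->so_snd.sb_mb to the peer ... */
	sbdrop(&so->so_snd, acked);	/* release acknowledged bytes */
	sowwakeup(so);			/* wake senders blocked on space */
	return 0;
}
#endif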
867
868 /*
869 * Append mbuf chain m to the last record in the
870 * socket buffer sb. The additional space associated with
871 * the mbuf chain is recorded in sb. Empty mbufs are
872 * discarded and mbufs are compacted where possible.
873 */
874 static int
875 sbappend_common(struct sockbuf *sb, struct mbuf *m, boolean_t nodrop)
876 {
877 struct socket *so = sb->sb_so;
878 struct soflow_hash_entry *dgram_flow_entry = NULL;
879
880 if (m == NULL || (sb->sb_flags & SB_DROP)) {
881 if (m != NULL && !nodrop) {
882 m_freem(m);
883 }
884 return 0;
885 }
886
887 SBLASTRECORDCHK(sb, "sbappend 1");
888
889 if (sb->sb_lastrecord != NULL && (sb->sb_mbtail->m_flags & M_EOR)) {
890 return sbappendrecord_common(sb, m, nodrop);
891 }
892
893 if (SOCK_DOM(sb->sb_so) == PF_INET || SOCK_DOM(sb->sb_so) == PF_INET6) {
894 ASSERT(nodrop == FALSE);
895
896 if (NEED_DGRAM_FLOW_TRACKING(so)) {
897 dgram_flow_entry = soflow_get_flow(so, NULL, NULL, NULL, m != NULL ? m_length(m) : 0, false, (m != NULL && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0);
898 }
899
900 if (sb->sb_flags & SB_RECV && !(m && m->m_flags & M_SKIPCFIL)) {
901 int error = sflt_data_in(so, NULL, &m, NULL, 0);
902 SBLASTRECORDCHK(sb, "sbappend 2");
903
904 #if CONTENT_FILTER
905 if (error == 0) {
906 error = cfil_sock_data_in(so, NULL, m, NULL, 0, dgram_flow_entry);
907 }
908 #endif /* CONTENT_FILTER */
909
910 if (error != 0) {
911 if (error != EJUSTRETURN) {
912 m_freem(m);
913 }
914 if (dgram_flow_entry != NULL) {
915 soflow_free_flow(dgram_flow_entry);
916 }
917 return 0;
918 }
919 } else if (m) {
920 m->m_flags &= ~M_SKIPCFIL;
921 }
922
923 if (dgram_flow_entry != NULL) {
924 soflow_free_flow(dgram_flow_entry);
925 }
926 }
927
928 /* If this is the first record, it's also the last record */
929 if (sb->sb_lastrecord == NULL) {
930 sb->sb_lastrecord = m;
931 }
932
933 sbcompress(sb, m, sb->sb_mbtail);
934 SBLASTRECORDCHK(sb, "sbappend 3");
935 return 1;
936 }
937
938 int
939 sbappend(struct sockbuf *sb, struct mbuf *m)
940 {
941 return sbappend_common(sb, m, FALSE);
942 }
943
944 int
945 sbappend_nodrop(struct sockbuf *sb, struct mbuf *m)
946 {
947 return sbappend_common(sb, m, TRUE);
948 }
949
950 /*
951 * Similar to sbappend, except that this is optimized for stream sockets.
952 */
953 int
954 sbappendstream(struct sockbuf *sb, struct mbuf *m)
955 {
956 struct soflow_hash_entry *dgram_flow_entry = NULL;
957 struct socket *so = sb->sb_so;
958
959 if (m == NULL || (sb->sb_flags & SB_DROP)) {
960 if (m != NULL) {
961 m_freem(m);
962 }
963 return 0;
964 }
965
966 if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
967 panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p",
968 m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
969 /* NOTREACHED */
970 }
971
972 SBLASTMBUFCHK(sb, __func__);
973
974 if (SOCK_DOM(sb->sb_so) == PF_INET || SOCK_DOM(sb->sb_so) == PF_INET6) {
975 if (NEED_DGRAM_FLOW_TRACKING(so)) {
976 dgram_flow_entry = soflow_get_flow(so, NULL, NULL, NULL, m != NULL ? m_length(m) : 0, false, (m != NULL && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0);
977 }
978
979 if (sb->sb_flags & SB_RECV && !(m && m->m_flags & M_SKIPCFIL)) {
980 int error = sflt_data_in(so, NULL, &m, NULL, 0);
981 SBLASTRECORDCHK(sb, "sbappendstream 1");
982
983 #if CONTENT_FILTER
984 if (error == 0) {
985 error = cfil_sock_data_in(so, NULL, m, NULL, 0, dgram_flow_entry);
986 }
987 #endif /* CONTENT_FILTER */
988
989 if (error != 0) {
990 if (error != EJUSTRETURN) {
991 m_freem(m);
992 }
993 if (dgram_flow_entry != NULL) {
994 soflow_free_flow(dgram_flow_entry);
995 }
996 return 0;
997 }
998 } else if (m) {
999 m->m_flags &= ~M_SKIPCFIL;
1000 }
1001
1002 if (dgram_flow_entry != NULL) {
1003 soflow_free_flow(dgram_flow_entry);
1004 }
1005 }
1006
1007 sbcompress(sb, m, sb->sb_mbtail);
1008 sb->sb_lastrecord = sb->sb_mb;
1009 SBLASTRECORDCHK(sb, "sbappendstream 2");
1010 return 1;
1011 }
1012
1013 #ifdef SOCKBUF_DEBUG
1014 void
1015 sbcheck(struct sockbuf *sb)
1016 {
1017 struct mbuf *m;
1018 struct mbuf *n = 0;
1019 u_int32_t len = 0, mbcnt = 0;
1020 lck_mtx_t *mutex_held;
1021
1022 if (sb->sb_so->so_proto->pr_getlock != NULL) {
1023 mutex_held = (*sb->sb_so->so_proto->pr_getlock)(sb->sb_so, 0);
1024 } else {
1025 mutex_held = sb->sb_so->so_proto->pr_domain->dom_mtx;
1026 }
1027
1028 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1029
1030 if (sbchecking == 0) {
1031 return;
1032 }
1033
1034 for (m = sb->sb_mb; m; m = n) {
1035 n = m->m_nextpkt;
1036 for (; m; m = m->m_next) {
1037 len += m->m_len;
1038 mbcnt += MSIZE;
1039 /* XXX pretty sure this is bogus */
1040 if (m->m_flags & M_EXT) {
1041 mbcnt += m->m_ext.ext_size;
1042 }
1043 }
1044 }
1045 if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
1046 panic("cc %ld != %ld || mbcnt %ld != %ld", len, sb->sb_cc,
1047 mbcnt, sb->sb_mbcnt);
1048 }
1049 }
1050 #endif
1051
1052 void
1053 sblastrecordchk(struct sockbuf *sb, const char *where)
1054 {
1055 struct mbuf *m = sb->sb_mb;
1056
1057 while (m && m->m_nextpkt) {
1058 m = m->m_nextpkt;
1059 }
1060
1061 if (m != sb->sb_lastrecord) {
1062 printf("sblastrecordchk: mb 0x%llx lastrecord 0x%llx "
1063 "last 0x%llx\n",
1064 (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_mb),
1065 (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_lastrecord),
1066 (uint64_t)VM_KERNEL_ADDRPERM(m));
1067 printf("packet chain:\n");
1068 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
1069 printf("\t0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(m));
1070 }
1071 panic("sblastrecordchk from %s", where);
1072 }
1073 }
1074
1075 void
1076 sblastmbufchk(struct sockbuf *sb, const char *where)
1077 {
1078 struct mbuf *m = sb->sb_mb;
1079 struct mbuf *n;
1080
1081 while (m && m->m_nextpkt) {
1082 m = m->m_nextpkt;
1083 }
1084
1085 while (m && m->m_next) {
1086 m = m->m_next;
1087 }
1088
1089 if (m != sb->sb_mbtail) {
1090 printf("sblastmbufchk: mb 0x%llx mbtail 0x%llx last 0x%llx\n",
1091 (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_mb),
1092 (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_mbtail),
1093 (uint64_t)VM_KERNEL_ADDRPERM(m));
1094 printf("packet tree:\n");
1095 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
1096 printf("\t");
1097 for (n = m; n != NULL; n = n->m_next) {
1098 printf("0x%llx ",
1099 (uint64_t)VM_KERNEL_ADDRPERM(n));
1100 }
1101 printf("\n");
1102 }
1103 panic("sblastmbufchk from %s", where);
1104 }
1105 }
1106
1107 /*
1108 * Similar to sbappend, except the mbuf chain begins a new record.
1109 */
1110 static int
1111 sbappendrecord_common(struct sockbuf *sb, struct mbuf *m0, boolean_t nodrop)
1112 {
1113 struct soflow_hash_entry *dgram_flow_entry = NULL;
1114 struct socket *so = sb->sb_so;
1115 struct mbuf *m;
1116 int space = 0;
1117
1118 if (m0 == NULL || (sb->sb_flags & SB_DROP)) {
1119 if (m0 != NULL && nodrop == FALSE) {
1120 m_freem(m0);
1121 }
1122 return 0;
1123 }
1124
1125 for (m = m0; m != NULL; m = m->m_next) {
1126 space += m->m_len;
1127 }
1128
1129 if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) {
1130 if (nodrop == FALSE) {
1131 m_freem(m0);
1132 }
1133 return 0;
1134 }
1135
1136 if (SOCK_DOM(sb->sb_so) == PF_INET || SOCK_DOM(sb->sb_so) == PF_INET6) {
1137 ASSERT(nodrop == FALSE);
1138
1139 if (NEED_DGRAM_FLOW_TRACKING(so)) {
1140 dgram_flow_entry = soflow_get_flow(so, NULL, NULL, NULL, m0 != NULL ? m_length(m0) : 0, false, (m0 != NULL && m0->m_pkthdr.rcvif) ? m0->m_pkthdr.rcvif->if_index : 0);
1141 }
1142
1143 if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) {
1144 int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
1145 sock_data_filt_flag_record);
1146
1147 #if CONTENT_FILTER
1148 if (error == 0) {
1149 error = cfil_sock_data_in(sb->sb_so, NULL, m0, NULL, 0, dgram_flow_entry);
1150 }
1151 #endif /* CONTENT_FILTER */
1152
1153 if (error != 0) {
1154 SBLASTRECORDCHK(sb, "sbappendrecord 1");
1155 if (error != EJUSTRETURN) {
1156 m_freem(m0);
1157 }
1158 if (dgram_flow_entry != NULL) {
1159 soflow_free_flow(dgram_flow_entry);
1160 }
1161 return 0;
1162 }
1163 } else if (m0) {
1164 m0->m_flags &= ~M_SKIPCFIL;
1165 }
1166
1167 if (dgram_flow_entry != NULL) {
1168 soflow_free_flow(dgram_flow_entry);
1169 }
1170 }
1171
1172 /*
1173 * Note this permits zero length records.
1174 */
1175 sballoc(sb, m0);
1176 SBLASTRECORDCHK(sb, "sbappendrecord 2");
1177 if (sb->sb_lastrecord != NULL) {
1178 sb->sb_lastrecord->m_nextpkt = m0;
1179 } else {
1180 sb->sb_mb = m0;
1181 }
1182 sb->sb_lastrecord = m0;
1183 sb->sb_mbtail = m0;
1184
1185 m = m0->m_next;
1186 m0->m_next = 0;
1187 if (m && (m0->m_flags & M_EOR)) {
1188 m0->m_flags &= ~M_EOR;
1189 m->m_flags |= M_EOR;
1190 }
1191 sbcompress(sb, m, m0);
1192 SBLASTRECORDCHK(sb, "sbappendrecord 3");
1193 return 1;
1194 }
1195
1196 int
1197 sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
1198 {
1199 return sbappendrecord_common(sb, m0, FALSE);
1200 }
1201
1202 int
1203 sbappendrecord_nodrop(struct sockbuf *sb, struct mbuf *m0)
1204 {
1205 return sbappendrecord_common(sb, m0, TRUE);
1206 }
1207
1208 /*
1209 * Concatenate address (optional), control (optional) and data into one
1210 * single mbuf chain. If sockbuf *sb is passed in, a space check will be
1211 * performed.
1212 *
1213 * Returns: mbuf chain pointer if succeeded, NULL if failed
1214 */
1215 struct mbuf *
1216 sbconcat_mbufs(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, struct mbuf *control)
1217 {
1218 struct mbuf *m = NULL, *n = NULL;
1219 int space = 0;
1220
1221 if (m0 && (m0->m_flags & M_PKTHDR) == 0) {
1222 panic("sbconcat_mbufs");
1223 }
1224
1225 if (m0) {
1226 space += m0->m_pkthdr.len;
1227 }
1228 for (n = control; n; n = n->m_next) {
1229 space += n->m_len;
1230 if (n->m_next == 0) { /* keep pointer to last control buf */
1231 break;
1232 }
1233 }
1234
1235 if (asa != NULL) {
1236 if (asa->sa_len > MLEN) {
1237 return NULL;
1238 }
1239 space += asa->sa_len;
1240 }
1241
1242 if (sb != NULL && space > sbspace(sb)) {
1243 return NULL;
1244 }
1245
1246 if (n) {
1247 n->m_next = m0; /* concatenate data to control */
1248 } else {
1249 control = m0;
1250 }
1251
1252 if (asa != NULL) {
1253 MGET(m, M_DONTWAIT, MT_SONAME);
1254 if (m == 0) {
1255 if (n) {
1256 /* unchain control and data if necessary */
1257 n->m_next = NULL;
1258 }
1259 return NULL;
1260 }
1261 m->m_len = asa->sa_len;
1262 bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
1263
1264 m->m_next = control;
1265 } else {
1266 m = control;
1267 }
1268
1269 return m;
1270 }
1271
1272 /*
1273 * Queue mbuf chain to the receive queue of a socket.
1274 * Parameter space is the total len of the mbuf chain.
1275 * If passed in, sockbuf space will be checked.
1276 *
1277 * Returns: 0 Invalid mbuf chain
1278 * 1 Success
1279 */
1280 int
1281 sbappendchain(struct sockbuf *sb, struct mbuf *m, int space)
1282 {
1283 struct mbuf *n, *nlast;
1284
1285 if (m == NULL) {
1286 return 0;
1287 }
1288
1289 if (space != 0 && space > sbspace(sb)) {
1290 return 0;
1291 }
1292
1293 for (n = m; n->m_next != NULL; n = n->m_next) {
1294 sballoc(sb, n);
1295 }
1296 sballoc(sb, n);
1297 nlast = n;
1298
1299 if (sb->sb_lastrecord != NULL) {
1300 sb->sb_lastrecord->m_nextpkt = m;
1301 } else {
1302 sb->sb_mb = m;
1303 }
1304 sb->sb_lastrecord = m;
1305 sb->sb_mbtail = nlast;
1306
1307 SBLASTMBUFCHK(sb, __func__);
1308 SBLASTRECORDCHK(sb, "sbappendaddr 2");
1309 return 1;
1310 }
1311
1312 /*
1313 * Returns: 0 Error: No space/out of mbufs/etc.
1314 * 1 Success
1315 *
1316 * Imputed: (*error_out) errno for error
1317 * ENOBUFS
1318 * sflt_data_in:??? [whatever a filter author chooses]
1319 */
1320 int
1321 sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0,
1322 struct mbuf *control, int *error_out)
1323 {
1324 int result = 0;
1325 boolean_t sb_unix = (sb->sb_flags & SB_UNIX);
1326 struct mbuf *mbuf_chain = NULL;
1327 struct soflow_hash_entry *dgram_flow_entry = NULL;
1328 struct socket *so = sb->sb_so;
1329
1330 if (error_out) {
1331 *error_out = 0;
1332 }
1333
1334 if (m0 && (m0->m_flags & M_PKTHDR) == 0) {
1335 panic("sbappendaddrorfree");
1336 }
1337
1338 if (sb->sb_flags & SB_DROP) {
1339 if (m0 != NULL) {
1340 m_freem(m0);
1341 }
1342 if (control != NULL && !sb_unix) {
1343 m_freem(control);
1344 }
1345 if (error_out != NULL) {
1346 *error_out = EINVAL;
1347 }
1348 return 0;
1349 }
1350
1351 if (SOCK_DOM(sb->sb_so) == PF_INET || SOCK_DOM(sb->sb_so) == PF_INET6) {
1352 /* Call socket data in filters */
1353
1354 if (NEED_DGRAM_FLOW_TRACKING(so)) {
1355 dgram_flow_entry = soflow_get_flow(so, NULL, asa, control, m0 != NULL ? m_length(m0) : 0, false, (m0 != NULL && m0->m_pkthdr.rcvif) ? m0->m_pkthdr.rcvif->if_index : 0);
1356 }
1357
1358 if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) {
1359 int error;
1360 error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0);
1361 SBLASTRECORDCHK(sb, __func__);
1362
1363 #if CONTENT_FILTER
1364 if (error == 0) {
1365 error = cfil_sock_data_in(sb->sb_so, asa, m0, control,
1366 0, dgram_flow_entry);
1367 }
1368 #endif /* CONTENT_FILTER */
1369
1370 if (error) {
1371 if (error != EJUSTRETURN) {
1372 if (m0) {
1373 m_freem(m0);
1374 }
1375 if (control != NULL && !sb_unix) {
1376 m_freem(control);
1377 }
1378 if (error_out) {
1379 *error_out = error;
1380 }
1381 }
1382 if (dgram_flow_entry != NULL) {
1383 soflow_free_flow(dgram_flow_entry);
1384 }
1385 return 0;
1386 }
1387 } else if (m0) {
1388 m0->m_flags &= ~M_SKIPCFIL;
1389 }
1390
1391 if (dgram_flow_entry != NULL) {
1392 soflow_free_flow(dgram_flow_entry);
1393 }
1394 }
1395
1396 mbuf_chain = sbconcat_mbufs(sb, asa, m0, control);
1397 SBLASTRECORDCHK(sb, "sbappendaddr 1");
1398 result = sbappendchain(sb, mbuf_chain, 0);
1399 if (result == 0) {
1400 if (m0) {
1401 m_freem(m0);
1402 }
1403 if (control != NULL && !sb_unix) {
1404 m_freem(control);
1405 }
1406 if (error_out) {
1407 *error_out = ENOBUFS;
1408 }
1409 }
1410
1411 return result;
1412 }
1413
1414 inline boolean_t
1415 is_cmsg_valid(struct mbuf *control, struct cmsghdr *cmsg)
1416 {
1417 if (cmsg == NULL) {
1418 return FALSE;
1419 }
1420
1421 if (cmsg->cmsg_len < sizeof(struct cmsghdr)) {
1422 return FALSE;
1423 }
1424
1425 if ((uint8_t *)control->m_data >= (uint8_t *)cmsg + cmsg->cmsg_len) {
1426 return FALSE;
1427 }
1428
1429 if ((uint8_t *)control->m_data + control->m_len <
1430 (uint8_t *)cmsg + cmsg->cmsg_len) {
1431 return FALSE;
1432 }
1433
1434 return TRUE;
1435 }
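/*
 * Sketch of how a control-mbuf chain is typically walked with this
 * validity check (the M_FIRST_CMSGHDR/M_NXT_CMSGHDR accessors and the
 * loop body are assumptions for illustration):
 */
#if 0
struct cmsghdr *cm;

for (cm = M_FIRST_CMSGHDR(control);
    is_cmsg_valid(control, cm);
    cm = M_NXT_CMSGHDR(control, cm)) {
	/* inspect cm->cmsg_level and cm->cmsg_type here */
}
#endif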
1436
1437 static int
1438 sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0,
1439 struct mbuf *control)
1440 {
1441 struct mbuf *m, *mlast, *n;
1442 int space = 0;
1443
1444 if (control == 0) {
1445 panic("sbappendcontrol");
1446 }
1447
1448 for (m = control;; m = m->m_next) {
1449 space += m->m_len;
1450 if (m->m_next == 0) {
1451 break;
1452 }
1453 }
1454 n = m; /* save pointer to last control buffer */
1455 for (m = m0; m; m = m->m_next) {
1456 space += m->m_len;
1457 }
1458 if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) {
1459 return 0;
1460 }
1461 n->m_next = m0; /* concatenate data to control */
1462 SBLASTRECORDCHK(sb, "sbappendcontrol 1");
1463
1464 for (m = control; m->m_next != NULL; m = m->m_next) {
1465 sballoc(sb, m);
1466 }
1467 sballoc(sb, m);
1468 mlast = m;
1469
1470 if (sb->sb_lastrecord != NULL) {
1471 sb->sb_lastrecord->m_nextpkt = control;
1472 } else {
1473 sb->sb_mb = control;
1474 }
1475 sb->sb_lastrecord = control;
1476 sb->sb_mbtail = mlast;
1477
1478 SBLASTMBUFCHK(sb, __func__);
1479 SBLASTRECORDCHK(sb, "sbappendcontrol 2");
1480 return 1;
1481 }
1482
1483 int
1484 sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
1485 int *error_out)
1486 {
1487 struct soflow_hash_entry *dgram_flow_entry = NULL;
1488 struct socket *so = sb->sb_so;
1489 int result = 0;
1490 boolean_t sb_unix = (sb->sb_flags & SB_UNIX);
1491
1492 if (error_out) {
1493 *error_out = 0;
1494 }
1495
1496 if (sb->sb_flags & SB_DROP) {
1497 if (m0 != NULL) {
1498 m_freem(m0);
1499 }
1500 if (control != NULL && !sb_unix) {
1501 m_freem(control);
1502 }
1503 if (error_out != NULL) {
1504 *error_out = EINVAL;
1505 }
1506 return 0;
1507 }
1508
1509 if (SOCK_DOM(sb->sb_so) == PF_INET || SOCK_DOM(sb->sb_so) == PF_INET6) {
1510 if (NEED_DGRAM_FLOW_TRACKING(so)) {
1511 dgram_flow_entry = soflow_get_flow(so, NULL, NULL, control, m0 != NULL ? m_length(m0) : 0, false, (m0 != NULL && m0->m_pkthdr.rcvif) ? m0->m_pkthdr.rcvif->if_index : 0);
1512 }
1513
1514 if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) {
1515 int error;
1516
1517 error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0);
1518 SBLASTRECORDCHK(sb, __func__);
1519
1520 #if CONTENT_FILTER
1521 if (error == 0) {
1522 error = cfil_sock_data_in(sb->sb_so, NULL, m0, control,
1523 0, dgram_flow_entry);
1524 }
1525 #endif /* CONTENT_FILTER */
1526
1527 if (error) {
1528 if (error != EJUSTRETURN) {
1529 if (m0) {
1530 m_freem(m0);
1531 }
1532 if (control != NULL && !sb_unix) {
1533 m_freem(control);
1534 }
1535 if (error_out) {
1536 *error_out = error;
1537 }
1538 }
1539 if (dgram_flow_entry != NULL) {
1540 soflow_free_flow(dgram_flow_entry);
1541 }
1542 return 0;
1543 }
1544 } else if (m0) {
1545 m0->m_flags &= ~M_SKIPCFIL;
1546 }
1547
1548 if (dgram_flow_entry != NULL) {
1549 soflow_free_flow(dgram_flow_entry);
1550 }
1551 }
1552
1553 result = sbappendcontrol_internal(sb, m0, control);
1554 if (result == 0) {
1555 if (m0) {
1556 m_freem(m0);
1557 }
1558 if (control != NULL && !sb_unix) {
1559 m_freem(control);
1560 }
1561 if (error_out) {
1562 *error_out = ENOBUFS;
1563 }
1564 }
1565
1566 return result;
1567 }
1568
1569 /*
1570 * TCP streams either have Multipath TCP support or are regular TCP sockets.
1571 */
1572 int
1573 sbappendstream_rcvdemux(struct socket *so, struct mbuf *m)
1574 {
1575 int ret = 0;
1576
1577 if ((m != NULL) &&
1578 m_pktlen(m) <= 0 &&
1579 !((so->so_flags & SOF_MP_SUBFLOW) &&
1580 (m->m_flags & M_PKTHDR) &&
1581 (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN))) {
1582 m_freem(m);
1583 return ret;
1584 }
1585
1586 #if MPTCP
1587 if (so->so_flags & SOF_MP_SUBFLOW) {
1588 return sbappendmptcpstream_rcv(&so->so_rcv, m);
1589 } else
1590 #endif /* MPTCP */
1591 {
1592 return sbappendstream(&so->so_rcv, m);
1593 }
1594 }
1595
1596 #if MPTCP
1597 int
1598 sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m)
1599 {
1600 struct socket *so = sb->sb_so;
1601
1602 VERIFY(m == NULL || (m->m_flags & M_PKTHDR));
1603 /* SB_NOCOMPRESS must be set to prevent loss of M_PKTHDR data */
1604 VERIFY((sb->sb_flags & (SB_RECV | SB_NOCOMPRESS)) ==
1605 (SB_RECV | SB_NOCOMPRESS));
1606
1607 if (m == NULL || m_pktlen(m) == 0 || (sb->sb_flags & SB_DROP) ||
1608 (so->so_state & SS_CANTRCVMORE)) {
1609 if (m && (m->m_flags & M_PKTHDR) &&
1610 m_pktlen(m) == 0 &&
1611 (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN)) {
1612 mptcp_input(tptomptp(sototcpcb(so))->mpt_mpte, m);
1613 return 1;
1614 } else if (m != NULL) {
1615 m_freem(m);
1616 }
1617 return 0;
1618 }
1619 /* the socket is not closed, so SOF_MP_SUBFLOW must be set */
1620 VERIFY(so->so_flags & SOF_MP_SUBFLOW);
1621
1622 if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
1623 panic("%s: nexpkt %p || mb %p != lastrecord %p", __func__,
1624 m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
1625 /* NOTREACHED */
1626 }
1627
1628 SBLASTMBUFCHK(sb, __func__);
1629
1630 /* No filter support (SB_RECV) on mptcp subflow sockets */
1631
1632 sbcompress(sb, m, sb->sb_mbtail);
1633 sb->sb_lastrecord = sb->sb_mb;
1634 SBLASTRECORDCHK(sb, __func__);
1635 return 1;
1636 }
1637 #endif /* MPTCP */
1638
1639 /*
1640 * Compress mbuf chain m into the socket
1641 * buffer sb following mbuf n. If n
1642 * is null, the buffer is presumed empty.
1643 */
1644 static inline void
1645 sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
1646 {
1647 int eor = 0, compress = (!(sb->sb_flags & SB_NOCOMPRESS));
1648 struct mbuf *o;
1649
1650 if (m == NULL) {
1651 /* There is nothing to compress; just update the tail */
1652 for (; n->m_next != NULL; n = n->m_next) {
1653 ;
1654 }
1655 sb->sb_mbtail = n;
1656 goto done;
1657 }
1658
1659 while (m != NULL) {
1660 eor |= m->m_flags & M_EOR;
1661 if (compress && m->m_len == 0 && (eor == 0 ||
1662 (((o = m->m_next) || (o = n)) && o->m_type == m->m_type))) {
1663 if (sb->sb_lastrecord == m) {
1664 sb->sb_lastrecord = m->m_next;
1665 }
1666 m = m_free(m);
1667 continue;
1668 }
1669 if (compress && n != NULL && (n->m_flags & M_EOR) == 0 &&
1670 #ifndef __APPLE__
1671 M_WRITABLE(n) &&
1672 #endif
1673 m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
1674 m->m_len <= M_TRAILINGSPACE(n) &&
1675 n->m_type == m->m_type) {
1676 bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
1677 (unsigned)m->m_len);
1678 n->m_len += m->m_len;
1679 sb->sb_cc += m->m_len;
1680 if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1681 m->m_type != MT_OOBDATA) {
1682 /* XXX: Probably don't need */
1683 sb->sb_ctl += m->m_len;
1684 }
1685
1686 /* update send byte count */
1687 if (sb->sb_flags & SB_SNDBYTE_CNT) {
1688 inp_incr_sndbytes_total(sb->sb_so,
1689 m->m_len);
1690 inp_incr_sndbytes_unsent(sb->sb_so,
1691 m->m_len);
1692 }
1693 m = m_free(m);
1694 continue;
1695 }
1696 if (n != NULL) {
1697 n->m_next = m;
1698 } else {
1699 sb->sb_mb = m;
1700 }
1701 sb->sb_mbtail = m;
1702 sballoc(sb, m);
1703 n = m;
1704 m->m_flags &= ~M_EOR;
1705 m = m->m_next;
1706 n->m_next = NULL;
1707 }
1708 if (eor != 0) {
1709 if (n != NULL) {
1710 n->m_flags |= M_EOR;
1711 } else {
1712 printf("semi-panic: sbcompress\n");
1713 }
1714 }
1715 done:
1716 SBLASTMBUFCHK(sb, __func__);
1717 }
1718
1719 void
1720 sb_empty_assert(struct sockbuf *sb, const char *where)
1721 {
1722 if (!(sb->sb_cc == 0 && sb->sb_mb == NULL && sb->sb_mbcnt == 0 &&
1723 sb->sb_mbtail == NULL && sb->sb_lastrecord == NULL)) {
1724 panic("%s: sb %p so %p cc %d mbcnt %d mb %p mbtail %p "
1725 "lastrecord %p\n", where, sb, sb->sb_so, sb->sb_cc,
1726 sb->sb_mbcnt, sb->sb_mb, sb->sb_mbtail,
1727 sb->sb_lastrecord);
1728 /* NOTREACHED */
1729 }
1730 }
1731
1732 /*
1733 * Free all mbufs in a sockbuf.
1734 * Check that all resources are reclaimed.
1735 */
1736 void
1737 sbflush(struct sockbuf *sb)
1738 {
1739 void *lr_saved = __builtin_return_address(0);
1740 struct socket *so = sb->sb_so;
1741
1742 /* so_usecount may be 0 if we get here from sofreelastref() */
1743 if (so == NULL) {
1744 panic("%s: null so, sb=%p sb_flags=0x%x lr=%p",
1745 __func__, sb, sb->sb_flags, lr_saved);
1746 /* NOTREACHED */
1747 } else if (so->so_usecount < 0) {
1748 panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
1749 "lrh= %s\n", __func__, sb, sb->sb_flags, so,
1750 so->so_usecount, lr_saved, solockhistory_nr(so));
1751 /* NOTREACHED */
1752 }
1753
1754 /*
1755 * Obtain lock on the socket buffer (SB_LOCK). This is required
1756 * to prevent the socket buffer from being unexpectedly altered
1757 * while it is used by another thread in socket send/receive.
1758 *
1759 * sblock() must not fail here, hence the assertion.
1760 */
1761 (void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
1762 VERIFY(sb->sb_flags & SB_LOCK);
1763
1764 while (sb->sb_mbcnt > 0) {
1765 /*
1766 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
1767 * we would loop forever. Panic instead.
1768 */
1769 if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len)) {
1770 break;
1771 }
1772 sbdrop(sb, (int)sb->sb_cc);
1773 }
1774
1775 sb_empty_assert(sb, __func__);
1776 sbunlock(sb, TRUE); /* keep socket locked */
1777 }
1778
1779 /*
1780 * Drop data from (the front of) a sockbuf.
1781 * Use m_freem_list to free the mbuf structures
1782 * under a single lock. This is done by pruning
1783 * the top of the tree from the body by keeping track
1784 * of where we get to in the tree, and then zeroing the
1785 * two pertinent pointers, m_nextpkt and m_next.
1786 * The socket buffer is then updated to point at the new
1787 * top of the tree, and the pruned area is released via
1788 * m_freem_list.
1789 */
1790 void
1791 sbdrop(struct sockbuf *sb, int len)
1792 {
1793 struct mbuf *m, *free_list, *ml;
1794 struct mbuf *next, *last;
1795
1796 next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
1797 #if MPTCP
1798 if (m != NULL && len > 0 && !(sb->sb_flags & SB_RECV) &&
1799 ((sb->sb_so->so_flags & SOF_MP_SUBFLOW) ||
1800 (SOCK_CHECK_DOM(sb->sb_so, PF_MULTIPATH) &&
1801 SOCK_CHECK_PROTO(sb->sb_so, IPPROTO_TCP))) &&
1802 !(sb->sb_so->so_flags1 & SOF1_POST_FALLBACK_SYNC)) {
1803 mptcp_preproc_sbdrop(sb->sb_so, m, (unsigned int)len);
1804 }
1805 if (m != NULL && len > 0 && !(sb->sb_flags & SB_RECV) &&
1806 (sb->sb_so->so_flags & SOF_MP_SUBFLOW) &&
1807 (sb->sb_so->so_flags1 & SOF1_POST_FALLBACK_SYNC)) {
1808 mptcp_fallback_sbdrop(sb->sb_so, m, len);
1809 }
1810 #endif /* MPTCP */
1811 KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_START), sb, len, 0, 0, 0);
1812
1813 free_list = last = m;
1814 ml = (struct mbuf *)0;
1815
1816 while (len > 0) {
1817 if (m == NULL) {
1818 if (next == NULL) {
1819 /*
1820 * temporarily replacing this panic with printf
1821 * because it occurs occasionally when closing
1822 * a socket, in which case there is no harm in
1823 * ignoring it. This problem will be investigated
1824 * further.
1825 */
1826 /* panic("sbdrop"); */
1827 printf("sbdrop - count not zero\n");
1828 len = 0;
1829 /*
1830 * zero the counts; if we have no mbufs,
1831 * we have no data (PR-2986815)
1832 */
1833 sb->sb_cc = 0;
1834 sb->sb_mbcnt = 0;
1835 break;
1836 }
1837 m = last = next;
1838 next = m->m_nextpkt;
1839 continue;
1840 }
1841 if (m->m_len > len) {
1842 m->m_len -= len;
1843 m->m_data += len;
1844 sb->sb_cc -= len;
1845 /* update the send byte count */
1846 if (sb->sb_flags & SB_SNDBYTE_CNT) {
1847 inp_decr_sndbytes_total(sb->sb_so, len);
1848 }
1849 if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1850 m->m_type != MT_OOBDATA) {
1851 sb->sb_ctl -= len;
1852 }
1853 break;
1854 }
1855 len -= m->m_len;
1856 sbfree(sb, m);
1857
1858 ml = m;
1859 m = m->m_next;
1860 }
1861 while (m && m->m_len == 0) {
1862 sbfree(sb, m);
1863
1864 ml = m;
1865 m = m->m_next;
1866 }
1867 if (ml) {
1868 ml->m_next = (struct mbuf *)0;
1869 last->m_nextpkt = (struct mbuf *)0;
1870 m_freem_list(free_list);
1871 }
1872 if (m) {
1873 sb->sb_mb = m;
1874 m->m_nextpkt = next;
1875 } else {
1876 sb->sb_mb = next;
1877 }
1878
1879 /*
1880 * First part is an inline SB_EMPTY_FIXUP(). Second part
1881 * makes sure sb_lastrecord is up-to-date if we dropped
1882 * part of the last record.
1883 */
1884 m = sb->sb_mb;
1885 if (m == NULL) {
1886 sb->sb_mbtail = NULL;
1887 sb->sb_lastrecord = NULL;
1888 } else if (m->m_nextpkt == NULL) {
1889 sb->sb_lastrecord = m;
1890 }
1891
1892 #if CONTENT_FILTER
1893 cfil_sock_buf_update(sb);
1894 #endif /* CONTENT_FILTER */
1895
1896 KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, 0, 0, 0, 0);
1897 }
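
/*
 * Illustrative only: a minimal sketch of how a transport protocol
 * typically uses sbdrop() to release data the peer has acknowledged.
 * The names here (example_release_acked, "acked") are hypothetical;
 * real callers such as TCP input also update their own
 * sequence-number state before dropping.
 */
#if 0
static void
example_release_acked(struct socket *so, int acked)
{
	/* drop the acknowledged bytes from the front of the send buffer */
	if (acked > 0) {
		sbdrop(&so->so_snd, imin(acked, (int)so->so_snd.sb_cc));
	}
	/* writers blocked on buffer space may now make progress */
	sowwakeup(so);
}
#endif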

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(struct sockbuf *sb)
{
	struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
			m = mn;
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MLEN) {
		return (struct mbuf *)NULL;
	}
	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL) {
		return (struct mbuf *)NULL;
	}
	cp = mtod(m, struct cmsghdr *);
	VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
	/* XXX check size? */
	(void) memcpy(CMSG_DATA(cp), p, size);
	m->m_len = (int32_t)CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return m;
}
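
/*
 * Illustrative only: a sketch of how callers typically package
 * ancillary data with sbcreatecontrol(), here a receive timestamp of
 * the kind appended when SO_TIMESTAMP is enabled.  The NULL return
 * must be checked, since m_get() above is called with M_DONTWAIT.
 */
#if 0
static struct mbuf *
example_timestamp_control(void)
{
	struct timeval tv;

	microtime(&tv);
	/* yields one MT_CONTROL mbuf holding a cmsghdr plus the payload */
	return sbcreatecontrol((caddr_t)&tv, sizeof(tv),
	    SCM_TIMESTAMP, SOL_SOCKET);
}
#endif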

struct mbuf **
sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp)
{
	struct mbuf *m;
	struct cmsghdr *cp;

	if (*mp == NULL) {
		*mp = sbcreatecontrol(p, size, type, level);
		return mp;
	}

	if (CMSG_SPACE((u_int)size) + (*mp)->m_len > MLEN) {
		mp = &(*mp)->m_next;
		*mp = sbcreatecontrol(p, size, type, level);
		return mp;
	}

	m = *mp;

	cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len);
	/* CMSG_SPACE ensures 32-bit alignment */
	VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
	m->m_len += (int32_t)CMSG_SPACE(size);

	/* XXX check size? */
	(void) memcpy(CMSG_DATA(cp), p, size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;

	return mp;
}


/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_abort_notsupp(struct socket *so)
{
#pragma unused(so)
	return EOPNOTSUPP;
}

int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return EOPNOTSUPP;
}

int
pru_attach_notsupp(struct socket *so, int proto, struct proc *p)
{
#pragma unused(so, proto, p)
	return EOPNOTSUPP;
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(so, nam, p)
	return EOPNOTSUPP;
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(so, nam, p)
	return EOPNOTSUPP;
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
#pragma unused(so1, so2)
	return EOPNOTSUPP;
}

int
pru_connectx_notsupp(struct socket *so, struct sockaddr *src,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope,
    sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
{
#pragma unused(so, src, dst, p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written)
	return EOPNOTSUPP;
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, cmd, data, ifp, p)
	return EOPNOTSUPP;
}

int
pru_detach_notsupp(struct socket *so)
{
#pragma unused(so)
	return EOPNOTSUPP;
}

int
pru_disconnect_notsupp(struct socket *so)
{
#pragma unused(so)
	return EOPNOTSUPP;
}

int
pru_disconnectx_notsupp(struct socket *so, sae_associd_t aid, sae_connid_t cid)
{
#pragma unused(so, aid, cid)
	return EOPNOTSUPP;
}

int
pru_listen_notsupp(struct socket *so, struct proc *p)
{
#pragma unused(so, p)
	return EOPNOTSUPP;
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return EOPNOTSUPP;
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{
#pragma unused(so, flags)
	return EOPNOTSUPP;
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
#pragma unused(so, m, flags)
	return EOPNOTSUPP;
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct proc *p)
{
#pragma unused(so, flags, m, addr, control, p)
	return EOPNOTSUPP;
}

int
pru_send_list_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct proc *p)
{
#pragma unused(so, flags, m, addr, control, p)
	return EOPNOTSUPP;
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, void *ub, int isstat64)
{
	if (isstat64 != 0) {
		struct stat64 *sb64;

		sb64 = (struct stat64 *)ub;
		sb64->st_blksize = so->so_snd.sb_hiwat;
	} else {
		struct stat *sb;

		sb = (struct stat *)ub;
		sb->st_blksize = so->so_snd.sb_hiwat;
	}

	return 0;
}


int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags)
{
#pragma unused(so, addr, uio, top, control, flags)
	return EOPNOTSUPP;
}

int
pru_sosend_list_notsupp(struct socket *so, struct uio **uio,
    u_int uiocnt, int flags)
{
#pragma unused(so, uio, uiocnt, flags)
	return EOPNOTSUPP;
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
#pragma unused(so, paddr, uio, mp0, controlp, flagsp)
	return EOPNOTSUPP;
}

int
pru_soreceive_list_notsupp(struct socket *so,
    struct recv_msg_elem *recv_msg_array, u_int uiocnt, int *flagsp)
{
#pragma unused(so, recv_msg_array, uiocnt, flagsp)
	return EOPNOTSUPP;
}

int
pru_shutdown_notsupp(struct socket *so)
{
#pragma unused(so)
	return EOPNOTSUPP;
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return EOPNOTSUPP;
}

int
pru_sopoll_notsupp(struct socket *so, int events, kauth_cred_t cred, void *wql)
{
#pragma unused(so, events, cred, wql)
	return EOPNOTSUPP;
}

int
pru_socheckopt_null(struct socket *so, struct sockopt *sopt)
{
#pragma unused(so, sopt)
	/*
	 * Allow all options for set/get by default.
	 */
	return 0;
}

static int
pru_preconnect_null(struct socket *so)
{
#pragma unused(so)
	return 0;
}

static int
pru_defunct_null(struct socket *so)
{
#pragma unused(so)
	return 0;
}


void
pru_sanitize(struct pr_usrreqs *pru)
{
#define DEFAULT(foo, bar)	if ((foo) == NULL) (foo) = (bar)
	DEFAULT(pru->pru_abort, pru_abort_notsupp);
	DEFAULT(pru->pru_accept, pru_accept_notsupp);
	DEFAULT(pru->pru_attach, pru_attach_notsupp);
	DEFAULT(pru->pru_bind, pru_bind_notsupp);
	DEFAULT(pru->pru_connect, pru_connect_notsupp);
	DEFAULT(pru->pru_connect2, pru_connect2_notsupp);
	DEFAULT(pru->pru_connectx, pru_connectx_notsupp);
	DEFAULT(pru->pru_control, pru_control_notsupp);
	DEFAULT(pru->pru_detach, pru_detach_notsupp);
	DEFAULT(pru->pru_disconnect, pru_disconnect_notsupp);
	DEFAULT(pru->pru_disconnectx, pru_disconnectx_notsupp);
	DEFAULT(pru->pru_listen, pru_listen_notsupp);
	DEFAULT(pru->pru_peeraddr, pru_peeraddr_notsupp);
	DEFAULT(pru->pru_rcvd, pru_rcvd_notsupp);
	DEFAULT(pru->pru_rcvoob, pru_rcvoob_notsupp);
	DEFAULT(pru->pru_send, pru_send_notsupp);
	DEFAULT(pru->pru_send_list, pru_send_list_notsupp);
	DEFAULT(pru->pru_sense, pru_sense_null);
	DEFAULT(pru->pru_shutdown, pru_shutdown_notsupp);
	DEFAULT(pru->pru_sockaddr, pru_sockaddr_notsupp);
	DEFAULT(pru->pru_sopoll, pru_sopoll_notsupp);
	DEFAULT(pru->pru_soreceive, pru_soreceive_notsupp);
	DEFAULT(pru->pru_soreceive_list, pru_soreceive_list_notsupp);
	DEFAULT(pru->pru_sosend, pru_sosend_notsupp);
	DEFAULT(pru->pru_sosend_list, pru_sosend_list_notsupp);
	DEFAULT(pru->pru_socheckopt, pru_socheckopt_null);
	DEFAULT(pru->pru_preconnect, pru_preconnect_null);
	DEFAULT(pru->pru_defunct, pru_defunct_null);
#undef DEFAULT
}
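
/*
 * Illustrative only: a protocol that implements just a few entry
 * points can leave the rest NULL and let pru_sanitize() substitute
 * the _notsupp/_null defaults above.  The names example_usrreqs,
 * example_attach and example_detach are hypothetical.
 */
#if 0
static struct pr_usrreqs example_usrreqs = {
	.pru_attach = example_attach,	/* protocol-specific */
	.pru_detach = example_detach,	/* protocol-specific */
	/* every other field left NULL on purpose */
};

/* typically invoked while registering the protocol switch entry: */
/*	pru_sanitize(&example_usrreqs); */
#endif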

/*
 * The following are macros on BSD and functions on Darwin
 */

/*
 * Do we need to notify the other side when I/O is possible?
 */

int
sb_notify(struct sockbuf *sb)
{
	return sb->sb_waiters > 0 ||
	    (sb->sb_flags & (SB_SEL | SB_ASYNC | SB_UPCALL | SB_KNOTE));
}

/*
 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
 * This is problematic if the fields are unsigned, as the space might
 * still be negative (cc > hiwat or mbcnt > mbmax).  Should detect
 * overflow and return 0.
 */
int
sbspace(struct sockbuf *sb)
{
	int pending = 0;
	int space = imin((int)(sb->sb_hiwat - sb->sb_cc),
	    (int)(sb->sb_mbmax - sb->sb_mbcnt));

	if (sb->sb_preconn_hiwat != 0) {
		space = imin((int)(sb->sb_preconn_hiwat - sb->sb_cc), space);
	}

	if (space < 0) {
		space = 0;
	}

	/* Compensate for data being processed by content filters */
#if CONTENT_FILTER
	pending = cfil_sock_data_space(sb);
#endif /* CONTENT_FILTER */
	if (pending > space) {
		space = 0;
	} else {
		space -= pending;
	}

	return space;
}
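
/*
 * Worked example (illustrative figures): with sb_hiwat = 8192,
 * sb_cc = 3000, sb_mbmax = 16384 and sb_mbcnt = 15000, the byte
 * limit leaves 5192 but the mbuf-accounting limit leaves only 1384,
 * so sbspace() reports imin(5192, 1384) = 1384.  If a content
 * filter were holding, say, 2000 pending bytes, that exceeds 1384
 * and the reported space collapses to 0.
 */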

/* do we have to send all at once on a socket? */
int
sosendallatonce(struct socket *so)
{
	return so->so_proto->pr_flags & PR_ATOMIC;
}

/* can we read something from so? */
int
soreadable(struct socket *so)
{
	return so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
	    ((so->so_state & SS_CANTRCVMORE)
#if CONTENT_FILTER
	    && cfil_sock_data_pending(&so->so_rcv) == 0
#endif /* CONTENT_FILTER */
	    ) ||
	    so->so_comp.tqh_first || so->so_error;
}

/* can we write something to so? */

int
sowriteable(struct socket *so)
{
	if ((so->so_state & SS_CANTSENDMORE) ||
	    so->so_error > 0) {
		return 1;
	}
	if (so_wait_for_if_feedback(so) || !socanwrite(so)) {
		return 0;
	}
	if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
		return 1;
	}

	int64_t data = sbspace(&so->so_snd);
	int64_t lowat = so->so_snd.sb_lowat;
	/*
	 * Deal with connected UNIX domain sockets which
	 * rely on the fact that the sender's socket buffer is
	 * actually the receiver's socket buffer.
	 */
	if (SOCK_DOM(so) == PF_LOCAL) {
		struct unpcb *unp = sotounpcb(so);
		if (unp != NULL && unp->unp_conn != NULL &&
		    unp->unp_conn->unp_socket != NULL) {
			struct socket *so2 = unp->unp_conn->unp_socket;
			/*
			 * At this point we know that `so' is locked
			 * and that `unp_conn' isn't going to change.
			 * However, we don't lock `so2' because doing so
			 * may require unlocking `so'
			 * (see unp_get_locks_in_order()).
			 *
			 * Two cases can happen:
			 *
			 * 1) we return 1 and tell the application that
			 *    it can write.  Meanwhile, another thread
			 *    fills up the socket buffer.  This will either
			 *    lead to a blocking send or EWOULDBLOCK
			 *    which the application should deal with.
			 * 2) we return 0 and tell the application that
			 *    the socket is not writable.  Meanwhile,
			 *    another thread depletes the receive socket
			 *    buffer.  In this case the application will
			 *    be woken up by sb_notify().
			 *
			 * MIN() is required because otherwise sosendcheck()
			 * may return EWOULDBLOCK since it only considers
			 * so->so_snd.
			 */
			data = MIN(data, sbspace(&so2->so_rcv));
		}
	}

	if (data >= lowat) {
		if (so->so_flags & SOF_NOTSENT_LOWAT) {
			if ((SOCK_DOM(so) == PF_INET6 ||
			    SOCK_DOM(so) == PF_INET) &&
			    so->so_type == SOCK_STREAM) {
				return tcp_notsent_lowat_check(so);
			}
#if MPTCP
			else if ((SOCK_DOM(so) == PF_MULTIPATH) &&
			    (SOCK_PROTO(so) == IPPROTO_TCP)) {
				return mptcp_notsent_lowat_check(so);
			}
#endif
			else {
				return 1;
			}
		} else {
			return 1;
		}
	}
	return 0;
}

/* adjust counters in sb reflecting allocation of m */

void
sballoc(struct sockbuf *sb, struct mbuf *m)
{
	u_int32_t cnt = 1;

	sb->sb_cc += m->m_len;
	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
	    m->m_type != MT_OOBDATA) {
		sb->sb_ctl += m->m_len;
	}
	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		cnt += (m->m_ext.ext_size >> MSIZESHIFT);
	}
	OSAddAtomic(cnt, &total_sbmb_cnt);
	VERIFY(total_sbmb_cnt > 0);
	if (total_sbmb_cnt > total_sbmb_cnt_peak) {
		total_sbmb_cnt_peak = total_sbmb_cnt;
	}

	/*
	 * If data is being added to the send socket buffer,
	 * update the send byte count.
	 */
	if (sb->sb_flags & SB_SNDBYTE_CNT) {
		inp_incr_sndbytes_total(sb->sb_so, m->m_len);
		inp_incr_sndbytes_unsent(sb->sb_so, m->m_len);
	}
}
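
/*
 * Worked example (illustrative figures, assuming MSIZE = 256 and
 * MSIZESHIFT = 8): for an mbuf carrying a 2048-byte external cluster
 * with m_len = 1500, sballoc() adds 1500 to sb_cc and
 * MSIZE + 2048 = 2304 to sb_mbcnt, and bumps the global
 * total_sbmb_cnt by 1 + (2048 >> MSIZESHIFT) = 9 mbuf-sized units.
 * sbfree() below reverses exactly the same amounts.
 */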

/* adjust counters in sb reflecting freeing of m */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{
	int cnt = -1;

	sb->sb_cc -= m->m_len;
	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
	    m->m_type != MT_OOBDATA) {
		sb->sb_ctl -= m->m_len;
	}
	sb->sb_mbcnt -= MSIZE;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		cnt -= (m->m_ext.ext_size >> MSIZESHIFT);
	}
	OSAddAtomic(cnt, &total_sbmb_cnt);
	VERIFY(total_sbmb_cnt >= 0);
	if (total_sbmb_cnt < total_sbmb_cnt_floor) {
		total_sbmb_cnt_floor = total_sbmb_cnt;
	}

	/*
	 * If data is being removed from the send socket buffer,
	 * update the send byte count.
	 */
	if (sb->sb_flags & SB_SNDBYTE_CNT) {
		inp_decr_sndbytes_total(sb->sb_so, m->m_len);
	}
}

/*
 * Set lock on sockbuf sb; sleep if lock is already held.
 * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
 * Returns error without lock if sleep is interrupted.
 */
int
sblock(struct sockbuf *sb, uint32_t flags)
{
	boolean_t nointr = ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR));
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
	void *wchan;
	int error = 0;
	thread_t tp = current_thread();

	VERIFY((flags & SBL_VALID) == flags);

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	/*
	 * The content filter thread must hold the sockbuf lock.
	 */
	if ((so->so_flags & SOF_CONTENT_FILTER) && sb->sb_cfil_thread == tp) {
		/*
		 * Don't panic if we are defunct because SB_LOCK has
		 * been cleared by sodefunct().
		 */
		if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) {
			panic("%s: SB_LOCK not held for %p",
			    __func__, sb);
		}

		/* Keep the sockbuf locked */
		return 0;
	}

	if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT)) {
		return EWOULDBLOCK;
	}
	/*
	 * We may get here from sorflush(), in which case "sb" may not
	 * point to the real socket buffer.  Use the actual socket buffer
	 * address from the socket instead.
	 */
	wchan = (sb->sb_flags & SB_RECV) ?
	    &so->so_rcv.sb_flags : &so->so_snd.sb_flags;

	/*
	 * A content filter thread has exclusive access to the sockbuf
	 * until it clears sb_cfil_thread.
	 */
	while ((sb->sb_flags & SB_LOCK) ||
	    ((so->so_flags & SOF_CONTENT_FILTER) &&
	    sb->sb_cfil_thread != NULL)) {
		lck_mtx_t *mutex_held;

		/*
		 * XXX: This code should be moved up above, outside of this
		 * loop; however, we may get here as part of
		 * sofreelastref(), and at that time pr_getlock() may no
		 * longer be able to return us the lock.  This will be
		 * fixed in the future.
		 */
		if (so->so_proto->pr_getlock != NULL) {
			mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
		} else {
			mutex_held = so->so_proto->pr_domain->dom_mtx;
		}

		LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);

		sb->sb_wantlock++;
		VERIFY(sb->sb_wantlock != 0);

		error = msleep(wchan, mutex_held,
		    nointr ? PSOCK : PSOCK | PCATCH,
		    nointr ? "sb_lock_nointr" : "sb_lock", NULL);

		VERIFY(sb->sb_wantlock != 0);
		sb->sb_wantlock--;

		if (error == 0 && (so->so_flags & SOF_DEFUNCT) &&
		    !(flags & SBL_IGNDEFUNCT)) {
			error = EBADF;
			SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
			    "(%d)\n", __func__, proc_selfpid(),
			    proc_best_name(current_proc()),
			    so->so_gencnt,
			    SOCK_DOM(so), SOCK_TYPE(so), error);
		}

		if (error != 0) {
			return error;
		}
	}
	sb->sb_flags |= SB_LOCK;
	return 0;
}

/*
 * Release lock on sockbuf sb.
 */
void
sbunlock(struct sockbuf *sb, boolean_t keeplocked)
{
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
	thread_t tp = current_thread();

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	/*
	 * The content filter thread must hold the sockbuf lock.
	 */
	if ((so->so_flags & SOF_CONTENT_FILTER) && sb->sb_cfil_thread == tp) {
		/*
		 * Don't panic if we are defunct because SB_LOCK has
		 * been cleared by sodefunct().
		 */
		if (!(so->so_flags & SOF_DEFUNCT) &&
		    !(sb->sb_flags & SB_LOCK) &&
		    !(so->so_state & SS_DEFUNCT) &&
		    !(so->so_flags1 & SOF1_DEFUNCTINPROG)) {
			panic("%s: SB_LOCK not held for %p",
			    __func__, sb);
		}
		/* Keep the sockbuf locked and proceed */
	} else {
		VERIFY((sb->sb_flags & SB_LOCK) ||
		    (so->so_state & SS_DEFUNCT) ||
		    (so->so_flags1 & SOF1_DEFUNCTINPROG));

		sb->sb_flags &= ~SB_LOCK;

		if (sb->sb_wantlock > 0) {
			/*
			 * We may get here from sorflush(), in which case "sb"
			 * may not point to the real socket buffer.  Use the
			 * actual socket buffer address from the socket instead.
			 */
			wakeup((sb->sb_flags & SB_RECV) ? &so->so_rcv.sb_flags :
			    &so->so_snd.sb_flags);
		}
	}

	if (!keeplocked) {	/* unlock on exit */
		if (so->so_flags & SOF_MP_SUBFLOW || SOCK_DOM(so) == PF_MULTIPATH) {
			(*so->so_proto->pr_unlock)(so, 1, lr_saved);
		} else {
			lck_mtx_t *mutex_held;

			if (so->so_proto->pr_getlock != NULL) {
				mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
			} else {
				mutex_held = so->so_proto->pr_domain->dom_mtx;
			}

			LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);

			VERIFY(so->so_usecount > 0);
			so->so_usecount--;
			so->unlock_lr[so->next_unlock_lr] = lr_saved;
			so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
			lck_mtx_unlock(mutex_held);
		}
	}
}
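
/*
 * Illustrative only: the canonical pairing of sblock()/sbunlock()
 * as used by code paths that must keep a socket buffer stable while
 * the socket itself stays locked.  example_drain_rcv is a
 * hypothetical name; compare the real pattern in sbflush() above.
 */
#if 0
static int
example_drain_rcv(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	int error;

	/* the socket lock must already be held here */
	error = sblock(sb, SBL_WAIT);
	if (error != 0) {
		return error;	/* interrupted or defunct */
	}
	sbdrop(sb, (int)sb->sb_cc);
	sbunlock(sb, TRUE);	/* TRUE: keep the socket locked */
	return 0;
}
#endif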

void
sorwakeup(struct socket *so)
{
	if (sb_notify(&so->so_rcv)) {
		sowakeup(so, &so->so_rcv, NULL);
	}
}

void
sowwakeup(struct socket *so)
{
	if (sb_notify(&so->so_snd)) {
		sowakeup(so, &so->so_snd, NULL);
	}
}

static void
soevupcall(struct socket *so, uint32_t hint)
{
	if (so->so_event != NULL) {
		caddr_t so_eventarg = so->so_eventarg;

		hint &= so->so_eventmask;
		if (hint != 0) {
			so->so_event(so, so_eventarg, hint);
		}
	}
}

void
soevent(struct socket *so, uint32_t hint)
{
	if (net_wake_pkt_debug > 0 && (hint & SO_FILT_HINT_WAKE_PKT)) {
		os_log(OS_LOG_DEFAULT, "%s: SO_FILT_HINT_WAKE_PKT so %p",
		    __func__, so);
	}

	if (so->so_flags & SOF_KNOTE) {
		KNOTE(&so->so_klist, hint);
	}

	soevupcall(so, hint);

	/*
	 * Don't post an event if this is a subflow socket or
	 * the app has opted out of using the cellular interface.
	 */
	if ((hint & SO_FILT_HINT_IFDENIED) &&
	    !(so->so_flags & SOF_MP_SUBFLOW) &&
	    !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR) &&
	    !(so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE) &&
	    !(so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
		soevent_ifdenied(so);
	}
}

static void
soevent_ifdenied(struct socket *so)
{
	struct kev_netpolicy_ifdenied ev_ifdenied;

	bzero(&ev_ifdenied, sizeof(ev_ifdenied));
	/*
	 * The event consumer is interested in the effective {upid,pid,uuid}
	 * info, which can be different from that of the process that most
	 * recently performed a system call on the socket, i.e. when the
	 * socket is delegated.
	 */
	if (so->so_flags & SOF_DELEGATED) {
		ev_ifdenied.ev_data.eupid = so->e_upid;
		ev_ifdenied.ev_data.epid = so->e_pid;
		uuid_copy(ev_ifdenied.ev_data.euuid, so->e_uuid);
	} else {
		ev_ifdenied.ev_data.eupid = so->last_upid;
		ev_ifdenied.ev_data.epid = so->last_pid;
		uuid_copy(ev_ifdenied.ev_data.euuid, so->last_uuid);
	}

	if (++so->so_ifdenied_notifies > 1) {
		/*
		 * Allow for at most one kernel event to be generated per
		 * socket; so_ifdenied_notifies is reset upon changes in
		 * the UUID policy.  See comments in inp_update_policy.
		 */
		if (net_io_policy_log) {
			uuid_string_t buf;

			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %llu "
			    "euuid %s%s has %d redundant events suppressed\n",
			    __func__, so->last_pid,
			    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
			    SOCK_TYPE(so), ev_ifdenied.ev_data.epid, buf,
			    ((so->so_flags & SOF_DELEGATED) ?
			    " [delegated]" : ""), so->so_ifdenied_notifies);
		}
	} else {
		if (net_io_policy_log) {
			uuid_string_t buf;

			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %llu "
			    "euuid %s%s event posted\n", __func__,
			    so->last_pid, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so),
			    ev_ifdenied.ev_data.epid, buf,
			    ((so->so_flags & SOF_DELEGATED) ?
			    " [delegated]" : ""));
		}
		netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data,
		    sizeof(ev_ifdenied));
	}
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type SONAME.
 */
struct sockaddr *
dup_sockaddr(struct sockaddr *sa, int canwait)
{
	struct sockaddr *sa2;

	sa2 = (struct sockaddr *)alloc_sockaddr(sa->sa_len,
	    canwait ? Z_WAITOK : Z_NOWAIT);
	if (sa2 != NULL) {
		bcopy(sa, sa2, sa->sa_len);
	}
	return sa2;
}
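
/*
 * Illustrative only: duplicating a peer address before handing it
 * back to a caller, the typical dup_sockaddr() pattern.  The names
 * example_copy_peeraddr and "nam" are hypothetical.
 */
#if 0
static int
example_copy_peeraddr(struct sockaddr *src, struct sockaddr **nam)
{
	/* canwait != 0 allows the allocation to block (Z_WAITOK) */
	*nam = dup_sockaddr(src, 1);
	return (*nam == NULL) ? ENOMEM : 0;
}
#endif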

void *
alloc_sockaddr(size_t size, zalloc_flags_t flags)
{
	VERIFY(size <= UINT8_MAX);

	__typed_allocators_ignore_push
	struct sockaddr *sa = kheap_alloc(KHEAP_SONAME, size, flags | Z_ZERO);
	__typed_allocators_ignore_pop
	if (sa != NULL) {
		sa->sa_len = (uint8_t)size;
	}

	return sa;
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof(*xso);
	xso->xso_so = (_XSOCKET_PTR(struct socket *))VM_KERNEL_ADDRPERM(so);
	xso->so_type = so->so_type;
	xso->so_options = (short)(so->so_options & 0xffff);
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = (_XSOCKET_PTR(caddr_t))VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		xso->xso_protocol = SOCK_PROTO(so);
		xso->xso_family = SOCK_DOM(so);
	} else {
		xso->xso_protocol = xso->xso_family = 0;
	}
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_pgid;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = kauth_cred_getuid(so->so_cred);
}


#if XNU_TARGET_OS_OSX

void
sotoxsocket64(struct socket *so, struct xsocket64 *xso)
{
	xso->xso_len = sizeof(*xso);
	xso->xso_so = (u_int64_t)VM_KERNEL_ADDRPERM(so);
	xso->so_type = so->so_type;
	xso->so_options = (short)(so->so_options & 0xffff);
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		xso->xso_protocol = SOCK_PROTO(so);
		xso->xso_family = SOCK_DOM(so);
	} else {
		xso->xso_protocol = xso->xso_family = 0;
	}
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_pgid;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = kauth_cred_getuid(so->so_cred);
}

#endif /* XNU_TARGET_OS_OSX */

/*
 * This does the same for sockbufs.  Note that the xsockbuf structure,
 * since it is always embedded in a socket, does not include a self
 * pointer nor a length.  We make this entry point public in case
 * some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{
	xsb->sb_cc = sb->sb_cc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = (short)sb->sb_flags;
	xsb->sb_timeo = (short)
	    ((sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick);
	if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) {
		xsb->sb_timeo = 1;
	}
}
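
/*
 * Worked example (illustrative figures): with hz = 100 (so tick =
 * 10000 us), a timeout of { tv_sec = 2, tv_usec = 500000 } exports
 * as 2 * 100 + 500000 / 10000 = 250 ticks.  A sub-tick timeout such
 * as { 0, 400 } would truncate to 0, so the final check rounds any
 * nonzero microsecond remainder up to at least 1 tick.
 */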

/*
 * Based on the policy set by an all-knowing decision maker, throttle
 * sockets that have been marked as belonging to a "background" process.
 */
inline int
soisthrottled(struct socket *so)
{
	return so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND;
}

inline int
soisprivilegedtraffic(struct socket *so)
{
	return (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS) ? 1 : 0;
}

inline int
soissrcbackground(struct socket *so)
{
	return (so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND) ||
	    IS_SO_TC_BACKGROUND(so->so_traffic_class);
}

inline int
soissrcrealtime(struct socket *so)
{
	return so->so_traffic_class >= SO_TC_AV &&
	    so->so_traffic_class <= SO_TC_VO;
}

inline int
soissrcbesteffort(struct socket *so)
{
	return so->so_traffic_class == SO_TC_BE ||
	    so->so_traffic_class == SO_TC_RD ||
	    so->so_traffic_class == SO_TC_OAM;
}

void
soclearfastopen(struct socket *so)
{
	if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
		so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
	}

	if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) {
		so->so_flags1 &= ~SOF1_DATA_IDEMPOTENT;
	}
}

void
sonullevent(struct socket *so, void *arg, uint32_t hint)
{
#pragma unused(so, arg, hint)
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "IPC");

/* Check that the maximum socket buffer size is within a range */

static int
sysctl_sb_max SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	u_int32_t new_value;
	int changed = 0;
	int error = sysctl_io_number(req, sb_max, sizeof(u_int32_t),
	    &new_value, &changed);
	if (!error && changed) {
		if (new_value > LOW_SB_MAX && new_value <= high_sb_max) {
			sb_max = new_value;
		} else {
			error = ERANGE;
		}
	}
	return error;
}
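
/*
 * Illustrative only: the handler above backs kern.ipc.maxsockbuf,
 * so from user space an administrator can inspect or raise the cap,
 * e.g.:
 *
 *	sysctl kern.ipc.maxsockbuf
 *	sudo sysctl -w kern.ipc.maxsockbuf=8388608
 *
 * Values outside (LOW_SB_MAX, high_sb_max] are rejected with ERANGE.
 */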

SYSCTL_PROC(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size");

SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &sb_efficiency, 0, "");

SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters,
    CTLFLAG_RD | CTLFLAG_LOCKED, &nmbclusters, 0, "");

SYSCTL_INT(_kern_ipc, OID_AUTO, njcl,
    CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, "");

SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes,
    CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, "");

SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat,
    CTLFLAG_RW | CTLFLAG_LOCKED, &soqlimitcompat, 1,
    "Enable socket queue limit compatibility");

/*
 * Hack alert -- rdar://33572856
 * A loopback test we cannot change was failing because it sets
 * SO_SNDTIMEO to 5 seconds, which is also the value of the minimum
 * persist timer.  Because of the persist timer, the connection was
 * never idle for 5 seconds, so SO_SNDTIMEO was not triggering at
 * 5 seconds, causing the test failure.
 * As a workaround we check the sysctl soqlencomp, which the test
 * already sets, and use it to disable auto-tuning of the receive
 * buffer.
 */

extern u_int32_t tcp_do_autorcvbuf;

static int
sysctl_soqlencomp SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	u_int32_t new_value;
	int changed = 0;
	int error = sysctl_io_number(req, soqlencomp, sizeof(u_int32_t),
	    &new_value, &changed);
	if (!error && changed) {
		soqlencomp = new_value;
		if (new_value != 0) {
			tcp_do_autorcvbuf = 0;
			tcptv_persmin_val = 6 * TCP_RETRANSHZ;
		}
	}
	return error;
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, soqlencomp,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &soqlencomp, 0, &sysctl_soqlencomp, "IU", "");

SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt, CTLFLAG_RD | CTLFLAG_LOCKED,
    &total_sbmb_cnt, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt_peak, CTLFLAG_RD | CTLFLAG_LOCKED,
    &total_sbmb_cnt_peak, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt_floor, CTLFLAG_RD | CTLFLAG_LOCKED,
    &total_sbmb_cnt_floor, 0, "");
SYSCTL_QUAD(_kern_ipc, OID_AUTO, sbmb_limreached, CTLFLAG_RD | CTLFLAG_LOCKED,
    &sbmb_limreached, "");


SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy");

SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED,
    &net_io_policy_log, 0, "");

#if CONFIG_PROC_UUID_POLICY
SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, uuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &net_io_policy_uuid, 0, "");
#endif /* CONFIG_PROC_UUID_POLICY */